File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 0000000..051eab3
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,49 @@
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on X86 && PCI
+	depends on (AGP || AGP=n)
+	select INTEL_GTT
+	select AGP_INTEL if AGP
+	select INTERVAL_TREE
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  This driver
+	  is used by the Intel driver in X.org 6.8 and XFree86 4.4 and
+	  above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
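+
+	  For example: "modprobe i915 preliminary_hw_support=1", or boot
+	  with i915.preliminary_hw_support=1 on the kernel command line
+	  if the driver is built in.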
+
+	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 0000000..44d290a
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,101 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+	  i915_irq.o \
+	  i915_params.o \
+	  i915_suspend.o \
+	  i915_sysfs.o \
+	  intel_csr.o \
+	  intel_pm.o \
+	  intel_runtime_pm.o
+
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
+	  i915_gem_batch_pool.o \
+	  i915_gem_context.o \
+	  i915_gem_debug.o \
+	  i915_gem_dmabuf.o \
+	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_fence.o \
+	  i915_gem_gtt.o \
+	  i915_gem.o \
+	  i915_gem_render_state.o \
+	  i915_gem_shrinker.o \
+	  i915_gem_stolen.o \
+	  i915_gem_tiling.o \
+	  i915_gem_userptr.o \
+	  i915_gpu_error.o \
+	  i915_trace_points.o \
+	  intel_lrc.o \
+	  intel_mocs.o \
+	  intel_ringbuffer.o \
+	  intel_uncore.o
+
+# general-purpose microcontroller (GuC) support
+i915-y += intel_guc_loader.o \
+	  i915_guc_submission.o
+
+# autogenerated null render state
+i915-y += intel_renderstate_gen6.o \
+	  intel_renderstate_gen7.o \
+	  intel_renderstate_gen8.o \
+	  intel_renderstate_gen9.o
+
+# modesetting core code
+i915-y += intel_audio.o \
+	  intel_atomic.o \
+	  intel_atomic_plane.o \
+	  intel_bios.o \
+	  intel_display.o \
+	  intel_fbc.o \
+	  intel_fifo_underrun.o \
+	  intel_frontbuffer.o \
+	  intel_hotplug.o \
+	  intel_modes.o \
+	  intel_overlay.o \
+	  intel_psr.o \
+	  intel_sideband.o \
+	  intel_sprite.o
+i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION)	+= intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
+	  dvo_ch7xxx.o \
+	  dvo_ivch.o \
+	  dvo_ns2501.o \
+	  dvo_sil164.o \
+	  dvo_tfp410.o \
+	  intel_crt.o \
+	  intel_ddi.o \
+	  intel_dp_mst.o \
+	  intel_dp.o \
+	  intel_dsi.o \
+	  intel_dsi_panel_vbt.o \
+	  intel_dsi_pll.o \
+	  intel_dvo.o \
+	  intel_hdmi.o \
+	  intel_i2c.o \
+	  intel_lvds.o \
+	  intel_panel.o \
+	  intel_sdvo.o \
+	  intel_tv.o
+
+# virtual gpu code
+i915-y += i915_vgpu.o
+
+# legacy horrors
+i915-y += i915_dma.o
+
+obj-$(CONFIG_DRM_I915)  += i915.o
+
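+# i915_trace_points.c defines CREATE_TRACE_POINTS before including the
+# trace header, so the source directory itself must be on the include
+# path.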
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
new file mode 100644
index 0000000..0e2c1b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+	const char *name;
+	int type;
+	/* DVOA/B/C output register */
+	u32 dvo_reg;
+	/* GPIO register used for i2c bus to control this device */
+	u32 gpio;
+	int slave_addr;
+
+	const struct intel_dvo_dev_ops *dev_ops;
+	void *dev_priv;
+	struct i2c_adapter *i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+	/*
+	 * Initialize the device at startup time.
+	 * Returns false if the device does not exist.
+	 */
+	bool (*init)(struct intel_dvo_device *dvo,
+		     struct i2c_adapter *i2cbus);
+
+	/*
+	 * Called to allow the output a chance to create properties after the
+	 * RandR objects have been created.
+	 */
+	void (*create_resources)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Turn on/off output.
+	 *
+	 * Because none of our dvo drivers support intermediate power levels,
+	 * we don't expose this in the interface.
+	 */
+	void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+	/*
+	 * Callback for testing a video mode for a given output.
+	 *
+	 * This function should only check for cases where a mode can't
+	 * be supported on the output specifically, and not represent
+	 * generic CRTC limitations.
+	 *
+	 * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+	 */
+	int (*mode_valid)(struct intel_dvo_device *dvo,
+			  struct drm_display_mode *mode);
+
+	/*
+	 * Callback for preparing mode changes on an output
+	 */
+	void (*prepare)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Callback for committing mode changes on an output
+	 */
+	void (*commit)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Callback for setting up a video mode after fixups have been made.
+	 *
+	 * This is only called while the output is disabled.  The dpms callback
+	 * must be all that's necessary to turn the output on after this
+	 * function is called.
+	 */
+	void (*mode_set)(struct intel_dvo_device *dvo,
+			 const struct drm_display_mode *mode,
+			 const struct drm_display_mode *adjusted_mode);
+
+	/*
+	 * Probe for a connected output, and return detect_status.
+	 */
+	enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Probe the current hw status, returning true if the connected output
+	 * is active.
+	 */
+	bool (*get_hw_state)(struct intel_dvo_device *dvo);
+
+	/**
+	 * Query the device for the modes it provides.
+	 *
+	 * This function may also update MonInfo, mm_width, and mm_height.
+	 *
+	 * \return singly-linked list of modes or NULL if no modes found.
+	 */
+	struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+	/**
+	 * Clean up driver-specific bits of the output
+	 */
+	void (*destroy) (struct intel_dvo_device *dvo);
+
+	/**
+	 * Debugging hook to dump device registers to log file
+	 */
+	void (*dump_regs)(struct intel_dvo_device *dvo);
+};
+
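+/*
+ * Ops tables implemented by the individual dvo_*.c encoder drivers in
+ * this directory; the DVO support code (intel_dvo.c) probes for one of
+ * these encoder chips at init time.
+ */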
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
new file mode 100644
index 0000000..cbb2202
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE		0x00
+#define CH7017_FLICKER_FILTER		0x01
+#define CH7017_VIDEO_BANDWIDTH		0x02
+#define CH7017_TEXT_ENHANCEMENT		0x03
+#define CH7017_START_ACTIVE_VIDEO	0x04
+#define CH7017_HORIZONTAL_POSITION	0x05
+#define CH7017_VERTICAL_POSITION	0x06
+#define CH7017_BLACK_LEVEL		0x07
+#define CH7017_CONTRAST_ENHANCEMENT	0x08
+#define CH7017_TV_PLL			0x09
+#define CH7017_TV_PLL_M			0x0a
+#define CH7017_TV_PLL_N			0x0b
+#define CH7017_SUB_CARRIER_0		0x0c
+#define CH7017_CIV_CONTROL		0x10
+#define CH7017_CIV_0			0x11
+#define CH7017_CHROMA_BOOST		0x14
+#define CH7017_CLOCK_MODE		0x1c
+#define CH7017_INPUT_CLOCK		0x1d
+#define CH7017_GPIO_CONTROL		0x1e
+#define CH7017_INPUT_DATA_FORMAT	0x1f
+#define CH7017_CONNECTION_DETECT	0x20
+#define CH7017_DAC_CONTROL		0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT	0x22
+#define CH7017_DEFEAT_VSYNC		0x47
+#define CH7017_TEST_PATTERN		0x48
+
+#define CH7017_POWER_MANAGEMENT		0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN			(1 << 0)
+#define CH7017_DAC0_POWER_DOWN		(1 << 1)
+#define CH7017_DAC1_POWER_DOWN		(1 << 2)
+#define CH7017_DAC2_POWER_DOWN		(1 << 3)
+#define CH7017_DAC3_POWER_DOWN		(1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN		(1 << 5)
+
+#define CH7017_VERSION_ID		0x4a
+
+#define CH7017_DEVICE_ID		0x4b
+#define CH7017_DEVICE_ID_VALUE		0x1b
+#define CH7018_DEVICE_ID_VALUE		0x1a
+#define CH7019_DEVICE_ID_VALUE		0x19
+
+#define CH7017_XCLK_D2_ADJUST		0x53
+#define CH7017_UP_SCALER_COEFF_0	0x55
+#define CH7017_UP_SCALER_COEFF_1	0x56
+#define CH7017_UP_SCALER_COEFF_2	0x57
+#define CH7017_UP_SCALER_COEFF_3	0x58
+#define CH7017_UP_SCALER_COEFF_4	0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0	0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1	0x5b
+#define CH7017_GPIO_INVERT		0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0	0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1	0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT	0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT	0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK	(0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK	(0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT	0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT	0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN		0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK	(0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN	(1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN		(1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING		0x64
+#define CH7017_LVDS_DITHER_2D		(1 << 2)
+#define CH7017_LVDS_DITHER_DIS		(1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN	(1 << 4)
+#define CH7017_LVDS_24_BIT		(1 << 5)
+
+#define CH7017_LVDS_ENCODING_2		0x65
+
+#define CH7017_LVDS_PLL_CONTROL		0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN		(1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN		(1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1	0x67
+#define CH7017_POWER_SEQUENCING_T2	0x68
+#define CH7017_POWER_SEQUENCING_T3	0x69
+#define CH7017_POWER_SEQUENCING_T4	0x6a
+#define CH7017_POWER_SEQUENCING_T5	0x6b
+#define CH7017_GPIO_DRIVER_TYPE		0x6c
+#define CH7017_GPIO_DATA		0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL	0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV	0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL	0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT	4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE		0x73
+# define CH7017_CHARGE_PUMP_LOW		0x0
+# define CH7017_CHARGE_PUMP_HIGH	0x3
+# define CH7017_LVDS_CHANNEL_A		(1 << 3)
+# define CH7017_LVDS_CHANNEL_B		(1 << 4)
+# define CH7017_TV_DAC_A		(1 << 5)
+# define CH7017_TV_DAC_B		(1 << 6)
+# define CH7017_DDC_SELECT_DC2		(1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE	0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION	0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER	0x76
+
+#define CH7017_LVDS_CONTROL_2		0x78
+# define CH7017_LOOP_FILTER_SHIFT	5
+# define CH7017_PHASE_DETECTOR_SHIFT	0
+
+#define CH7017_BANG_LIMIT_CONTROL	0x7f
+
+struct ch7017_priv {
+	uint8_t dummy;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
+
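+/*
+ * Read an 8-bit register: the register address goes out in one i2c
+ * message and the value comes back in a second, so the i2c core joins
+ * the two with a repeated start.
+ */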
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = val,
+		}
+	};
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+{
+	uint8_t buf[2] = { addr, val };
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = buf,
+	};
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+}
+
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	struct ch7017_priv *priv;
+	const char *str;
+	u8 val;
+
+	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = priv;
+
+	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+		goto fail;
+
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
+		DRM_DEBUG_KMS("ch701x not detected, got %d from %s "
+			      "slave %d.\n",
+			      val, adapter->name, dvo->slave_addr);
+		goto fail;
+	}
+
+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
+	return true;
+
+fail:
+	kfree(priv);
+	return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	if (mode->clock > 160000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+			    const struct drm_display_mode *mode,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+	uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+	uint8_t horizontal_active_pixel_input;
+	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+	uint8_t active_input_line_output;
+
+	DRM_DEBUG_KMS("Registers before mode setting\n");
+	ch7017_dump_regs(dvo);
+
+	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+	if (mode->clock < 100000) {
+		outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+		lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+			(13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+		lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+			(3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+			(0 << CH7017_PHASE_DETECTOR_SHIFT);
+	} else {
+		outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+		lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+			(3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+		lvds_pll_feedback_div = 35;
+		lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+			(0 << CH7017_PHASE_DETECTOR_SHIFT);
+		if (1) { /* XXX: dual channel panel detection.  Assume yes for now. */
+			outputs_enable |= CH7017_LVDS_CHANNEL_B;
+			lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+				(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+				(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		} else {
+			lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+				(1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+				(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		}
+	}
+
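+	/*
+	 * hdisplay and vdisplay are up to 11 bits wide: the low 8 bits
+	 * of each go into the dedicated low-byte registers below, while
+	 * the high 3 bits are packed into ACTIVE_INPUT_LINE_OUTPUT and,
+	 * for the horizontal output size, into LVDS_POWER_DOWN.
+	 */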
+	horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+	vertical_active_line_output = mode->vdisplay & 0x00ff;
+	horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+	active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+				   (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+	lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+			  (mode->hdisplay & 0x0700) >> 8;
+
+	ch7017_dpms(dvo, false);
+	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+			horizontal_active_pixel_input);
+	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+			horizontal_active_pixel_output);
+	ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+			vertical_active_line_output);
+	ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+			active_input_line_output);
+	ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+	ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+	ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+	ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+	/* Turn the LVDS back on with new settings. */
+	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+	DRM_DEBUG_KMS("Registers after mode setting\n");
+	ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	/* Turn off TV/VGA, and never turn it on since we don't support it. */
+	ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+			CH7017_DAC0_POWER_DOWN |
+			CH7017_DAC1_POWER_DOWN |
+			CH7017_DAC2_POWER_DOWN |
+			CH7017_DAC3_POWER_DOWN |
+			CH7017_TV_POWER_DOWN_EN);
+
+	if (enable) {
+		/* Turn on the LVDS */
+		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+			     val & ~CH7017_LVDS_POWER_DOWN_EN);
+	} else {
+		/* Turn off the LVDS */
+		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+			     val | CH7017_LVDS_POWER_DOWN_EN);
+	}
+
+	/* XXX: Should actually wait for the power status to update somehow */
+	msleep(20);
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	if (val & CH7017_LVDS_POWER_DOWN_EN)
+		return false;
+	else
+		return true;
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+#define DUMP(reg)					\
+do {							\
+	ch7017_read(dvo, reg, &val);			\
+	DRM_DEBUG_KMS(#reg ": %02x\n", val);		\
+} while (0)
+
+	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+	DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+	DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+	DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+	DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+	DUMP(CH7017_LVDS_CONTROL_2);
+	DUMP(CH7017_OUTPUTS_ENABLE);
+	DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+	struct ch7017_priv *priv = dvo->dev_priv;
+
+	if (priv) {
+		kfree(priv);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+	.init = ch7017_init,
+	.detect = ch7017_detect,
+	.mode_valid = ch7017_mode_valid,
+	.mode_set = ch7017_mode_set,
+	.dpms = ch7017_dpms,
+	.get_hw_state = ch7017_get_hw_state,
+	.dump_regs = ch7017_dump_regs,
+	.destroy = ch7017_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
new file mode 100644
index 0000000..4b4acc1
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -0,0 +1,368 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID		0x4a
+#define CH7xxx_REG_DID		0x4b
+
+#define CH7011_VID		0x83 /* 7010 as well */
+#define CH7010B_VID		0x05
+#define CH7009A_VID		0x84
+#define CH7009B_VID		0x85
+#define CH7301_VID		0x95
+
+#define CH7xxx_VID		0x84
+#define CH7xxx_DID		0x17
+#define CH7010_DID		0x16
+
+#define CH7xxx_NUM_REGS		0x4c
+
+#define CH7xxx_CM		0x1c
+#define CH7xxx_CM_XCM		(1<<0)
+#define CH7xxx_CM_MCP		(1<<2)
+#define CH7xxx_INPUT_CLOCK	0x1d
+#define CH7xxx_GPIO		0x1e
+#define CH7xxx_GPIO_HPIR	(1<<3)
+#define CH7xxx_IDF		0x1f
+
+#define CH7xxx_IDF_HSP		(1<<3)
+#define CH7xxx_IDF_VSP		(1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI		(1<<5)
+
+#define CH7301_DAC_CNTL		0x21
+#define CH7301_HOTPLUG		0x23
+#define CH7xxx_TCTL		0x31
+#define CH7xxx_TVCO		0x32
+#define CH7xxx_TPCP		0x33
+#define CH7xxx_TPD		0x34
+#define CH7xxx_TPVT		0x35
+#define CH7xxx_TLPF		0x36
+#define CH7xxx_TCT		0x37
+#define CH7301_TEST_PATTERN	0x48
+
+#define CH7xxx_PM		0x49
+#define CH7xxx_PM_FPD		(1<<0)
+#define CH7301_PM_DACPD0	(1<<1)
+#define CH7301_PM_DACPD1	(1<<2)
+#define CH7301_PM_DACPD2	(1<<3)
+#define CH7xxx_PM_DVIL		(1<<6)
+#define CH7xxx_PM_DVIP		(1<<7)
+
+#define CH7301_SYNC_POLARITY	0x56
+#define CH7301_SYNC_RGB_YUV	(1<<0)
+#define CH7301_SYNC_POL_DVI	(1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+	uint8_t vid;
+	char *name;
+} ch7xxx_ids[] = {
+	{ CH7011_VID, "CH7011" },
+	{ CH7010B_VID, "CH7010B" },
+	{ CH7009A_VID, "CH7009A" },
+	{ CH7009B_VID, "CH7009B" },
+	{ CH7301_VID, "CH7301" },
+};
+
+static struct ch7xxx_did_struct {
+	uint8_t did;
+	char *name;
+} ch7xxx_dids[] = {
+	{ CH7xxx_DID, "CH7XXX" },
+	{ CH7010_DID, "CH7010B" },
+};
+
+struct ch7xxx_priv {
+	bool quiet;
+};
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+		if (ch7xxx_ids[i].vid == vid)
+			return ch7xxx_ids[i].name;
+	}
+
+	return NULL;
+}
+
+static char *ch7xxx_get_did(uint8_t did)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
+		if (ch7xxx_dids[i].did == did)
+			return ch7xxx_dids[i].name;
+	}
+
+	return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ch7xxx->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!ch7xxx->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the CH7xxx chip on the specified i2c bus */
+	struct ch7xxx_priv *ch7xxx;
+	uint8_t vendor, device;
+	char *name, *devid;
+
+	ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+	if (ch7xxx == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ch7xxx;
+	ch7xxx->quiet = true;
+
+	if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+		goto out;
+
+	name = ch7xxx_get_id(vendor);
+	if (!name) {
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
+			  vendor, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+
+	if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+		goto out;
+
+	devid = ch7xxx_get_did(device);
+	if (!devid) {
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
+			  device, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	ch7xxx->quiet = false;
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+		  name, vendor, device);
+	return true;
+out:
+	kfree(ch7xxx);
+	return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+	uint8_t cdet, orig_pm, pm;
+
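+	/*
+	 * Briefly force the DVI output out of power-down so that the
+	 * connection-detect bit reads back something meaningful, then
+	 * restore the original power-management state.
+	 */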
+	ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+	pm = orig_pm;
+	pm &= ~CH7xxx_PM_FPD;
+	pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+	ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+	ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+	ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+	if (cdet & CH7xxx_CDET_DVI)
+		return connector_status_connected;
+	return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+			    const struct drm_display_mode *mode,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+	if (mode->clock <= 65000) {
+		tvco = 0x23;
+		tpcp = 0x08;
+		tpd = 0x16;
+		tlpf = 0x60;
+	} else {
+		tvco = 0x2d;
+		tpcp = 0x06;
+		tpd = 0x26;
+		tlpf = 0xa0;
+	}
+
+	ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+	ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+	ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+	ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+	ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+	ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+	ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+	ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+	idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		idf |= CH7xxx_IDF_HSP;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		idf |= CH7xxx_IDF_VSP;
+
+	ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	if (enable)
+		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+	else
+		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+	u8 val;
+
+	ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+	if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+		return true;
+	else
+		return false;
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+	int i;
+
+	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+		uint8_t val;
+		if ((i % 8) == 0)
+			DRM_DEBUG_KMS("\n %02X: ", i);
+		ch7xxx_readb(dvo, i, &val);
+		DRM_DEBUG_KMS("%02X ", val);
+	}
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+	if (ch7xxx) {
+		kfree(ch7xxx);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+	.init = ch7xxx_init,
+	.detect = ch7xxx_detect,
+	.mode_valid = ch7xxx_mode_valid,
+	.mode_set = ch7xxx_mode_set,
+	.dpms = ch7xxx_dpms,
+	.get_hw_state = ch7xxx_get_hw_state,
+	.dump_regs = ch7xxx_dump_regs,
+	.destroy = ch7xxx_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
new file mode 100644
index 0000000..ff9f1b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Thomas Richter <thor@math.tu-berlin.de>
+ *
+ * Minor modifications (Dithering enable):
+ *    Thomas Richter <thor@math.tu-berlin.de>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00		0x00
+# define VR00_BASE_ADDRESS_MASK		0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01		0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE		(1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE		(1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE		(1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE		(1 << 0)
+/** Enable dithering for 18bpp panels. Not documented. */
+# define VR01_DITHER_ENABLE             (1 << 4)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10		0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE		(1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18		(0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24		(1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18		(2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24		(3 << 2)
+/** Mask that defines the depth of the pipeline */
+# define VR10_INTERFACE_DEPTH_MASK      (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20	0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21	0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30		0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON			(1 << 15)
+
+#define VR40		0x40
+# define VR40_STALL_ENABLE		(1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE	(1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING	(1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE	(1 << 10)
+# define VR40_AUTO_RATIO_ENABLE		(1 << 9)
+# define VR40_CLOCK_GATING_ENABLE	(1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41		0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42		0x42
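+
+/*
+ * Worked example: fitting a 640-wide image onto a 1024-wide panel
+ * gives (((640 - 1) << 16) / (1024 - 1)) >> 2 = 10233 (0x27f9).
+ * ivch_mode_set() below computes exactly this ratio from the mode
+ * and the adjusted mode.
+ */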
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43		0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80	    0x80
+#define VR81	    0x81
+#define VR82	    0x82
+#define VR83	    0x83
+#define VR84	    0x84
+#define VR85	    0x85
+#define VR86	    0x86
+#define VR87	    0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88	    0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E	    0x8E
+# define VR8E_PANEL_TYPE_MASK		(0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS	(0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS	(1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL	(1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F	    0x8F
+# define VR8F_VCH_PRESENT		(1 << 0)
+# define VR8F_DISPLAY_CONN		(1 << 1)
+# define VR8F_POWER_MASK		(0x3c)
+# define VR8F_POWER_POS			(2)
+
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+	0x11, 0x12,
+	0x18, 0x19, 0x1a, 0x1f,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x8e, 0x8f,
+	0x10		/* this must come last */
+};
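+
+/*
+ * VR10 sits at the end of backup_addresses deliberately: ivch_reset()
+ * restores it after everything else, and ivch_mode_set() fetches it
+ * back as reg_backup[ARRAY_SIZE(backup_addresses) - 1].
+ */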
+
+
+struct ivch_priv {
+	bool quiet;
+
+	uint16_t width, height;
+
+	/* Register backup */
+
+	uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers is 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[1];
+	u8 in_buf[2];
+
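+	/*
+	 * The ivch uses an unusual three-message sequence: a zero-length
+	 * read addressed to the chip, the register index sent with
+	 * I2C_M_NOSTART, then a two-byte I2C_M_NOSTART read.  The 16-bit
+	 * value arrives low byte first.
+	 */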
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 0,
+		},
+		{
+			.addr = 0,
+			.flags = I2C_M_NOSTART,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD | I2C_M_NOSTART,
+			.len = 2,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
+		*data = (in_buf[1] << 8) | in_buf[0];
+		return true;
+	}
+
+	if (!priv->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+				"%s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[3];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 3,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = data & 0xff;
+	out_buf[2] = data >> 8;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!priv->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+		      struct i2c_adapter *adapter)
+{
+	struct ivch_priv *priv;
+	uint16_t temp;
+	int i;
+
+	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = priv;
+	priv->quiet = true;
+
+	if (!ivch_read(dvo, VR00, &temp))
+		goto out;
+	priv->quiet = false;
+
+	/* Since the identification bits are probably zeroes, which doesn't seem
+	 * very unique, check that the value in the base address field matches
+	 * the address it's responding on.
+	 */
+	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+			  "(%d vs %d)\n",
+			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+		goto out;
+	}
+
+	ivch_read(dvo, VR20, &priv->width);
+	ivch_read(dvo, VR21, &priv->height);
+
+	/* Make a backup of the registers to be able to restore them
+	 * upon suspend.
+	 */
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+	ivch_dump_regs(dvo);
+
+	return true;
+
+out:
+	kfree(priv);
+	return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+					    struct drm_display_mode *mode)
+{
+	if (mode->clock > 112000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	int i;
+
+	DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+	ivch_write(dvo, VR10, 0x0000);
+
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	int i;
+	uint16_t vr01, vr30, backlight;
+
+	ivch_reset(dvo);
+
+	/* Set the new power state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return;
+
+	if (enable)
+		backlight = 1;
+	else
+		backlight = 0;
+
+	ivch_write(dvo, VR80, backlight);
+
+	if (enable)
+		vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+	else
+		vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+	ivch_write(dvo, VR01, vr01);
+
+	/* Wait for the panel to make its state transition */
+	for (i = 0; i < 100; i++) {
+		if (!ivch_read(dvo, VR30, &vr30))
+			break;
+
+		if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+			break;
+		udelay(1000);
+	}
+	/* wait some more; vch may fail to resync sometimes without this */
+	udelay(16 * 1000);
+}
+
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint16_t vr01;
+
+	ivch_reset(dvo);
+
+	/* Set the new power state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return false;
+
+	if (vr01 & VR01_LCD_ENABLE)
+		return true;
+	else
+		return false;
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+			  const struct drm_display_mode *mode,
+			  const struct drm_display_mode *adjusted_mode)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	uint16_t vr40 = 0;
+	uint16_t vr01 = 0;
+	uint16_t vr10;
+
+	ivch_reset(dvo);
+
+	vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
+	/* Enable dithering for 18 bpp pipelines */
+	vr10 &= VR10_INTERFACE_DEPTH_MASK;
+	if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
+		vr01 = VR01_DITHER_ENABLE;
+
+	vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+		VR40_HORIZONTAL_INTERP_ENABLE);
+
+	if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
+	    mode->vdisplay != adjusted_mode->crtc_vdisplay) {
+		uint16_t x_ratio, y_ratio;
+
+		vr01 |= VR01_PANEL_FIT_ENABLE;
+		vr40 |= VR40_CLOCK_GATING_ENABLE;
+		x_ratio = (((mode->hdisplay - 1) << 16) /
+			   (adjusted_mode->crtc_hdisplay - 1)) >> 2;
+		y_ratio = (((mode->vdisplay - 1) << 16) /
+			   (adjusted_mode->crtc_vdisplay - 1)) >> 2;
+		ivch_write(dvo, VR42, x_ratio);
+		ivch_write(dvo, VR41, y_ratio);
+	} else {
+		vr01 &= ~VR01_PANEL_FIT_ENABLE;
+		vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+	}
+	vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+	ivch_write(dvo, VR01, vr01);
+	ivch_write(dvo, VR40, vr40);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint16_t val;
+
+	ivch_read(dvo, VR00, &val);
+	DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
+	ivch_read(dvo, VR01, &val);
+	DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
+	ivch_read(dvo, VR10, &val);
+	DRM_DEBUG_KMS("VR10: 0x%04x\n", val);
+	ivch_read(dvo, VR30, &val);
+	DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
+	ivch_read(dvo, VR40, &val);
+	DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
+
+	/* GPIO registers */
+	ivch_read(dvo, VR80, &val);
+	DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
+	ivch_read(dvo, VR81, &val);
+	DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
+	ivch_read(dvo, VR82, &val);
+	DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
+	ivch_read(dvo, VR83, &val);
+	DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
+	ivch_read(dvo, VR84, &val);
+	DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
+	ivch_read(dvo, VR85, &val);
+	DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
+	ivch_read(dvo, VR86, &val);
+	DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
+	ivch_read(dvo, VR87, &val);
+	DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
+	ivch_read(dvo, VR88, &val);
+	DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
+
+	/* Scratch register 0 - AIM Panel type */
+	ivch_read(dvo, VR8E, &val);
+	DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
+
+	/* Scratch register 1 - Status register */
+	ivch_read(dvo, VR8F, &val);
+	DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+
+	if (priv) {
+		kfree(priv);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+	.init = ivch_init,
+	.dpms = ivch_dpms,
+	.get_hw_state = ivch_get_hw_state,
+	.mode_valid = ivch_mode_valid,
+	.mode_set = ivch_mode_set,
+	.detect = ivch_detect,
+	.dump_regs = ivch_dump_regs,
+	.destroy = ivch_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 0000000..063859f
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,709 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+/*
+ * The following registers are not part of the official datasheet
+ * and are the result of reverse engineering.
+ */
+
+/*
+ * Register c0 controls how the DVO synchronizes with
+ * its input.
+ */
+#define NS2501_REGC0 0xc0
+#define NS2501_C0_ENABLE (1<<0)	/* enable the DVO sync in general */
+#define NS2501_C0_HSYNC (1<<1)	/* synchronize horizontal with input */
+#define NS2501_C0_VSYNC (1<<2)	/* synchronize vertical with input */
+#define NS2501_C0_RESET (1<<7)	/* reset the synchronization flip/flops */
+
+/*
+ * Register 41 is somehow related to the sync register and sync
+ * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off)
+ * and 0x00 otherwise.
+ */
+#define NS2501_REG41 0x41
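+/* (The ns2501_modes table below follows this rule: the 640x480 and
+ * 1024x768 entries pair sync 0x05 with syncb 0x32, while the 800x600
+ * entry pairs sync 0x07 with syncb 0x00.)
+ */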
+
+/*
+ * This register controls the dithering of the DVO.
+ * One bit enables it, the others define the dithering depth.
+ * The higher the value, the lower the dithering depth.
+ */
+#define NS2501_F9_REG 0xf9
+#define NS2501_F9_ENABLE (1<<0)		/* if set, dithering is enabled */
+#define NS2501_F9_DITHER_MASK (0x7f<<1)	/* controls the dither depth */
+#define NS2501_F9_DITHER_SHIFT 1	/* shifts the dither mask */
+
+/*
+ * PLL configuration register. This is a pair of registers,
+ * one single byte register at 1B, and a pair at 1C,1D.
+ * These registers are counters/dividers.
+ */
+#define NS2501_REG1B 0x1b /* one byte PLL control register */
+#define NS2501_REG1C 0x1c /* low-part of the second register */
+#define NS2501_REG1D 0x1d /* high-part of the second register */
+
+/*
+ * Scaler control registers. Horizontal at b8,b9,
+ * vertical at 10,11. The scale factor is computed as
+ * 2^16/control-value. The low-byte comes first.
+ */
+#define NS2501_REG10 0x10 /* low-byte vertical scaler */
+#define NS2501_REG11 0x11 /* high-byte vertical scaler */
+#define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */
+#define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */
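+
+/*
+ * Worked example, using the ns2501_modes table below: hscale = 40960
+ * gives an upscale factor of 65536/40960 = 1.6, stretching a
+ * 640-pixel input line across the 1024-pixel panel width.
+ */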
+
+/*
+ * Display window definition. This consists of four registers
+ * per dimension. One register pair defines the start of the
+ * display, one the end.
+ * As far as I understand, this defines the window within which
+ * the scaler samples the input.
+ */
+#define NS2501_REGC1 0xc1 /* low-byte horizontal display start */
+#define NS2501_REGC2 0xc2 /* high-byte horizontal display start */
+#define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */
+#define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */
+#define NS2501_REGC5 0xc5 /* low-byte vertical display start */
+#define NS2501_REGC6 0xc6 /* high-byte vertical display start */
+#define NS2501_REGC7 0xc7 /* low-byte vertical display stop */
+#define NS2501_REGC8 0xc8 /* high-byte vertical display stop */
+
+/*
+ * The following register pair seems to define the start of
+ * the vertical sync. If automatic syncing is enabled, and the
+ * register value defines a sync pulse that is later than the
+ * incoming sync, then the register value is ignored and the
+ * external hsync triggers the synchronization.
+ */
+#define NS2501_REG80 0x80 /* low-byte vsync-start */
+#define NS2501_REG81 0x81 /* high-byte vsync-start */
+
+/*
+ * The following register pair seems to define the total number
+ * of lines created at the output side of the scaler.
+ * This is again a low-high register pair.
+ */
+#define NS2501_REG82 0x82 /* output display height, low byte */
+#define NS2501_REG83 0x83 /* output display height, high byte */
+
+/*
+ * The following registers define the end of the front-porch
+ * in horizontal and vertical position and hence allow shifting
+ * the image left/right or up/down.
+ */
+#define NS2501_REG98 0x98 /* horizontal start of display + 256, low */
+#define NS2501_REG99 0x99 /* horizontal start of display + 256, high */
+#define NS2501_REG8E 0x8e /* vertical start of the display, low byte */
+#define NS2501_REG8F 0x8f /* vertical start of the display, high byte */
+
+/*
+ * The following register pair control the function of the
+ * backlight and the DVO output. To enable the corresponding
+ * function, the corresponding bit must be set in both registers.
+ */
+#define NS2501_REG34 0x34 /* DVO enable functions, first register */
+#define NS2501_REG35 0x35 /* DVO enable functions, second register */
+#define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */
+#define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */
+
+/*
+ * Registers 9C and 9D define the vertical output offset
+ * of the visible region.
+ */
+#define NS2501_REG9C 0x9c
+#define NS2501_REG9D 0x9d
+
+/*
+ * The register 9F defines the dithering. This requires the
+ * scaler to be ON. Bit 0 enables dithering, the remaining
+ * bits control the depth of the dither. The higher the value,
+ * the LOWER the dithering amplitude. A good value seems to be
+ * 15 (total register value).
+ */
+#define NS2501_REGF9 0xf9
+#define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */
+#define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */
+#define NS2501_F9_DITHER_SHIFT 1	/* upshift of the dither mask */
+
+enum {
+	MODE_640x480,
+	MODE_800x600,
+	MODE_1024x768,
+};
+
+struct ns2501_reg {
+	uint8_t offset;
+	uint8_t value;
+};
+
+/*
+ * The following structure keeps the complete configuration of
+ * the DVO, given a specific output configuration.
+ * This is pretty much guess-work from reverse-engineering, so
+ * read all this with a grain of salt.
+ */
+struct ns2501_configuration {
+	uint8_t sync;		/* configuration of the C0 register */
+	uint8_t conf;		/* configuration register 8 */
+	uint8_t syncb;		/* configuration register 41 */
+	uint8_t	dither;		/* configuration of the dithering */
+	uint8_t pll_a;		/* PLL configuration, register A, 1B */
+	uint16_t pll_b;		/* PLL configuration, register B, 1C/1D */
+	uint16_t hstart;	/* horizontal start, registers C1/C2 */
+	uint16_t hstop;		/* horizontal total, registers C3/C4 */
+	uint16_t vstart;	/* vertical start, registers C5/C6 */
+	uint16_t vstop;		/* vertical total, registers C7/C8 */
+	uint16_t vsync;         /* manual vertical sync start, 80/81 */
+	uint16_t vtotal;        /* number of lines generated, 82/83 */
+	uint16_t hpos;		/* horizontal position + 256, 98/99  */
+	uint16_t vpos;		/* vertical position, 8e/8f */
+	uint16_t voffs;		/* vertical output offset, 9c/9d */
+	uint16_t hscale;	/* horizontal scaling factor, b8/b9 */
+	uint16_t vscale;	/* vertical scaling factor, 10/11 */
+};
+
+/*
+ * DVO configuration values, partially based on what the BIOS
+ * of the Fujitsu Lifebook S6010 writes into registers,
+ * partially found by manual tweaking. These configurations assume
+ * a 1024x768 panel.
+ */
+static const struct ns2501_configuration ns2501_modes[] = {
+	[MODE_640x480] = {
+		.sync	= NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+		.conf	= NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+		.syncb	= 0x32,
+		.dither	= 0x0f,
+		.pll_a	= 17,
+		.pll_b	= 852,
+		.hstart	= 144,
+		.hstop	= 783,
+		.vstart	= 22,
+		.vstop	= 514,
+		.vsync	= 2047, /* actually, ignored with this config */
+		.vtotal	= 1341,
+		.hpos	= 0,
+		.vpos	= 16,
+		.voffs	= 36,
+		.hscale	= 40960,
+		.vscale	= 40960
+	},
+	[MODE_800x600] = {
+		.sync	= NS2501_C0_ENABLE |
+			  NS2501_C0_HSYNC | NS2501_C0_VSYNC,
+		.conf   = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+		.syncb	= 0x00,
+		.dither	= 0x0f,
+		.pll_a	= 25,
+		.pll_b	= 612,
+		.hstart	= 215,
+		.hstop	= 1016,
+		.vstart	= 26,
+		.vstop	= 627,
+		.vsync	= 807,
+		.vtotal	= 1341,
+		.hpos	= 0,
+		.vpos	= 4,
+		.voffs	= 35,
+		.hscale	= 51248,
+		.vscale	= 51232
+	},
+	[MODE_1024x768] = {
+		.sync	= NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+		.conf   = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+		.syncb	= 0x32,
+		.dither	= 0x0f,
+		.pll_a	= 11,
+		.pll_b	= 1350,
+		.hstart	= 276,
+		.hstop	= 1299,
+		.vstart	= 15,
+		.vstop	= 1056,
+		.vsync	= 2047,
+		.vtotal	= 1341,
+		.hpos	= 0,
+		.vpos	= 7,
+		.voffs	= 27,
+		.hscale	= 65535,
+		.vscale	= 65535
+	}
+};
+
+/*
+ * Other configuration values left by the BIOS of the
+ * Fujitsu S6010 in the DVO control registers. Their
+ * value does not depend on the BIOS and their meaning
+ * is unknown.
+ */
+
+static const struct ns2501_reg mode_agnostic_values[] = {
+	/* 08 is mode specific */
+	[0] = { .offset = 0x0a, .value = 0x81, },
+	/* 10,11 are part of the mode specific configuration */
+	[1] = { .offset = 0x12, .value = 0x02, },
+	[2] = { .offset = 0x18, .value = 0x07, },
+	[3] = { .offset = 0x19, .value = 0x00, },
+	[4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */
+	/* 1b,1c,1d are part of the mode specific configuration */
+	[5] = { .offset = 0x1e, .value = 0x02, },
+	[6] = { .offset = 0x1f, .value = 0x40, },
+	[7] = { .offset = 0x20, .value = 0x00, },
+	[8] = { .offset = 0x21, .value = 0x00, },
+	[9] = { .offset = 0x22, .value = 0x00, },
+	[10] = { .offset = 0x23, .value = 0x00, },
+	[11] = { .offset = 0x24, .value = 0x00, },
+	[12] = { .offset = 0x25, .value = 0x00, },
+	[13] = { .offset = 0x26, .value = 0x00, },
+	[14] = { .offset = 0x27, .value = 0x00, },
+	[15] = { .offset = 0x7e, .value = 0x18, },
+	/* 80-84 are part of the mode-specific configuration */
+	[16] = { .offset = 0x84, .value = 0x00, },
+	[17] = { .offset = 0x85, .value = 0x00, },
+	[18] = { .offset = 0x86, .value = 0x00, },
+	[19] = { .offset = 0x87, .value = 0x00, },
+	[20] = { .offset = 0x88, .value = 0x00, },
+	[21] = { .offset = 0x89, .value = 0x00, },
+	[22] = { .offset = 0x8a, .value = 0x00, },
+	[23] = { .offset = 0x8b, .value = 0x00, },
+	[24] = { .offset = 0x8c, .value = 0x10, },
+	[25] = { .offset = 0x8d, .value = 0x02, },
+	/* 8e,8f are part of the mode-specific configuration */
+	[26] = { .offset = 0x90, .value = 0xff, },
+	[27] = { .offset = 0x91, .value = 0x07, },
+	[28] = { .offset = 0x92, .value = 0xa0, },
+	[29] = { .offset = 0x93, .value = 0x02, },
+	[30] = { .offset = 0x94, .value = 0x00, },
+	[31] = { .offset = 0x95, .value = 0x00, },
+	[32] = { .offset = 0x96, .value = 0x05, },
+	[33] = { .offset = 0x97, .value = 0x00, },
+	/* 98,99 are part of the mode-specific configuration */
+	[34] = { .offset = 0x9a, .value = 0x88, },
+	[35] = { .offset = 0x9b, .value = 0x00, },
+	/* 9c,9d are part of the mode-specific configuration */
+	[36] = { .offset = 0x9e, .value = 0x25, },
+	[37] = { .offset = 0x9f, .value = 0x03, },
+	[38] = { .offset = 0xa0, .value = 0x28, },
+	[39] = { .offset = 0xa1, .value = 0x01, },
+	[40] = { .offset = 0xa2, .value = 0x28, },
+	[41] = { .offset = 0xa3, .value = 0x05, },
+	/* register 0xa4 is mode specific, but 0x80..0x84 works always */
+	[42] = { .offset = 0xa4, .value = 0x84, },
+	[43] = { .offset = 0xa5, .value = 0x00, },
+	[44] = { .offset = 0xa6, .value = 0x00, },
+	[45] = { .offset = 0xa7, .value = 0x00, },
+	[46] = { .offset = 0xa8, .value = 0x00, },
+	/* 0xa9 to 0xab are mode specific, but have no visible effect */
+	[47] = { .offset = 0xa9, .value = 0x04, },
+	[48] = { .offset = 0xaa, .value = 0x70, },
+	[49] = { .offset = 0xab, .value = 0x4f, },
+	[50] = { .offset = 0xac, .value = 0x00, },
+	[51] = { .offset = 0xad, .value = 0x00, },
+	[52] = { .offset = 0xb6, .value = 0x09, },
+	[53] = { .offset = 0xb7, .value = 0x03, },
+	/* b8,b9 are part of the mode-specific configuration */
+	[54] = { .offset = 0xba, .value = 0x00, },
+	[55] = { .offset = 0xbb, .value = 0x20, },
+	[56] = { .offset = 0xf3, .value = 0x90, },
+	[57] = { .offset = 0xf4, .value = 0x00, },
+	[58] = { .offset = 0xf7, .value = 0x88, },
+	/* f8 is mode specific, but the value does not matter */
+	[59] = { .offset = 0xf8, .value = 0x0a, },
+	[60] = { .offset = 0xf9, .value = 0x00, }
+};
+
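+/* Registers written unconditionally at the start of every mode set. */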
+static const struct ns2501_reg regs_init[] = {
+	[0] = { .offset = 0x35, .value = 0xff, },
+	[1] = { .offset = 0x34, .value = 0x00, },
+	[2] = { .offset = 0x08, .value = 0x30, },
+};
+
+struct ns2501_priv {
+	bool quiet;
+	const struct ns2501_configuration *conf;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO first.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
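+	/* a register read is an address-byte write followed by a one-byte read */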
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:0x%02x.\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO first.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+ * National Semiconductor 2501 driver for a chip on the i2c bus.
+ * Scan for the chip on the bus. We hope the VBIOS initialized the
+ * PLL correctly so we can talk to it; if not, the chip will not be
+ * seen and hence not detected. Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the NS2501 chip on the specified i2c bus */
+	struct ns2501_priv *ns;
+	unsigned char ch;
+
+	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+	if (ns == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ns;
+	ns->quiet = true;
+
+	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	ns->quiet = false;
+
+	DRM_DEBUG_KMS("initialized ns2501 dvo controller successfully!\n");
+
+	return true;
+
+out:
+	kfree(ns);
+	return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+	/*
+	 * This is a laptop display; it doesn't support hotplugging.
+	 * Even if it did, the detection bit of the 2501 is unreliable, as
+	 * it only works for some display types.
+	 * It is even less reliable because the PLL must be active to
+	 * allow reading from the chip.
+	 */
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+		      mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+	/*
+	 * Currently, these are all the modes I have data for.
+	 * More might exist. It is unclear how to find the native
+	 * resolution of the panel in here, or we could always accept
+	 * any mode by disabling the scaler.
+	 */
+	if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
+	    (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
+	    (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
+		return MODE_OK;
+	} else {
+		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
+	}
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+			    const struct drm_display_mode *mode,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	const struct ns2501_configuration *conf;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	int mode_idx, i;
+
+	DRM_DEBUG_KMS("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+		      mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+	DRM_DEBUG_KMS("Detailed requested mode settings are:\n"
+			"clock		: %d kHz\n"
+			"hdisplay	: %d\n"
+			"hblank start	: %d\n"
+			"hblank end	: %d\n"
+			"hsync start	: %d\n"
+			"hsync end	: %d\n"
+			"htotal		: %d\n"
+			"hskew		: %d\n"
+			"vdisplay	: %d\n"
+			"vblank start	: %d\n"
+			"vblank end	: %d\n"
+			"vsync start	: %d\n"
+			"vsync end	: %d\n"
+			"vtotal		: %d\n",
+			adjusted_mode->crtc_clock,
+			adjusted_mode->crtc_hdisplay,
+			adjusted_mode->crtc_hblank_start,
+			adjusted_mode->crtc_hblank_end,
+			adjusted_mode->crtc_hsync_start,
+			adjusted_mode->crtc_hsync_end,
+			adjusted_mode->crtc_htotal,
+			adjusted_mode->crtc_hskew,
+			adjusted_mode->crtc_vdisplay,
+			adjusted_mode->crtc_vblank_start,
+			adjusted_mode->crtc_vblank_end,
+			adjusted_mode->crtc_vsync_start,
+			adjusted_mode->crtc_vsync_end,
+			adjusted_mode->crtc_vtotal);
+
+	if (mode->hdisplay == 640 && mode->vdisplay == 480)
+		mode_idx = MODE_640x480;
+	else if (mode->hdisplay == 800 && mode->vdisplay == 600)
+		mode_idx = MODE_800x600;
+	else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
+		mode_idx = MODE_1024x768;
+	else
+		return;
+
+	/* Hopefully doing it every time won't hurt... */
+	for (i = 0; i < ARRAY_SIZE(regs_init); i++)
+		ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
+
+	/* Write the mode-agnostic values */
+	for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++)
+		ns2501_writeb(dvo, mode_agnostic_values[i].offset,
+				mode_agnostic_values[i].value);
+
+	/* Write now the mode-specific configuration */
+	conf = ns2501_modes + mode_idx;
+	ns->conf = conf;
+
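+	/* 16-bit timing values are programmed as low/high byte register pairs */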
+	ns2501_writeb(dvo, NS2501_REG8, conf->conf);
+	ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a);
+	ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff);
+	ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8);
+	ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff);
+	ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8);
+	ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff);
+	ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8);
+	ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff);
+	ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8);
+	ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff);
+	ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8);
+	ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff);
+	ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8);
+	ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff);
+	ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8);
+	ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff);
+	ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8);
+	ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff);
+	ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8);
+	ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff);
+	ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8);
+	ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff);
+	ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8);
+	ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff);
+	ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8);
+	ns2501_writeb(dvo, NS2501_REGF9, conf->dither);
+	ns2501_writeb(dvo, NS2501_REG41, conf->syncb);
+	ns2501_writeb(dvo, NS2501_REGC0, conf->sync);
+}
+
+/* read back the current NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+	unsigned char ch;
+
+	if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+		return false;
+
+	return ch & NS2501_8_PD;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS("Trying to set the DPMS state of the DVO to %i\n", enable);
+
+	if (enable) {
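+		/*
+		 * Power-up sequence: program sync (with an extra, unknown
+		 * bit) and the sync buffer, enable the output, let the PLL
+		 * settle, configure REG8 going through bypass mode, then
+		 * turn on the backlight and restore the final sync value.
+		 */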
+		ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08);
+
+		ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb);
+
+		ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
+		msleep(15);
+
+		ns2501_writeb(dvo, NS2501_REG8,
+				ns->conf->conf | NS2501_8_BPAS);
+		if (!(ns->conf->conf & NS2501_8_BPAS))
+			ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf);
+		msleep(200);
+
+		ns2501_writeb(dvo, NS2501_REG34,
+			NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT);
+
+		ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync);
+	} else {
+		ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
+		msleep(200);
+
+		ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN |
+				NS2501_8_BPAS);
+		msleep(15);
+
+		ns2501_writeb(dvo, NS2501_REG34, 0x00);
+	}
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+
+	if (ns) {
+		kfree(ns);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+	.init = ns2501_init,
+	.detect = ns2501_detect,
+	.mode_valid = ns2501_mode_valid,
+	.mode_set = ns2501_mode_set,
+	.dpms = ns2501_dpms,
+	.get_hw_state = ns2501_get_hw_state,
+	.destroy = ns2501_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
new file mode 100644
index 0000000..26f13eb
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -0,0 +1,279 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV    0x04
+#define SIL164_RSVD   0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD   (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+	//I2CDevRec d;
+	bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!sil->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!sil->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the SIL164 chip on the specified i2c bus */
+	struct sil164_priv *sil;
+	unsigned char ch;
+
+	sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+	if (sil == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = sil;
+	sil->quiet = true;
+
+	if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+		goto out;
+
+	if (ch != (SIL164_VID & 0xff)) {
+		DRM_DEBUG_KMS("sil164 not detected, got %d from %s slave %d.\n",
+			  ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+		goto out;
+
+	if (ch != (SIL164_DID & 0xff)) {
+		DRM_DEBUG_KMS("sil164 not detected, got %d from %s slave %d.\n",
+			  ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	sil->quiet = false;
+
+	DRM_DEBUG_KMS("initialized sil164 dvo controller successfully!\n");
+	return true;
+
+out:
+	kfree(sil);
+	return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+	uint8_t reg9;
+
+	sil164_readb(dvo, SIL164_REG9, &reg9);
+
+	if (reg9 & SIL164_9_HTPLG)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+			    const struct drm_display_mode *mode,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	/* As long as the basics are set up, we can leave the registers
+	 * alone: there are no clock dependencies in the mode setup, so
+	 * everything will work fine.
+	 */
+	/* recommended programming sequence from doc */
+	/*sil164_writeb(sil, 0x08, 0x30);
+	  sil164_writeb(sil, 0x09, 0x00);
+	  sil164_writeb(sil, 0x0a, 0x90);
+	  sil164_writeb(sil, 0x0c, 0x89);
+	  sil164_writeb(sil, 0x08, 0x31);*/
+	/* don't do much */
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	unsigned char ch;
+
+	if (!sil164_readb(dvo, SIL164_REG8, &ch))
+		return;
+
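+	/* note that the PD bit is, despite its name, active low: set = powered up */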
+	if (enable)
+		ch |= SIL164_8_PD;
+	else
+		ch &= ~SIL164_8_PD;
+
+	sil164_writeb(dvo, SIL164_REG8, ch);
+}
+
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+	unsigned char ch;
+
+	if (!sil164_readb(dvo, SIL164_REG8, &ch))
+		return false;
+
+	return ch & SIL164_8_PD;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	sil164_readb(dvo, SIL164_FREQ_LO, &val);
+	DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_FREQ_HI, &val);
+	DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REG8, &val);
+	DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REG9, &val);
+	DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REGC, &val);
+	DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+
+	if (sil) {
+		kfree(sil);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+	.init = sil164_init,
+	.detect = sil164_detect,
+	.mode_valid = sil164_mode_valid,
+	.mode_set = sil164_mode_set,
+	.dpms = sil164_dpms,
+	.get_hw_state = sil164_get_hw_state,
+	.dump_regs = sil164_dump_regs,
+	.destroy = sil164_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
new file mode 100644
index 0000000..6f1a0a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID		0x014C
+#define TFP410_DID		0x0410
+
+#define TFP410_VID_LO		0x00
+#define TFP410_VID_HI		0x01
+#define TFP410_DID_LO		0x02
+#define TFP410_DID_HI		0x03
+#define TFP410_REV		0x04
+
+#define TFP410_CTL_1		0x08
+#define TFP410_CTL_1_TDIS	(1<<6)
+#define TFP410_CTL_1_VEN	(1<<5)
+#define TFP410_CTL_1_HEN	(1<<4)
+#define TFP410_CTL_1_DSEL	(1<<3)
+#define TFP410_CTL_1_BSEL	(1<<2)
+#define TFP410_CTL_1_EDGE	(1<<1)
+#define TFP410_CTL_1_PD		(1<<0)
+
+#define TFP410_CTL_2		0x09
+#define TFP410_CTL_2_VLOW	(1<<7)
+#define TFP410_CTL_2_MSEL_MASK	(0x7<<4)
+#define TFP410_CTL_2_MSEL	(1<<4)
+#define TFP410_CTL_2_TSEL	(1<<3)
+#define TFP410_CTL_2_RSEN	(1<<2)
+#define TFP410_CTL_2_HTPLG	(1<<1)
+#define TFP410_CTL_2_MDI	(1<<0)
+
+#define TFP410_CTL_3		0x0A
+#define TFP410_CTL_3_DK_MASK	(0x7<<5)
+#define TFP410_CTL_3_DK		(1<<5)
+#define TFP410_CTL_3_DKEN	(1<<4)
+#define TFP410_CTL_3_CTL_MASK	(0x7<<1)
+#define TFP410_CTL_3_CTL	(1<<1)
+
+#define TFP410_USERCFG		0x0B
+
+#define TFP410_DE_DLY		0x32
+
+#define TFP410_DE_CTL		0x33
+#define TFP410_DE_CTL_DEGEN	(1<<6)
+#define TFP410_DE_CTL_VSPOL	(1<<5)
+#define TFP410_DE_CTL_HSPOL	(1<<4)
+#define TFP410_DE_CTL_DEDLY8	(1<<0)
+
+#define TFP410_DE_TOP		0x34
+
+#define TFP410_DE_CNT_LO	0x36
+#define TFP410_DE_CNT_HI	0x37
+
+#define TFP410_DE_LIN_LO	0x38
+#define TFP410_DE_LIN_HI	0x39
+
+#define TFP410_H_RES_LO		0x3A
+#define TFP410_H_RES_HI		0x3B
+
+#define TFP410_V_RES_LO		0x3C
+#define TFP410_V_RES_HI		0x3D
+
+struct tfp410_priv {
+	bool quiet;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!tfp->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!tfp->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
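+/* Read a 16-bit ID from two consecutive byte registers; -1 on read failure. */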
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+	uint8_t ch1, ch2;
+
+	if (tfp410_readb(dvo, addr+0, &ch1) &&
+	    tfp410_readb(dvo, addr+1, &ch2))
+		return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+	return -1;
+}
+
+/* TI TFP410 driver for a chip on the i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the tfp410 chip on the specified i2c bus */
+	struct tfp410_priv *tfp;
+	int id;
+
+	tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+	if (tfp == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = tfp;
+	tfp->quiet = true;
+
+	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+		DRM_DEBUG_KMS("tfp410 not detected, got VID %X from %s "
+				"slave %d.\n",
+			  id, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+		DRM_DEBUG_KMS("tfp410 not detected, got DID %X from %s "
+				"slave %d.\n",
+			  id, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	tfp->quiet = false;
+	return true;
+out:
+	kfree(tfp);
+	return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+	enum drm_connector_status ret = connector_status_disconnected;
+	uint8_t ctl2;
+
+	if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+		if (ctl2 & TFP410_CTL_2_RSEN)
+			ret = connector_status_connected;
+		else
+			ret = connector_status_disconnected;
+	}
+
+	return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+			    const struct drm_display_mode *mode,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	/*
+	 * As long as the basics are set up, we can leave the registers
+	 * alone: there are no clock dependencies in the mode setup, so
+	 * everything will work fine.
+	 */
+	/* don't do much */
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return;
+
+	if (enable)
+		ctl1 |= TFP410_CTL_1_PD;
+	else
+		ctl1 &= ~TFP410_CTL_1_PD;
+
+	tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return false;
+
+	return ctl1 & TFP410_CTL_1_PD;
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val, val2;
+
+	tfp410_readb(dvo, TFP410_REV, &val);
+	DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_1, &val);
+	DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_2, &val);
+	DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_3, &val);
+	DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_USERCFG, &val);
+	DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_DLY, &val);
+	DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_CTL, &val);
+	DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_TOP, &val);
+	DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+	DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+	DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+	DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+	DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+
+	if (tfp) {
+		kfree(tfp);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+	.init = tfp410_init,
+	.detect = tfp410_detect,
+	.mode_valid = tfp410_mode_valid,
+	.mode_set = tfp410_mode_set,
+	.dpms = tfp410_dpms,
+	.get_hw_state = tfp410_get_hw_state,
+	.dump_regs = tfp410_dump_regs,
+	.destroy = tfp410_destroy,
+};
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 0000000..db58c8d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware, if the parser determines
+ * the operation is safe, and submits the batch as "secure" to prevent hardware
+ * parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for both
+ * normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses in
+ * scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain only
+ * those commands required by the parser. This generally works because command
+ * opcode ranges have standard command length encodings. So for commands that
+ * the parser does not need to check, it can easily skip them. This is
+ * implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the standard
+ * length encoding for their opcode range, primarily amongst the MI_* commands.
+ * To handle this, the parser provides a way to define explicit "skip" entries
+ * in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks, via a
+ * general bitmasking mechanism.
+ */
+
+#define STD_MI_OPCODE_MASK  0xFF800000
+#define STD_3D_OPCODE_MASK  0xFFFF0000
+#define STD_2D_OPCODE_MASK  0xFFC00000
+#define STD_MFX_OPCODE_MASK 0xFFFF0000
+
+#define CMD(op, opm, f, lm, fl, ...)				\
+	{							\
+		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
+		.cmd = { (op), (opm) },				\
+		.length = { (lm) },				\
+		__VA_ARGS__					\
+	}
+
+/* Convenience macros to compress the tables */
+#define SMI STD_MI_OPCODE_MASK
+#define S3D STD_3D_OPCODE_MASK
+#define S2D STD_2D_OPCODE_MASK
+#define SMFX STD_MFX_OPCODE_MASK
+#define F true
+#define S CMD_DESC_SKIP
+#define R CMD_DESC_REJECT
+#define W CMD_DESC_REGISTER
+#define B CMD_DESC_BITMASK
+#define M CMD_DESC_MASTER
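+/*
+ * Example: CMD(MI_NOOP, SMI, F, 1, S) below describes MI_NOOP as a
+ * fixed-length, one-dword command that the parser simply skips.
+ */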
+
+/*            Command                          Mask   Fixed Len   Action
+	      ---------------------------------------------------------- */
+static const struct drm_i915_cmd_descriptor common_cmds[] = {
+	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
+	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
+	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
+	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
+	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
+	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
+	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
+	CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC },
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC },
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	/*
+	 * MI_BATCH_BUFFER_START requires some special handling. It's not
+	 * really a 'skip' action but it doesn't seem like it's worth adding
+	 * a new action. See i915_parse_cmds().
+	 */
+	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
+};
+
+static const struct drm_i915_cmd_descriptor render_cmds[] = {
+	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
+	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
+	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
+	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
+	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
+	      .bits = {{
+			.offset = 1,
+			.mask = MI_REPORT_PERF_COUNT_GGTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
+	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
+	CMD(  MEDIA_VFE_STATE,			S3D,   !F,  0xFFFF, B,
+	      .bits = {{
+			.offset = 2,
+			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
+			.expected = 0,
+	      }},						       ),
+	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
+	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
+	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
+	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 1,
+			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
+			.expected = 0,
+	      },
+	      {
+			.offset = 1,
+		        .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+				 PIPE_CONTROL_STORE_DATA_INDEX),
+			.expected = 0,
+			.condition_offset = 1,
+			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
+	      }},						       ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
+	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
+	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
+	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
+	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
+	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
+	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
+	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
+	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
+	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
+	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
+	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),
+
+	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
+	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
+	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
+	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
+	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
+};
+
+static const struct drm_i915_cmd_descriptor video_cmds[] = {
+	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
+	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	      },
+	      {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      },
+	      {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      }},						       ),
+	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	/*
+	 * MFX_WAIT doesn't fit the way we handle length for most commands.
+	 * It has a length field but it uses a non-standard length bias.
+	 * It is always 1 dword though, so just treat it as fixed length.
+	 */
+	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
+};
+
+static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
+	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	      },
+	      {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      },
+	      {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      }},						       ),
+	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+};
+
+static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
+	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	      }},						       ),
+	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
+	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	      },
+	      {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      },
+	      {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	      }},						       ),
+	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
+	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
+	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
+};
+
+#undef CMD
+#undef SMI
+#undef S3D
+#undef S2D
+#undef SMFX
+#undef F
+#undef S
+#undef R
+#undef W
+#undef B
+#undef M
+
+static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ video_cmds, ARRAY_SIZE(video_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+};
+
+/*
+ * Register whitelists, sorted by increasing register offset.
+ */
+
+/*
+ * An individual whitelist entry granting access to register addr.  If
+ * mask is non-zero the argument of immediate register writes will be
+ * AND-ed with mask, and the command will be rejected if the result
+ * doesn't match value.
+ *
+ * Registers with non-zero mask are only allowed to be written using
+ * LRI.
+ */
+struct drm_i915_reg_descriptor {
+	u32 addr;
+	u32 mask;
+	u32 value;
+};
+
+/* Convenience macro for adding 32-bit registers. */
+#define REG32(address, ...)                             \
+	{ .addr = address, __VA_ARGS__ }
+
+/*
+ * Convenience macro for adding 64-bit registers.
+ *
+ * Some registers that userspace accesses are 64 bits. The register
+ * access commands only allow 32-bit accesses. Hence, we have to include
+ * entries for both halves of the 64-bit registers.
+ */
+#define REG64(addr)                                     \
+	REG32(addr), REG32(addr + sizeof(u32))
+
+static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
+	REG64(GPGPU_THREADS_DISPATCHED),
+	REG64(HS_INVOCATION_COUNT),
+	REG64(DS_INVOCATION_COUNT),
+	REG64(IA_VERTICES_COUNT),
+	REG64(IA_PRIMITIVES_COUNT),
+	REG64(VS_INVOCATION_COUNT),
+	REG64(GS_INVOCATION_COUNT),
+	REG64(GS_PRIMITIVES_COUNT),
+	REG64(CL_INVOCATION_COUNT),
+	REG64(CL_PRIMITIVES_COUNT),
+	REG64(PS_INVOCATION_COUNT),
+	REG64(PS_DEPTH_COUNT),
+	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
+	REG64(MI_PREDICATE_SRC0),
+	REG64(MI_PREDICATE_SRC1),
+	REG32(GEN7_3DPRIM_END_OFFSET),
+	REG32(GEN7_3DPRIM_START_VERTEX),
+	REG32(GEN7_3DPRIM_VERTEX_COUNT),
+	REG32(GEN7_3DPRIM_INSTANCE_COUNT),
+	REG32(GEN7_3DPRIM_START_INSTANCE),
+	REG32(GEN7_3DPRIM_BASE_VERTEX),
+	REG32(GEN7_GPGPU_DISPATCHDIMX),
+	REG32(GEN7_GPGPU_DISPATCHDIMY),
+	REG32(GEN7_GPGPU_DISPATCHDIMZ),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+	REG32(GEN7_SO_WRITE_OFFSET(0)),
+	REG32(GEN7_SO_WRITE_OFFSET(1)),
+	REG32(GEN7_SO_WRITE_OFFSET(2)),
+	REG32(GEN7_SO_WRITE_OFFSET(3)),
+	REG32(GEN7_L3SQCREG1),
+	REG32(GEN7_L3CNTLREG2),
+	REG32(GEN7_L3CNTLREG3),
+	REG32(HSW_SCRATCH1,
+	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
+	      .value = 0),
+	REG32(HSW_ROW_CHICKEN3,
+	      .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
+                        HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+	      .value = 0),
+};
+
+static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+	REG32(BCS_SWCTRL),
+};
+
+static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+	REG32(FORCEWAKE_MT),
+	REG32(DERRMR),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
+};
+
+static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+	REG32(FORCEWAKE_MT),
+	REG32(DERRMR),
+};
+
+#undef REG64
+#undef REG32
+
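+/*
+ * Per-client length decoding: the returned mask selects the dword-count
+ * field of a command header. A zero mask means the client bits were not
+ * recognized; find_cmd() then fails to build a default descriptor.
+ */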
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 subclient =
+		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_RC_CLIENT) {
+		if (subclient == INSTR_MEDIA_SUBCLIENT)
+			return 0xFFFF;
+		else
+			return 0xFF;
+	}
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 subclient =
+		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_RC_CLIENT) {
+		if (subclient == INSTR_MEDIA_SUBCLIENT) {
+			if (op == 6)
+				return 0xFFFF;
+			else
+				return 0xFFF;
+		} else
+			return 0xFF;
+	}
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_BC_CLIENT)
+		return 0xFF;
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+				 const struct drm_i915_cmd_table *cmd_tables,
+				 int cmd_table_count)
+{
+	int i;
+	bool ret = true;
+
+	if (!cmd_tables || cmd_table_count == 0)
+		return true;
+
+	for (i = 0; i < cmd_table_count; i++) {
+		const struct drm_i915_cmd_table *table = &cmd_tables[i];
+		u32 previous = 0;
+		int j;
+
+		for (j = 0; j < table->count; j++) {
+			const struct drm_i915_cmd_descriptor *desc =
+				&table->table[j];
+			u32 curr = desc->cmd.value & desc->cmd.mask;
+
+			if (curr < previous) {
+				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+					  ring->id, i, j, curr, previous);
+				ret = false;
+			}
+
+			previous = curr;
+		}
+	}
+
+	return ret;
+}
+
+static bool check_sorted(int ring_id,
+			 const struct drm_i915_reg_descriptor *reg_table,
+			 int reg_count)
+{
+	int i;
+	u32 previous = 0;
+	bool ret = true;
+
+	for (i = 0; i < reg_count; i++) {
+		u32 curr = reg_table[i].addr;
+
+		if (curr < previous) {
+			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+				  ring_id, i, curr, previous);
+			ret = false;
+		}
+
+		previous = curr;
+	}
+
+	return ret;
+}
+
+static bool validate_regs_sorted(struct intel_engine_cs *ring)
+{
+	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+		check_sorted(ring->id, ring->master_reg_table,
+			     ring->master_reg_count);
+}
+
+struct cmd_node {
+	const struct drm_i915_cmd_descriptor *desc;
+	struct hlist_node node;
+};
+
+/*
+ * Different command ranges have different numbers of bits for the opcode. For
+ * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
+ * problem is that, for example, MI commands use bits 22:16 for other fields
+ * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
+ * we mask a command from a batch it could hash to the wrong bucket due to
+ * non-opcode bits being set. But if we don't include those bits, some 3D
+ * commands may hash to the same bucket due to not including opcode bits that
+ * make the command unique. For now, we will risk hashing to the same bucket.
+ *
+ * If we attempt to generate a perfect hash, we should be able to look at bits
+ * 31:29 of a command from a batch buffer and use the full mask for that
+ * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
+ */
+#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+
+static int init_hash_table(struct intel_engine_cs *ring,
+			   const struct drm_i915_cmd_table *cmd_tables,
+			   int cmd_table_count)
+{
+	int i, j;
+
+	hash_init(ring->cmd_hash);
+
+	for (i = 0; i < cmd_table_count; i++) {
+		const struct drm_i915_cmd_table *table = &cmd_tables[i];
+
+		for (j = 0; j < table->count; j++) {
+			const struct drm_i915_cmd_descriptor *desc =
+				&table->table[j];
+			struct cmd_node *desc_node =
+				kmalloc(sizeof(*desc_node), GFP_KERNEL);
+
+			if (!desc_node)
+				return -ENOMEM;
+
+			desc_node->desc = desc;
+			hash_add(ring->cmd_hash, &desc_node->node,
+				 desc->cmd.value & CMD_HASH_MASK);
+		}
+	}
+
+	return 0;
+}
+
+static void fini_hash_table(struct intel_engine_cs *ring)
+{
+	struct hlist_node *tmp;
+	struct cmd_node *desc_node;
+	int i;
+
+	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+		hash_del(&desc_node->node);
+		kfree(desc_node);
+	}
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_engine_cs based on whether the platform requires software
+ * command parsing.
+ *
+ * Return: non-zero if initialization fails
+ */
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+{
+	const struct drm_i915_cmd_table *cmd_tables;
+	int cmd_table_count;
+	int ret;
+
+	if (!IS_GEN7(ring->dev))
+		return 0;
+
+	switch (ring->id) {
+	case RCS:
+		if (IS_HASWELL(ring->dev)) {
+			cmd_tables = hsw_render_ring_cmds;
+			cmd_table_count =
+				ARRAY_SIZE(hsw_render_ring_cmds);
+		} else {
+			cmd_tables = gen7_render_cmds;
+			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+		}
+
+		ring->reg_table = gen7_render_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
+		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		break;
+	case VCS:
+		cmd_tables = gen7_video_cmds;
+		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		break;
+	case BCS:
+		if (IS_HASWELL(ring->dev)) {
+			cmd_tables = hsw_blt_ring_cmds;
+			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+		} else {
+			cmd_tables = gen7_blt_cmds;
+			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+		}
+
+		ring->reg_table = gen7_blt_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
+		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		break;
+	case VECS:
+		cmd_tables = hsw_vebox_cmds;
+		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+		/* VECS can use the same length_mask function as VCS */
+		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		break;
+	default:
+		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+			  ring->id);
+		BUG();
+	}
+
+	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
+	BUG_ON(!validate_regs_sorted(ring));
+
+	WARN_ON(!hash_empty(ring->cmd_hash));
+
+	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	if (ret) {
+		DRM_ERROR("CMD: cmd_parser_init failed!\n");
+		fini_hash_table(ring);
+		return ret;
+	}
+
+	ring->needs_cmd_parser = true;
+
+	return 0;
+}
+
+/**
+ * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * @ring: the ringbuffer to clean up
+ *
+ * Releases any resources related to command parsing that may have been
+ * initialized for the specified ring.
+ */
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+{
+	if (!ring->needs_cmd_parser)
+		return;
+
+	fini_hash_table(ring);
+}
+
+static const struct drm_i915_cmd_descriptor*
+find_cmd_in_table(struct intel_engine_cs *ring,
+		  u32 cmd_header)
+{
+	struct cmd_node *desc_node;
+
+	hash_for_each_possible(ring->cmd_hash, desc_node, node,
+			       cmd_header & CMD_HASH_MASK) {
+		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
+		u32 masked_cmd = desc->cmd.mask & cmd_header;
+		u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+		if (masked_cmd == masked_value)
+			return desc;
+	}
+
+	return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor*
+find_cmd(struct intel_engine_cs *ring,
+	 u32 cmd_header,
+	 struct drm_i915_cmd_descriptor *default_desc)
+{
+	const struct drm_i915_cmd_descriptor *desc;
+	u32 mask;
+
+	desc = find_cmd_in_table(ring, cmd_header);
+	if (desc)
+		return desc;
+
+	mask = ring->get_cmd_length_mask(cmd_header);
+	if (!mask)
+		return NULL;
+
+	BUG_ON(!default_desc);
+	default_desc->flags = CMD_DESC_SKIP;
+	default_desc->length.mask = mask;
+
+	return default_desc;
+}
+
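+/* Linear scan of a (small, sorted-by-offset) register whitelist. */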
+static const struct drm_i915_reg_descriptor *
+find_reg(const struct drm_i915_reg_descriptor *table,
+	 int count, u32 addr)
+{
+	if (table) {
+		int i;
+
+		for (i = 0; i < count; i++) {
+			if (table[i].addr == addr)
+				return &table[i];
+		}
+	}
+
+	return NULL;
+}
+
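+/*
+ * Map the object's backing pages covering [start, start + len) into a
+ * contiguous kernel virtual range; returns NULL on failure.
+ */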
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+		       unsigned start, unsigned len)
+{
+	int i;
+	void *addr = NULL;
+	struct sg_page_iter sg_iter;
+	int first_page = start >> PAGE_SHIFT;
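+	/* round the end of the range up to the next page boundary */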
+	int last_page = (len + start + 4095) >> PAGE_SHIFT;
+	int npages = last_page - first_page;
+	struct page **pages;
+
+	pages = drm_malloc_ab(npages, sizeof(*pages));
+	if (pages == NULL) {
+		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+		goto finish;
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
+		pages[i++] = sg_page_iter_page(&sg_iter);
+		if (i == npages)
+			break;
+	}
+
+	addr = vmap(pages, i, 0, PAGE_KERNEL);
+	if (addr == NULL) {
+		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+		goto finish;
+	}
+
+finish:
+	if (pages)
+		drm_free_large(pages);
+	return (u32*)addr;
+}
+
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+		       struct drm_i915_gem_object *src_obj,
+		       u32 batch_start_offset,
+		       u32 batch_len)
+{
+	int needs_clflush = 0;
+	void *src_base, *src;
+	void *dst = NULL;
+	int ret;
+
+	if (batch_len > dest_obj->base.size ||
+	    batch_len + batch_start_offset > src_obj->base.size)
+		return ERR_PTR(-E2BIG);
+
+	if (WARN_ON(dest_obj->pages_pin_count == 0))
+		return ERR_PTR(-ENODEV);
+
+	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
+		return ERR_PTR(ret);
+	}
+
+	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
+	if (!src_base) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+		ret = -ENOMEM;
+		goto unpin_src;
+	}
+
+	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
+		goto unmap_src;
+	}
+
+	dst = vmap_batch(dest_obj, 0, batch_len);
+	if (!dst) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+		ret = -ENOMEM;
+		goto unmap_src;
+	}
+
+	src = src_base + offset_in_page(batch_start_offset);
+	if (needs_clflush)
+		drm_clflush_virt_range(src, batch_len);
+
+	memcpy(dst, src, batch_len);
+
+unmap_src:
+	vunmap(src_base);
+unpin_src:
+	i915_gem_object_unpin_pages(src_obj);
+
+	return ret ? ERR_PTR(ret) : dst;
+}
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when enabled via module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+{
+	if (!ring->needs_cmd_parser)
+		return false;
+
+	if (!USES_PPGTT(ring->dev))
+		return false;
+
+	return (i915.enable_cmd_parser == 1);
+}
+
+static bool check_cmd(const struct intel_engine_cs *ring,
+		      const struct drm_i915_cmd_descriptor *desc,
+		      const u32 *cmd, u32 length,
+		      const bool is_master,
+		      bool *oacontrol_set)
+{
+	if (desc->flags & CMD_DESC_REJECT) {
+		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		return false;
+	}
+
+	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+				 *cmd);
+		return false;
+	}
+
+	if (desc->flags & CMD_DESC_REGISTER) {
+		/*
+		 * Get the distance between individual register offset
+		 * fields if the command can perform more than one
+		 * access at a time.
+		 */
+		const u32 step = desc->reg.step ? desc->reg.step : length;
+		u32 offset;
+
+		for (offset = desc->reg.offset; offset < length;
+		     offset += step) {
+			const u32 reg_addr = cmd[offset] & desc->reg.mask;
+			const struct drm_i915_reg_descriptor *reg =
+				find_reg(ring->reg_table, ring->reg_count,
+					 reg_addr);
+
+			if (!reg && is_master)
+				reg = find_reg(ring->master_reg_table,
+					       ring->master_reg_count,
+					       reg_addr);
+
+			if (!reg) {
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+						 reg_addr, *cmd, ring->id);
+				return false;
+			}
+
+			/*
+			 * OACONTROL requires some special handling for
+			 * writes. We want to make sure that any batch which
+			 * enables OA also disables it before the end of the
+			 * batch. The goal is to prevent one process from
+			 * snooping on the perf data from another process. To do
+			 * that, we need to check the value that will be written
+			 * to the register. Hence, limit OACONTROL writes to
+			 * only MI_LOAD_REGISTER_IMM commands.
+			 */
+			if (reg_addr == OACONTROL) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+					return false;
+				}
+
+				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+					*oacontrol_set = (cmd[offset + 1] != 0);
+			}
+
+			/*
+			 * Check the value written to the register against the
+			 * allowed mask/value pair given in the whitelist entry.
+			 */
+			if (reg->mask) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
+							 reg_addr);
+					return false;
+				}
+
+				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+				    (offset + 2 > length ||
+				     (cmd[offset + 1] & reg->mask) != reg->value)) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
+							 reg_addr);
+					return false;
+				}
+			}
+		}
+	}
+
+	if (desc->flags & CMD_DESC_BITMASK) {
+		int i;
+
+		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+			u32 dword;
+
+			if (desc->bits[i].mask == 0)
+				break;
+
+			if (desc->bits[i].condition_mask != 0) {
+				u32 offset =
+					desc->bits[i].condition_offset;
+				u32 condition = cmd[offset] &
+					desc->bits[i].condition_mask;
+
+				if (condition == 0)
+					continue;
+			}
+
+			dword = cmd[desc->bits[i].offset] &
+				desc->bits[i].mask;
+
+			if (dword != desc->bits[i].expected) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+						 *cmd,
+						 desc->bits[i].mask,
+						 desc->bits[i].expected,
+						 dword, ring->id);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
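+
+/*
+ * Illustrative numbers for the LRI mask/value check above (hypothetical
+ * whitelist entry, not taken from the real tables): with
+ * reg->mask = 0x0000ffff and reg->value = 0x00001234, an
+ * MI_LOAD_REGISTER_IMM(1) writing 0xabcd1234 is allowed because
+ *
+ *	0xabcd1234 & 0x0000ffff = 0x00001234 == reg->value
+ *
+ * while one writing 0xabcd0000 is rejected.
+ */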
+
+#define LENGTH_BIAS 2
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ *
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
+ */
+int i915_parse_cmds(struct intel_engine_cs *ring,
+		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
+		    u32 batch_start_offset,
+		    u32 batch_len,
+		    bool is_master)
+{
+	u32 *cmd, *batch_base, *batch_end;
+	struct drm_i915_cmd_descriptor default_desc = { 0 };
+	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+	int ret = 0;
+
+	batch_base = copy_batch(shadow_batch_obj, batch_obj,
+				batch_start_offset, batch_len);
+	if (IS_ERR(batch_base)) {
+		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+		return PTR_ERR(batch_base);
+	}
+
+	/*
+	 * We use the batch length as the parse limit because the shadow
+	 * object is as large or larger and copy_batch() only fills in
+	 * batch_len bytes of it; whatever lies beyond that point is stale
+	 * data from the pool and must not be parsed.
+	 */
+	batch_end = batch_base + (batch_len / sizeof(*batch_end));
+
+	cmd = batch_base;
+	while (cmd < batch_end) {
+		const struct drm_i915_cmd_descriptor *desc;
+		u32 length;
+
+		if (*cmd == MI_BATCH_BUFFER_END)
+			break;
+
+		desc = find_cmd(ring, *cmd, &default_desc);
+		if (!desc) {
+			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+					 *cmd);
+			ret = -EINVAL;
+			break;
+		}
+
+		/*
+		 * If the batch buffer contains a chained batch, return an
+		 * error that tells the caller to abort and dispatch the
+		 * workload as a non-secure batch.
+		 */
+		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+			ret = -EACCES;
+			break;
+		}
+
+		if (desc->flags & CMD_DESC_FIXED)
+			length = desc->length.fixed;
+		else
+			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+
+		if ((batch_end - cmd) < length) {
+			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+					 *cmd,
+					 length,
+					 batch_end - cmd);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!check_cmd(ring, desc, cmd, length, is_master,
+			       &oacontrol_set)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		cmd += length;
+	}
+
+	if (oacontrol_set) {
+		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+		ret = -EINVAL;
+	}
+
+	if (cmd >= batch_end) {
+		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+		ret = -EINVAL;
+	}
+
+	vunmap(batch_base);
+
+	return ret;
+}
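+
+/*
+ * Note on LENGTH_BIAS, with an illustrative decode: MI command headers
+ * encode their "DWord Length" field as the total length minus two, so a
+ * header whose length field (*cmd & desc->length.mask) reads 2 makes the
+ * loop above advance
+ *
+ *	length = 2 + LENGTH_BIAS = 4
+ *
+ * dwords to the next command.
+ */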
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+	/*
+	 * Command parser version history
+	 *
+	 * 1. Initial version. Checks batches and reports violations, but leaves
+	 *    hardware parsing enabled (so does not allow new use cases).
+	 * 2. Allow access to the MI_PREDICATE_SRC0 and
+	 *    MI_PREDICATE_SRC1 registers.
+	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
+	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
+	 * 5. GPGPU dispatch compute indirect registers.
+	 */
+	return 5;
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
new file mode 100644
index 0000000..8aab974
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -0,0 +1,5440 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/list_sort.h>
+#include <asm/msr-index.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+enum {
+	ACTIVE_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+
+	return 0;
+}
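+
+/*
+ * Illustrative expansion (the flag name is just an example): for a device
+ * info flag such as is_mobile, PRINT_FLAG(is_mobile) above expands to
+ *
+ *	seq_printf(m, "is_mobile" ": %s\n", yesno(info->is_mobile));
+ *
+ * and DEV_INFO_FOR_EACH_FLAG stamps that out once per flag, separated by
+ * SEP_SEMICOLON.
+ */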
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+{
+	if (obj->pin_display)
+		return "p";
+	else
+		return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+	switch (obj->tiling_mode) {
+	default:
+	case I915_TILING_NONE: return " ";
+	case I915_TILING_X: return "X";
+	case I915_TILING_Y: return "Y";
+	}
+}
+
+static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+}
+
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+	u64 size = 0;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    drm_mm_node_allocated(&vma->node))
+			size += vma->node.size;
+	}
+
+	return size;
+}
+
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct intel_engine_cs *ring;
+	struct i915_vma *vma;
+	int pin_count = 0;
+	int i;
+
+	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
+		   &obj->base,
+		   obj->active ? "*" : " ",
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   get_global_flag(obj),
+		   obj->base.size / 1024,
+		   obj->base.read_domains,
+		   obj->base.write_domain);
+	for_each_ring(ring, dev_priv, i)
+		seq_printf(m, "%x ",
+				i915_gem_request_get_seqno(obj->last_read_req[i]));
+	seq_printf(m, "] %x %x%s%s%s",
+		   i915_gem_request_get_seqno(obj->last_write_req),
+		   i915_gem_request_get_seqno(obj->last_fenced_req),
+		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (vma->pin_count > 0)
+			pin_count++;
+	}
+	seq_printf(m, " (pinned x %d)", pin_count);
+	if (obj->pin_display)
+		seq_printf(m, " (display)");
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->node.start, vma->node.size);
+		if (i915_is_ggtt(vma->vm))
+			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
+		else
+			seq_puts(m, ")");
+	}
+	if (obj->stolen)
+		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+	if (obj->pin_display || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_display)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
+	if (obj->last_write_req != NULL)
+		seq_printf(m, " (%s)",
+			   i915_gem_request_get_ring(obj->last_write_req)->name);
+	if (obj->frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
+}
+
+static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
+{
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
+	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+	seq_putc(m, ' ');
+}
+
+static int i915_gem_object_list_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
+	struct list_head *head;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct i915_vma *vma;
+	u64 total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	/* FIXME: the user of this interface might want more than just GGTT */
+	switch (list) {
+	case ACTIVE_LIST:
+		seq_puts(m, "Active:\n");
+		head = &vm->active_list;
+		break;
+	case INACTIVE_LIST:
+		seq_puts(m, "Inactive:\n");
+		head = &vm->inactive_list;
+		break;
+	default:
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(vma, head, mm_list) {
+		seq_puts(m, "   ");
+		describe_obj(m, vma->obj);
+		seq_putc(m, '\n');
+		total_obj_size += vma->obj->base.size;
+		total_gtt_size += vma->node.size;
+		count++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+	return 0;
+}
+
+static int obj_rank_by_stolen(void *priv,
+			      struct list_head *A, struct list_head *B)
+{
+	struct drm_i915_gem_object *a =
+		container_of(A, struct drm_i915_gem_object, obj_exec_link);
+	struct drm_i915_gem_object *b =
+		container_of(B, struct drm_i915_gem_object, obj_exec_link);
+
+	if (a->stolen->start < b->stolen->start)
+		return -1;
+	if (a->stolen->start > b->stolen->start)
+		return 1;
+	return 0;
+}
+
+static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	u64 total_obj_size, total_gtt_size;
+	LIST_HEAD(stolen);
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (obj->stolen == NULL)
+			continue;
+
+		list_add(&obj->obj_exec_link, &stolen);
+
+		total_obj_size += obj->base.size;
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
+		count++;
+	}
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		if (obj->stolen == NULL)
+			continue;
+
+		list_add(&obj->obj_exec_link, &stolen);
+
+		total_obj_size += obj->base.size;
+		count++;
+	}
+	list_sort(NULL, &stolen, obj_rank_by_stolen);
+	seq_puts(m, "Stolen:\n");
+	while (!list_empty(&stolen)) {
+		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+		seq_puts(m, "   ");
+		describe_obj(m, obj);
+		seq_putc(m, '\n');
+		list_del_init(&obj->obj_exec_link);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+	return 0;
+}
+
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += i915_gem_obj_total_ggtt_size(obj); \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += i915_gem_obj_ggtt_size(obj); \
+			++mappable_count; \
+		} \
+	} \
+} while (0)
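+
+/*
+ * Note: count_objects (and count_vmas below) deliberately uses the
+ * caller's local variables -- obj (or vma), size, count, mappable_size
+ * and mappable_count -- instead of taking them as arguments; see
+ * i915_gem_object_info() for the expected declarations.
+ */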
+
+struct file_stats {
+	struct drm_i915_file_private *file_priv;
+	unsigned long count;
+	u64 total, unbound;
+	u64 global, shared;
+	u64 active, inactive;
+};
+
+static int per_file_stats(int id, void *ptr, void *data)
+{
+	struct drm_i915_gem_object *obj = ptr;
+	struct file_stats *stats = data;
+	struct i915_vma *vma;
+
+	stats->count++;
+	stats->total += obj->base.size;
+
+	if (obj->base.name || obj->base.dma_buf)
+		stats->shared += obj->base.size;
+
+	if (USES_FULL_PPGTT(obj->base.dev)) {
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			struct i915_hw_ppgtt *ppgtt;
+
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
+
+			if (i915_is_ggtt(vma->vm)) {
+				stats->global += obj->base.size;
+				continue;
+			}
+
+			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+			if (ppgtt->file_priv != stats->file_priv)
+				continue;
+
+			if (obj->active) /* XXX per-vma statistic */
+				stats->active += obj->base.size;
+			else
+				stats->inactive += obj->base.size;
+
+			return 0;
+		}
+	} else {
+		if (i915_gem_obj_ggtt_bound(obj)) {
+			stats->global += obj->base.size;
+			if (obj->active)
+				stats->active += obj->base.size;
+			else
+				stats->inactive += obj->base.size;
+			return 0;
+		}
+	}
+
+	if (!list_empty(&obj->global_list))
+		stats->unbound += obj->base.size;
+
+	return 0;
+}
+
+#define print_file_stats(m, name, stats) do { \
+	if (stats.count) \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+			   name, \
+			   stats.count, \
+			   stats.total, \
+			   stats.active, \
+			   stats.inactive, \
+			   stats.global, \
+			   stats.shared, \
+			   stats.unbound); \
+} while (0)
+
+static void print_batch_pool_stats(struct seq_file *m,
+				   struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+	struct file_stats stats;
+	struct intel_engine_cs *ring;
+	int i, j;
+
+	memset(&stats, 0, sizeof(stats));
+
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+			list_for_each_entry(obj,
+					    &ring->batch_pool.cache_list[j],
+					    batch_pool_link)
+				per_file_stats(0, obj, &stats);
+		}
+	}
+
+	print_file_stats(m, "[k]batch pool", stats);
+}
+
+#define count_vmas(list, member) do { \
+	list_for_each_entry(vma, list, member) { \
+		size += i915_gem_obj_total_ggtt_size(vma->obj); \
+		++count; \
+		if (vma->obj->map_and_fenceable) { \
+			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+			++mappable_count; \
+		} \
+	} \
+} while (0)
+
+static int i915_gem_object_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count, purgeable_count;
+	u64 size, mappable_size, purgeable_size;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct drm_file *file;
+	struct i915_vma *vma;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.bound_list, global_list);
+	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_vmas(&vm->active_list, mm_list);
+	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_vmas(&vm->inactive_list, mm_list);
+	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = purgeable_size = purgeable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		size += obj->base.size, ++count;
+		if (obj->madv == I915_MADV_DONTNEED)
+			purgeable_size += obj->base.size, ++purgeable_count;
+	}
+	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (obj->fault_mappable) {
+			size += i915_gem_obj_ggtt_size(obj);
+			++count;
+		}
+		if (obj->pin_display) {
+			mappable_size += i915_gem_obj_ggtt_size(obj);
+			++mappable_count;
+		}
+		if (obj->madv == I915_MADV_DONTNEED) {
+			purgeable_size += obj->base.size;
+			++purgeable_count;
+		}
+	}
+	seq_printf(m, "%u purgeable objects, %llu bytes\n",
+		   purgeable_count, purgeable_size);
+	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%llu [%llu] gtt total\n",
+		   dev_priv->gtt.base.total,
+		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+
+	seq_putc(m, '\n');
+	print_batch_pool_stats(m, dev_priv);
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct file_stats stats;
+		struct task_struct *task;
+
+		memset(&stats, 0, sizeof(stats));
+		stats.file_priv = file->driver_priv;
+		spin_lock(&file->table_lock);
+		idr_for_each(&file->object_idr, per_file_stats, &stats);
+		spin_unlock(&file->table_lock);
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task that called get_pid() is still
+		 * alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
+		print_file_stats(m, task ? task->comm : "<unknown>", stats);
+		rcu_read_unlock();
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	u64 total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+			continue;
+
+		seq_puts(m, "   ");
+		describe_obj(m, obj);
+		seq_putc(m, '\n');
+		total_obj_size += obj->base.size;
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for_each_intel_crtc(dev, crtc) {
+		const char pipe = pipe_name(crtc->pipe);
+		const char plane = plane_name(crtc->plane);
+		struct intel_unpin_work *work;
+
+		spin_lock_irq(&dev->event_lock);
+		work = crtc->unpin_work;
+		if (work == NULL) {
+			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+				   pipe, plane);
+		} else {
+			u32 addr;
+
+			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+					   pipe, plane);
+			} else {
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+					   pipe, plane);
+			}
+			if (work->flip_queued_req) {
+				struct intel_engine_cs *ring =
+					i915_gem_request_get_ring(work->flip_queued_req);
+
+				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+					   ring->name,
+					   i915_gem_request_get_seqno(work->flip_queued_req),
+					   dev_priv->next_seqno,
+					   ring->get_seqno(ring, true),
+					   i915_gem_request_completed(work->flip_queued_req, true));
+			} else
+				seq_printf(m, "Flip not associated with any ring\n");
+			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
+				   work->flip_queued_vblank,
+				   work->flip_ready_vblank,
+				   drm_crtc_vblank_count(&crtc->base));
+			if (work->enable_stall_check)
+				seq_puts(m, "Stall check enabled, ");
+			else
+				seq_puts(m, "Stall check waiting for page flip ioctl, ");
+			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+			if (INTEL_INFO(dev)->gen >= 4)
+				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
+			else
+				addr = I915_READ(DSPADDR(crtc->plane));
+			seq_printf(m, "Current scanout address 0x%08x\n", addr);
+
+			if (work->pending_flip_obj) {
+				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
+				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
+			}
+		}
+		spin_unlock_irq(&dev->event_lock);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *ring;
+	int total = 0;
+	int ret, i, j;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+			int count;
+
+			count = 0;
+			list_for_each_entry(obj,
+					    &ring->batch_pool.cache_list[j],
+					    batch_pool_link)
+				count++;
+			seq_printf(m, "%s cache[%d]: %d objects\n",
+				   ring->name, j, count);
+
+			list_for_each_entry(obj,
+					    &ring->batch_pool.cache_list[j],
+					    batch_pool_link) {
+				seq_puts(m, "   ");
+				describe_obj(m, obj);
+				seq_putc(m, '\n');
+			}
+
+			total += count;
+		}
+	}
+
+	seq_printf(m, "total: %d\n", total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_request_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct drm_i915_gem_request *req;
+	int ret, any, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	any = 0;
+	for_each_ring(ring, dev_priv, i) {
+		int count;
+
+		count = 0;
+		list_for_each_entry(req, &ring->request_list, list)
+			count++;
+		if (count == 0)
+			continue;
+
+		seq_printf(m, "%s requests: %d\n", ring->name, count);
+		list_for_each_entry(req, &ring->request_list, list) {
+			struct task_struct *task;
+
+			rcu_read_lock();
+			task = NULL;
+			if (req->pid)
+				task = pid_task(req->pid, PIDTYPE_PID);
+			seq_printf(m, "    %x @ %d: %s [%d]\n",
+				   req->seqno,
+				   (int) (jiffies - req->emitted_jiffies),
+				   task ? task->comm : "<unknown>",
+				   task ? task->pid : -1);
+			rcu_read_unlock();
+		}
+
+		any++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	if (any == 0)
+		seq_puts(m, "No requests\n");
+
+	return 0;
+}
+
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_engine_cs *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %x\n",
+			   ring->name, ring->get_seqno(ring, false));
+	}
+}
+
+static int i915_gem_seqno_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_seqno_info(m, ring);
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_interrupt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i, pipe;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_CHERRYVIEW(dev)) {
+		seq_printf(m, "Master Interrupt Control:\t%08x\n",
+			   I915_READ(GEN8_MASTER_IRQ));
+
+		seq_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		seq_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		seq_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		seq_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(dev_priv, pipe)
+			seq_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		seq_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		seq_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+		for (i = 0; i < 4; i++) {
+			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IMR(i)));
+			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IIR(i)));
+			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IER(i)));
+		}
+
+		seq_printf(m, "PCU interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_PCU_IMR));
+		seq_printf(m, "PCU interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_PCU_IIR));
+		seq_printf(m, "PCU interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_PCU_IER));
+	} else if (INTEL_INFO(dev)->gen >= 8) {
+		seq_printf(m, "Master Interrupt Control:\t%08x\n",
+			   I915_READ(GEN8_MASTER_IRQ));
+
+		for (i = 0; i < 4; i++) {
+			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IMR(i)));
+			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IIR(i)));
+			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IER(i)));
+		}
+
+		for_each_pipe(dev_priv, pipe) {
+			if (!intel_display_power_is_enabled(dev_priv,
+						POWER_DOMAIN_PIPE(pipe))) {
+				seq_printf(m, "Pipe %c power disabled\n",
+					   pipe_name(pipe));
+				continue;
+			}
+			seq_printf(m, "Pipe %c IMR:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
+			seq_printf(m, "Pipe %c IIR:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
+			seq_printf(m, "Pipe %c IER:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
+		}
+
+		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IMR));
+		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IIR));
+		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IER));
+
+		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IMR));
+		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IIR));
+		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IER));
+
+		seq_printf(m, "PCU interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_PCU_IMR));
+		seq_printf(m, "PCU interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_PCU_IIR));
+		seq_printf(m, "PCU interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_PCU_IER));
+	} else if (IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		seq_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		seq_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		seq_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(dev_priv, pipe)
+			seq_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		seq_printf(m, "Master IER:\t%08x\n",
+			   I915_READ(VLV_MASTER_IER));
+
+		seq_printf(m, "Render IER:\t%08x\n",
+			   I915_READ(GTIER));
+		seq_printf(m, "Render IIR:\t%08x\n",
+			   I915_READ(GTIIR));
+		seq_printf(m, "Render IMR:\t%08x\n",
+			   I915_READ(GTIMR));
+
+		seq_printf(m, "PM IER:\t\t%08x\n",
+			   I915_READ(GEN6_PMIER));
+		seq_printf(m, "PM IIR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIIR));
+		seq_printf(m, "PM IMR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIMR));
+
+		seq_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		seq_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+	} else if (!HAS_PCH_SPLIT(dev)) {
+		seq_printf(m, "Interrupt enable:    %08x\n",
+			   I915_READ(IER));
+		seq_printf(m, "Interrupt identity:  %08x\n",
+			   I915_READ(IIR));
+		seq_printf(m, "Interrupt mask:      %08x\n",
+			   I915_READ(IMR));
+		for_each_pipe(dev_priv, pipe)
+			seq_printf(m, "Pipe %c stat:         %08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+	} else {
+		seq_printf(m, "North Display Interrupt enable:		%08x\n",
+			   I915_READ(DEIER));
+		seq_printf(m, "North Display Interrupt identity:	%08x\n",
+			   I915_READ(DEIIR));
+		seq_printf(m, "North Display Interrupt mask:		%08x\n",
+			   I915_READ(DEIMR));
+		seq_printf(m, "South Display Interrupt enable:		%08x\n",
+			   I915_READ(SDEIER));
+		seq_printf(m, "South Display Interrupt identity:	%08x\n",
+			   I915_READ(SDEIIR));
+		seq_printf(m, "South Display Interrupt mask:		%08x\n",
+			   I915_READ(SDEIMR));
+		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
+			   I915_READ(GTIER));
+		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
+			   I915_READ(GTIIR));
+		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
+			   I915_READ(GTIMR));
+	}
+	for_each_ring(ring, dev_priv, i) {
+		if (INTEL_INFO(dev)->gen >= 6) {
+			seq_printf(m,
+				   "Graphics Interrupt mask (%s):	%08x\n",
+				   ring->name, I915_READ_IMR(ring));
+		}
+		i915_ring_seqno_info(m, ring);
+	}
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+		seq_printf(m, "Fence %d, pin count = %d, object = ",
+			   i, dev_priv->fence_regs[i].pin_count);
+		if (obj == NULL)
+			seq_puts(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_putc(m, '\n');
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_hws_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	const u32 *hws;
+	int i;
+
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = ring->status_page.page_addr;
+	if (hws == NULL)
+		return 0;
+
+	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			   i * 4,
+			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+	}
+	return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct i915_error_state_file_priv *error_priv = filp->private_data;
+	struct drm_device *dev = error_priv->dev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct i915_error_state_file_priv *error_priv;
+
+	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+	if (!error_priv)
+		return -ENOMEM;
+
+	error_priv->dev = dev;
+
+	i915_error_state_get(dev, error_priv);
+
+	file->private_data = error_priv;
+
+	return 0;
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+	struct i915_error_state_file_priv *error_priv = file->private_data;
+
+	i915_error_state_put(error_priv);
+	kfree(error_priv);
+
+	return 0;
+}
+
+static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
+				     size_t count, loff_t *pos)
+{
+	struct i915_error_state_file_priv *error_priv = file->private_data;
+	struct drm_i915_error_state_buf error_str;
+	loff_t tmp_pos = 0;
+	ssize_t ret_count = 0;
+	int ret;
+
+	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+	if (ret)
+		return ret;
+
+	ret = i915_error_state_to_str(&error_str, error_priv);
+	if (ret)
+		goto out;
+
+	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
+					    error_str.buf,
+					    error_str.bytes);
+
+	if (ret_count < 0)
+		ret = ret_count;
+	else
+		*pos = error_str.start + ret_count;
+out:
+	i915_error_state_buf_release(&error_str);
+	return ret ?: ret_count;
+}
+
+static const struct file_operations i915_error_state_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_error_state_open,
+	.read = i915_error_state_read,
+	.write = i915_error_state_write,
+	.llseek = default_llseek,
+	.release = i915_error_state_release,
+};
+
+static int
+i915_next_seqno_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	*val = dev_priv->next_seqno;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int
+i915_next_seqno_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_set_seqno(dev, val);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
+			i915_next_seqno_get, i915_next_seqno_set,
+			"0x%llx\n");
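+
+/*
+ * Usage sketch from userspace (the path and node name are assumptions:
+ * debugfs mounted at /sys/kernel/debug, DRM minor 0, and the file named
+ * after these fops as i915_next_seqno):
+ *
+ *	# cat /sys/kernel/debug/dri/0/i915_next_seqno
+ *	# echo 0x1000 > /sys/kernel/debug/dri/0/i915_next_seqno
+ */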
+
+static int i915_frequency_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	intel_runtime_pm_get(dev_priv);
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
+		u32 rp_state_limits;
+		u32 gt_perf_status;
+		u32 rp_state_cap;
+		u32 rpmodectl, rpinclimit, rpdeclimit;
+		u32 rpstat, cagf, reqf;
+		u32 rpupei, rpcurup, rpprevup;
+		u32 rpdownei, rpcurdown, rpprevdown;
+		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+		int max_freq;
+
+		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		if (IS_BROXTON(dev)) {
+			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+		} else {
+			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		}
+
+		/* RPSTAT1 is in the GT power well */
+		ret = mutex_lock_interruptible(&dev->struct_mutex);
+		if (ret)
+			goto out;
+
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+		reqf = I915_READ(GEN6_RPNSWREQ);
+		if (IS_GEN9(dev))
+			reqf >>= 23;
+		else {
+			reqf &= ~GEN6_TURBO_DISABLE;
+			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+				reqf >>= 24;
+			else
+				reqf >>= 25;
+		}
+		reqf = intel_gpu_freq(dev_priv, reqf);
+
+		rpmodectl = I915_READ(GEN6_RP_CONTROL);
+		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
+		rpstat = I915_READ(GEN6_RPSTAT1);
+		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+		rpcurup = I915_READ(GEN6_RP_CUR_UP);
+		rpprevup = I915_READ(GEN6_RP_PREV_UP);
+		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+		if (IS_GEN9(dev))
+			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+		else
+			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+		cagf = intel_gpu_freq(dev_priv, cagf);
+
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+			pm_ier = I915_READ(GEN6_PMIER);
+			pm_imr = I915_READ(GEN6_PMIMR);
+			pm_isr = I915_READ(GEN6_PMISR);
+			pm_iir = I915_READ(GEN6_PMIIR);
+			pm_mask = I915_READ(GEN6_PMINTRMSK);
+		} else {
+			pm_ier = I915_READ(GEN8_GT_IER(2));
+			pm_imr = I915_READ(GEN8_GT_IMR(2));
+			pm_isr = I915_READ(GEN8_GT_ISR(2));
+			pm_iir = I915_READ(GEN8_GT_IIR(2));
+			pm_mask = I915_READ(GEN6_PMINTRMSK);
+		}
+		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+		seq_printf(m, "CAGF: %dMHz\n", cagf);
+		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+			   GEN6_CURICONT_MASK);
+		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "Up threshold: %d%%\n",
+			   dev_priv->rps.up_threshold);
+
+		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+			   GEN6_CURIAVG_MASK);
+		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "Down threshold: %d%%\n",
+			   dev_priv->rps.down_threshold);
+
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+			    rp_state_cap >> 16) & 0xff;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   intel_gpu_freq(dev_priv, max_freq));
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   intel_gpu_freq(dev_priv, max_freq));
+
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+			    rp_state_cap >> 0) & 0xff;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   intel_gpu_freq(dev_priv, max_freq));
+		seq_printf(m, "Max overclocked frequency: %dMHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+		seq_printf(m, "Current freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+		seq_printf(m, "Actual freq: %d MHz\n", cagf);
+		seq_printf(m, "Idle freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+		seq_printf(m, "Min freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+		seq_printf(m, "Max freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+		seq_printf(m,
+			   "efficient (RPe) frequency: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+	} else if (IS_VALLEYVIEW(dev)) {
+		u32 freq_sts;
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+
+		seq_printf(m, "actual GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+
+		seq_printf(m, "current GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+
+		seq_printf(m, "max GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+		seq_printf(m, "min GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+		seq_printf(m, "idle GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
+		seq_printf(m,
+			   "efficient (RPe) frequency: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	} else {
+		seq_puts(m, "no P-state info available\n");
+	}
+
+	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
+	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
+	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
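+
+/*
+ * Note on the conversions above: intel_gpu_freq() turns a raw ratio from
+ * the RP registers into MHz. As a rough worked example (the exact step
+ * size is platform dependent), one ratio step is nominally 50MHz on SNB
+ * through BDW, so a ratio of 18 reads back as 900MHz; SKL uses finer
+ * steps, hence the GEN9_FREQ_SCALER adjustments above.
+ */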
+
+static int i915_hangcheck_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u64 acthd[I915_NUM_RINGS];
+	u32 seqno[I915_NUM_RINGS];
+	int i;
+
+	if (!i915.enable_hangcheck) {
+		seq_printf(m, "Hangcheck disabled\n");
+		return 0;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	for_each_ring(ring, dev_priv, i) {
+		seqno[i] = ring->get_seqno(ring, false);
+		acthd[i] = intel_ring_get_active_head(ring);
+	}
+
+	intel_runtime_pm_put(dev_priv);
+
+	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
+		seq_printf(m, "Hangcheck active, fires in %dms\n",
+			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+					    jiffies));
+	} else
+		seq_printf(m, "Hangcheck inactive\n");
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "%s:\n", ring->name);
+		seq_printf(m, "\tseqno = %x [current %x]\n",
+			   ring->hangcheck.seqno, seqno[i]);
+		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+			   (long long)ring->hangcheck.acthd,
+			   (long long)acthd[i]);
+		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
+			   (long long)ring->hangcheck.max_acthd);
+		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+	}
+
+	return 0;
+}
+
+static int ironlake_drpc_info(struct seq_file *m)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	rgvmodectl = I915_READ(MEMMODECTL);
+	rstdbyctl = I915_READ(RSTDBYCTL);
+	crstandvid = I915_READ16(CRSTANDVID);
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+	seq_printf(m, "Gated voltage change: %s\n",
+		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max P-state: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
+	seq_puts(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_puts(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_puts(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_puts(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_puts(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_puts(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_puts(m, "RS3 (RC6+)\n");
+		break;
+	default:
+		seq_puts(m, "unknown\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int i915_forcewake_domains(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_uncore_forcewake_domain *fw_domain;
+	int i;
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	for_each_fw_domain(fw_domain, dev_priv, i) {
+		seq_printf(m, "%s.wake_count = %u\n",
+			   intel_uncore_forcewake_domain_to_str(i),
+			   fw_domain->wake_count);
+	}
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	return 0;
+}
+
+static int vlv_drpc_info(struct seq_file *m)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rpmodectl1, rcctl1, pw_status;
+
+	intel_runtime_pm_get(dev_priv);
+
+	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
+	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+	intel_runtime_pm_put(dev_priv);
+
+	seq_printf(m, "Video Turbo Mode: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+	seq_printf(m, "Turbo enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+			  GEN6_RP_MEDIA_SW_MODE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+					GEN6_RC_CTL_EI_MODE(1))));
+	seq_printf(m, "Render Power Well: %s\n",
+		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+	seq_printf(m, "Media Power Well: %s\n",
+		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+	seq_printf(m, "Render RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_RENDER_RC6));
+	seq_printf(m, "Media RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_MEDIA_RC6));
+
+	return i915_forcewake_domains(m, NULL);
+}
+
+static int gen6_drpc_info(struct seq_file *m)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+	unsigned forcewake_count;
+	int count = 0, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	if (forcewake_count) {
+		seq_puts(m, "RC information inaccurate because somebody "
+			    "holds a forcewake reference\n");
+	} else {
+		/* NB: we cannot use forcewake, else we read the wrong values */
+		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+			udelay(10);
+		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+	}
+
+	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
+
+	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+	mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
+
+	seq_printf(m, "Video Turbo Mode: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+			  GEN6_RP_MEDIA_SW_MODE));
+	seq_printf(m, "RC1e Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+	seq_printf(m, "Deep RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+	seq_printf(m, "Deepest RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+	seq_puts(m, "Current RC state: ");
+	switch (gt_core_status & GEN6_RCn_MASK) {
+	case GEN6_RC0:
+		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+			seq_puts(m, "Core Power Down\n");
+		else
+			seq_puts(m, "on\n");
+		break;
+	case GEN6_RC3:
+		seq_puts(m, "RC3\n");
+		break;
+	case GEN6_RC6:
+		seq_puts(m, "RC6\n");
+		break;
+	case GEN6_RC7:
+		seq_puts(m, "RC7\n");
+		break;
+	default:
+		seq_puts(m, "Unknown\n");
+		break;
+	}
+
+	seq_printf(m, "Core Power Down: %s\n",
+		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+	/* Not exactly sure what this is */
+	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+	seq_printf(m, "RC6 residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6));
+	seq_printf(m, "RC6+ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6p));
+	seq_printf(m, "RC6++ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6pp));
+
+	seq_printf(m, "RC6   voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+	seq_printf(m, "RC6+  voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+	seq_printf(m, "RC6++ voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+	return 0;
+}
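+
+/*
+ * Worked example for the VID decode above, assuming the usual encoding
+ * GEN6_DECODE_RC6_VID(vids) == ((vids) * 5) + 245: a raw VID byte of 30
+ * decodes to 30 * 5 + 245 = 395mV.
+ */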
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	if (IS_VALLEYVIEW(dev))
+		return vlv_drpc_info(m);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_drpc_info(m);
+	else
+		return ironlake_drpc_info(m);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+		   dev_priv->fb_tracking.busy_bits);
+
+	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+		   dev_priv->fb_tracking.flip_bits);
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_FBC(dev)) {
+		seq_puts(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&dev_priv->fbc.lock);
+
+	if (intel_fbc_enabled(dev_priv))
+		seq_puts(m, "FBC enabled\n");
+	else
+		seq_printf(m, "FBC disabled: %s\n",
+			  intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		seq_printf(m, "Compressing: %s\n",
+			   yesno(I915_READ(FBC_STATUS2) &
+				 FBC_COMPRESSION_MASK));
+
+	mutex_unlock(&dev_priv->fbc.lock);
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+static int i915_fbc_fc_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+		return -ENODEV;
+
+	*val = dev_priv->fbc.false_color;
+
+	return 0;
+}
+
+static int i915_fbc_fc_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+
+	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+		return -ENODEV;
+
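+	/*
+	 * "False color" is an FBC debug feature: with the bit set, the
+	 * hardware tints compressed framebuffer segments so they can be
+	 * told apart from uncompressed ones on screen.
+	 */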
+	mutex_lock(&dev_priv->fbc.lock);
+
+	reg = I915_READ(ILK_DPFC_CONTROL);
+	dev_priv->fbc.false_color = val;
+
+	I915_WRITE(ILK_DPFC_CONTROL, val ?
+		   (reg | FBC_CTL_FALSE_COLOR) :
+		   (reg & ~FBC_CTL_FALSE_COLOR));
+
+	mutex_unlock(&dev_priv->fbc.lock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
+			i915_fbc_fc_get, i915_fbc_fc_set,
+			"%llu\n");
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_IPS(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	intel_runtime_pm_get(dev_priv);
+
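+	/*
+	 * The self-refresh enable bit lives in a different register on
+	 * nearly every platform generation, hence the cascade below.
+	 */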
+	if (HAS_PCH_SPLIT(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
+		 IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+	else if (IS_VALLEYVIEW(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+	intel_runtime_pm_put(dev_priv);
+
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+	int ret;
+
+	if (!IS_GEN5(dev))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+	int gpu_freq, ia_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
+
+	if (!HAS_CORE_RING_FREQ(dev)) {
+		seq_puts(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		goto out;
+
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+		min_gpu_freq =
+			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq =
+			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+	}
+
+	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+		ia_freq = gpu_freq;
+		sandybridge_pcode_read(dev_priv,
+				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
+				       &ia_freq);
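+		/*
+		 * The pcode mailbox returns the effective CPU frequency in
+		 * bits 7:0 and the effective ring frequency in bits 15:8,
+		 * both in 100 MHz units.
+		 */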
+		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+			   intel_gpu_freq(dev_priv, (gpu_freq *
+				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
+			   ((ia_freq >> 0) & 0xff) * 100,
+			   ((ia_freq >> 8) & 0xff) * 100);
+	}
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
+	int ret;
+
+	if (data == NULL)
+		return -ENOMEM;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		goto out;
+
+	if (opregion->header) {
+		memcpy(data, opregion->header, OPREGION_SIZE);
+		seq_write(m, data, OPREGION_SIZE);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+out:
+	kfree(data);
+	return ret;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_fbdev *ifbdev = NULL;
+	struct intel_framebuffer *fb;
+	struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel,
+		   fb->base.modifier[0],
+		   atomic_read(&fb->base.refcount.refcount));
+	describe_obj(m, fb->obj);
+	seq_putc(m, '\n');
+#endif
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	drm_for_each_fb(drm_fb, dev) {
+		fb = to_intel_framebuffer(drm_fb);
+		if (ifbdev && &fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel,
+			   fb->base.modifier[0],
+			   atomic_read(&fb->base.refcount.refcount));
+		describe_obj(m, fb->obj);
+		seq_putc(m, '\n');
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+static void describe_ctx_ringbuf(struct seq_file *m,
+				 struct intel_ringbuffer *ringbuf)
+{
+	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
+		   ringbuf->space, ringbuf->head, ringbuf->tail,
+		   ringbuf->last_retired_head);
+}
+
+static int i915_context_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		if (!i915.enable_execlists &&
+		    ctx->legacy_hw_ctx.rcs_state == NULL)
+			continue;
+
+		seq_puts(m, "HW context ");
+		describe_ctx(m, ctx);
+		for_each_ring(ring, dev_priv, i) {
+			if (ring->default_context == ctx)
+				seq_printf(m, "(default context %s) ",
+					   ring->name);
+		}
+
+		if (i915.enable_execlists) {
+			seq_putc(m, '\n');
+			for_each_ring(ring, dev_priv, i) {
+				struct drm_i915_gem_object *ctx_obj =
+					ctx->engine[i].state;
+				struct intel_ringbuffer *ringbuf =
+					ctx->engine[i].ringbuf;
+
+				seq_printf(m, "%s: ", ring->name);
+				if (ctx_obj)
+					describe_obj(m, ctx_obj);
+				if (ringbuf)
+					describe_ctx_ringbuf(m, ringbuf);
+				seq_putc(m, '\n');
+			}
+		} else {
+			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+		}
+
+		seq_putc(m, '\n');
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static void i915_dump_lrc_obj(struct seq_file *m,
+			      struct intel_engine_cs *ring,
+			      struct drm_i915_gem_object *ctx_obj)
+{
+	struct page *page;
+	uint32_t *reg_state;
+	int j;
+	unsigned long ggtt_offset = 0;
+
+	if (ctx_obj == NULL) {
+		seq_printf(m, "Context on %s with no gem object\n",
+			   ring->name);
+		return;
+	}
+
+	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+		   intel_execlists_ctx_id(ctx_obj));
+
+	if (!i915_gem_obj_ggtt_bound(ctx_obj))
+		seq_puts(m, "\tNot bound in GGTT\n");
+	else
+		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+	if (i915_gem_object_get_pages(ctx_obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n");
+		return;
+	}
+
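+	/*
+	 * The logical ring context register state lives one page into
+	 * the context object (LRC_STATE_PN); dump the first 0x600 bytes
+	 * of it, four dwords per line.
+	 */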
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	if (!WARN_ON(page == NULL)) {
+		reg_state = kmap_atomic(page);
+
+		for (j = 0; j < 0x600 / sizeof(u32); j += 4) {
+			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   ggtt_offset + 4096 + (j * 4),
+				   reg_state[j], reg_state[j + 1],
+				   reg_state[j + 2], reg_state[j + 3]);
+		}
+		kunmap_atomic(reg_state);
+	}
+
+	seq_putc(m, '\n');
+}
+
+static int i915_dump_lrc(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
+	int ret, i;
+
+	if (!i915.enable_execlists) {
+		seq_puts(m, "Logical Ring Contexts are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		for_each_ring(ring, dev_priv, i) {
+			if (ring->default_context != ctx)
+				i915_dump_lrc_obj(m, ring,
+						  ctx->engine[i].state);
+		}
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_execlists(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 status_pointer;
+	u8 read_pointer;
+	u8 write_pointer;
+	u32 status;
+	u32 ctx_id;
+	struct list_head *cursor;
+	int ring_id, i;
+	int ret;
+
+	if (!i915.enable_execlists) {
+		seq_puts(m, "Logical Ring Contexts are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	intel_runtime_pm_get(dev_priv);
+
+	for_each_ring(ring, dev_priv, ring_id) {
+		struct drm_i915_gem_request *head_req = NULL;
+		int count = 0;
+		unsigned long flags;
+
+		seq_printf(m, "%s\n", ring->name);
+
+		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
+			   status, ctx_id);
+
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
+
+		read_pointer = ring->next_context_status_buffer;
+		write_pointer = status_pointer & 0x07;
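+		/*
+		 * The context status buffer is a ring of six entries; if
+		 * the write pointer appears to be behind the read pointer
+		 * it has wrapped, so unwrap it before printing.
+		 */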
+		if (read_pointer > write_pointer)
+			write_pointer += 6;
+		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
+			   read_pointer, write_pointer);
+
+		for (i = 0; i < 6; i++) {
+			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+
+			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
+				   i, status, ctx_id);
+		}
+
+		spin_lock_irqsave(&ring->execlist_lock, flags);
+		list_for_each(cursor, &ring->execlist_queue)
+			count++;
+		head_req = list_first_entry_or_null(&ring->execlist_queue,
+				struct drm_i915_gem_request, execlist_link);
+		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+		seq_printf(m, "\t%d requests in queue\n", count);
+		if (head_req) {
+			struct drm_i915_gem_object *ctx_obj;
+
+			ctx_obj = head_req->ctx->engine[ring_id].state;
+			seq_printf(m, "\tHead request id: %u\n",
+				   intel_execlists_ctx_id(ctx_obj));
+			seq_printf(m, "\tHead request tail: %u\n",
+				   head_req->tail);
+		}
+
+		seq_putc(m, '\n');
+	}
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static const char *swizzle_string(unsigned swizzle)
+{
+	switch (swizzle) {
+	case I915_BIT_6_SWIZZLE_NONE:
+		return "none";
+	case I915_BIT_6_SWIZZLE_9:
+		return "bit9";
+	case I915_BIT_6_SWIZZLE_9_10:
+		return "bit9/bit10";
+	case I915_BIT_6_SWIZZLE_9_11:
+		return "bit9/bit11";
+	case I915_BIT_6_SWIZZLE_9_10_11:
+		return "bit9/bit10/bit11";
+	case I915_BIT_6_SWIZZLE_9_17:
+		return "bit9/bit17";
+	case I915_BIT_6_SWIZZLE_9_10_17:
+		return "bit9/bit10/bit17";
+	case I915_BIT_6_SWIZZLE_UNKNOWN:
+		return "unknown";
+	}
+
+	return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+	if (IS_GEN3(dev) || IS_GEN4(dev)) {
+		seq_printf(m, "DCC = 0x%08x\n",
+			   I915_READ(DCC));
+		seq_printf(m, "DCC2 = 0x%08x\n",
+			   I915_READ(DCC2));
+		seq_printf(m, "C0DRB3 = 0x%04x\n",
+			   I915_READ16(C0DRB3));
+		seq_printf(m, "C1DRB3 = 0x%04x\n",
+			   I915_READ16(C1DRB3));
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C0));
+		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C1));
+		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C2));
+		seq_printf(m, "TILECTL = 0x%08x\n",
+			   I915_READ(TILECTL));
+		if (INTEL_INFO(dev)->gen >= 8)
+			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
+				   I915_READ(GAMTARBMODE));
+		else
+			seq_printf(m, "ARB_MODE = 0x%08x\n",
+				   I915_READ(ARB_MODE));
+		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+			   I915_READ(DISP_ARB_CTL));
+	}
+
+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		seq_puts(m, "L-shaped memory detected\n");
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+	struct intel_context *ctx = ptr;
+	struct seq_file *m = data;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+
+	if (!ppgtt) {
+		seq_printf(m, "  no ppgtt for context %d\n",
+			   ctx->user_handle);
+		return 0;
+	}
+
+	if (i915_gem_context_is_default(ctx))
+		seq_puts(m, "  default context:\n");
+	else
+		seq_printf(m, "  context %d:\n", ctx->user_handle);
+	ppgtt->debug_dump(ppgtt, m);
+
+	return 0;
+}
+
+static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int unused, i;
+
+	if (!ppgtt)
+		return;
+
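+	/*
+	 * Each ring exposes four page-directory pointers, each split
+	 * across an upper/lower dword register pair; reassemble them
+	 * into 64-bit values for printing.
+	 */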
+	for_each_ring(ring, dev_priv, unused) {
+		seq_printf(m, "%s\n", ring->name);
+		for (i = 0; i < 4; i++) {
+			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+			pdp <<= 32;
+			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+		}
+	}
+}
+
+static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	if (INTEL_INFO(dev)->gen == 6)
+		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "%s\n", ring->name);
+		if (INTEL_INFO(dev)->gen == 7)
+			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+	}
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		seq_puts(m, "aliasing PPGTT:\n");
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
+
+		ppgtt->debug_dump(ppgtt, m);
+	}
+
+	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_file *file;
+
+	int ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		gen8_ppgtt_info(m, dev);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		gen6_ppgtt_info(m, dev);
+
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct task_struct *task;
+
+		task = get_pid_task(file->pid, PIDTYPE_PID);
+		if (!task) {
+			ret = -ESRCH;
+			goto out_put;
+		}
+		seq_printf(m, "\nproc: %s\n", task->comm);
+		put_task_struct(task);
+		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
+	}
+
+out_put:
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int count_irq_waiters(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *ring;
+	int count = 0;
+	int i;
+
+	for_each_ring(ring, i915, i)
+		count += ring->irq_refcount;
+
+	return count;
+}
+
+static int i915_rps_boost_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_file *file;
+
+	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+	spin_lock(&dev_priv->rps.client_lock);
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct task_struct *task;
+
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
+		seq_printf(m, "%s [%d]: %d boosts%s\n",
+			   task ? task->comm : "<unknown>",
+			   task ? task->pid : -1,
+			   file_priv->rps.boosts,
+			   list_empty(&file_priv->rps.link) ? "" : ", active");
+		rcu_read_unlock();
+	}
+	seq_printf(m, "Semaphore boosts: %d%s\n",
+		   dev_priv->rps.semaphores.boosts,
+		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
+	seq_printf(m, "MMIO flip boosts: %d%s\n",
+		   dev_priv->rps.mmioflips.boosts,
+		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
+	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
+	spin_unlock(&dev_priv->rps.client_lock);
+
+	return 0;
+}
+
+static int i915_llc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+	return 0;
+}
+
+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	u32 tmp, i;
+
+	if (!HAS_GUC_UCODE(dev_priv->dev))
+		return 0;
+
+	seq_printf(m, "GuC firmware status:\n");
+	seq_printf(m, "\tpath: %s\n",
+		guc_fw->guc_fw_path);
+	seq_printf(m, "\tfetch: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+	seq_printf(m, "\tload: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+	seq_printf(m, "\tversion wanted: %d.%d\n",
+		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+	seq_printf(m, "\tversion found: %d.%d\n",
+		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+
+	tmp = I915_READ(GUC_STATUS);
+
+	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+	seq_printf(m, "\tBootrom status = 0x%x\n",
+		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+	seq_printf(m, "\tuKernel status = 0x%x\n",
+		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+	seq_printf(m, "\tMIA Core status = 0x%x\n",
+		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+	seq_puts(m, "\nScratch registers:\n");
+	for (i = 0; i < 16; i++)
+		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+	return 0;
+}
+
+static void i915_guc_client_info(struct seq_file *m,
+				 struct drm_i915_private *dev_priv,
+				 struct i915_guc_client *client)
+{
+	struct intel_engine_cs *ring;
+	uint64_t tot = 0;
+	uint32_t i;
+
+	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
+		client->priority, client->ctx_index, client->proc_desc_offset);
+	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+		client->doorbell_id, client->doorbell_offset, client->cookie);
+	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
+		client->wq_size, client->wq_offset, client->wq_tail);
+
+	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
+	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
+	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\tSubmissions: %llu %s\n",
+				client->submissions[i],
+				ring->name);
+		tot += client->submissions[i];
+	}
+	seq_printf(m, "\tTotal: %llu\n", tot);
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc guc;
+	struct i915_guc_client client = {};
+	struct intel_engine_cs *ring;
+	enum intel_ring_id i;
+	u64 total = 0;
+
+	if (!HAS_GUC_SCHED(dev_priv->dev))
+		return 0;
+
+	/* Take a local copy of the GuC data, so we can dump it at leisure */
+	spin_lock(&dev_priv->guc.host2guc_lock);
+	guc = dev_priv->guc;
+	if (guc.execbuf_client) {
+		spin_lock(&guc.execbuf_client->wq_lock);
+		client = *guc.execbuf_client;
+		spin_unlock(&guc.execbuf_client->wq_lock);
+	}
+	spin_unlock(&dev_priv->guc.host2guc_lock);
+
+	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+	seq_printf(m, "\nGuC submissions:\n");
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
+			ring->name, guc.submissions[i],
+			guc.last_seqno[i], guc.last_seqno[i]);
+		total += guc.submissions[i];
+	}
+	seq_printf(m, "\tTotal: %llu\n", total);
+
+	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
+	i915_guc_client_info(m, dev_priv, &client);
+
+	/* Add more as required ... */
+
+	return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
+	u32 *log;
+	int i = 0, pg;
+
+	if (!log_obj)
+		return 0;
+
+	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
+		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+
+		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
+			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   *(log + i), *(log + i + 1),
+				   *(log + i + 2), *(log + i + 3));
+
+		kunmap_atomic(log);
+	}
+
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 psrperf = 0;
+	u32 stat[3];
+	enum pipe pipe;
+	bool enabled = false;
+
+	if (!HAS_PSR(dev)) {
+		seq_puts(m, "PSR not supported\n");
+		return 0;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->psr.lock);
+	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+		   dev_priv->psr.busy_frontbuffer_bits);
+	seq_printf(m, "Re-enable work scheduled: %s\n",
+		   yesno(work_busy(&dev_priv->psr.work.work)));
+
+	if (HAS_DDI(dev))
+		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+	else {
+		for_each_pipe(dev_priv, pipe) {
+			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+				VLV_EDP_PSR_CURR_STATE_MASK;
+			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+				enabled = true;
+		}
+	}
+	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
+
+	if (!HAS_DDI(dev))
+		for_each_pipe(dev_priv, pipe) {
+			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+				seq_printf(m, " pipe %c", pipe_name(pipe));
+		}
+	seq_puts(m, "\n");
+
+	/* CHV PSR has no kind of performance counter */
+	if (HAS_DDI(dev)) {
+		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+			EDP_PSR_PERF_CNT_MASK;
+
+		seq_printf(m, "Performance_Counter: %u\n", psrperf);
+	}
+	mutex_unlock(&dev_priv->psr.lock);
+
+	intel_runtime_pm_put(dev_priv);
+	return 0;
+}
+
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct intel_dp *intel_dp = NULL;
+	int ret;
+	u8 crc[6];
+
+	drm_modeset_lock_all(dev);
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		if (!connector->base.encoder)
+			continue;
+
+		encoder = to_intel_encoder(connector->base.encoder);
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		ret = intel_dp_sink_crc(intel_dp, crc);
+		if (ret)
+			goto out;
+
+		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+			   crc[0], crc[1], crc[2],
+			   crc[3], crc[4], crc[5]);
+		goto out;
+	}
+	ret = -ENODEV;
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 power;
+	u32 units;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	intel_runtime_pm_get(dev_priv);
+
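+	/*
+	 * MSR_RAPL_POWER_UNIT bits 12:8 give the energy status unit as
+	 * 1/2^ESU joules; convert that to microjoules per count and
+	 * scale the raw MCH_SECP_NRG_STTS counter by it.
+	 */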
+	rdmsrl(MSR_RAPL_POWER_UNIT, power);
+	power = (power & 0x1f00) >> 8;
+	units = 1000000 / (1 << power); /* convert to uJ */
+	power = I915_READ(MCH_SECP_NRG_STTS);
+	power *= units;
+
+	intel_runtime_pm_put(dev_priv);
+
+	seq_printf(m, "%llu", (unsigned long long)power);
+
+	return 0;
+}
+
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_RUNTIME_PM(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+	seq_printf(m, "IRQs disabled: %s\n",
+		   yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+	seq_printf(m, "Usage count: %d\n",
+		   atomic_read(&dev->dev->power.usage_count));
+#else
+	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
+
+	return 0;
+}
+
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+		return "PIPE_A";
+	case POWER_DOMAIN_PIPE_B:
+		return "PIPE_B";
+	case POWER_DOMAIN_PIPE_C:
+		return "PIPE_C";
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+		return "PIPE_A_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+		return "PIPE_B_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+		return "PIPE_C_PANEL_FITTER";
+	case POWER_DOMAIN_TRANSCODER_A:
+		return "TRANSCODER_A";
+	case POWER_DOMAIN_TRANSCODER_B:
+		return "TRANSCODER_B";
+	case POWER_DOMAIN_TRANSCODER_C:
+		return "TRANSCODER_C";
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+		return "PORT_DDI_A_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+		return "PORT_DDI_A_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+		return "PORT_DDI_B_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+		return "PORT_DDI_B_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+		return "PORT_DDI_C_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+		return "PORT_DDI_C_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+		return "PORT_DDI_D_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+		return "PORT_DDI_D_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+		return "PORT_DDI_E_2_LANES";
+	case POWER_DOMAIN_PORT_DSI:
+		return "PORT_DSI";
+	case POWER_DOMAIN_PORT_CRT:
+		return "PORT_CRT";
+	case POWER_DOMAIN_PORT_OTHER:
+		return "PORT_OTHER";
+	case POWER_DOMAIN_VGA:
+		return "VGA";
+	case POWER_DOMAIN_AUDIO:
+		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
+	case POWER_DOMAIN_AUX_A:
+		return "AUX_A";
+	case POWER_DOMAIN_AUX_B:
+		return "AUX_B";
+	case POWER_DOMAIN_AUX_C:
+		return "AUX_C";
+	case POWER_DOMAIN_AUX_D:
+		return "AUX_D";
+	case POWER_DOMAIN_GMBUS:
+		return "GMBUS";
+	case POWER_DOMAIN_INIT:
+		return "INIT";
+	default:
+		MISSING_CASE(domain);
+		return "?";
+	}
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	int i;
+
+	mutex_lock(&power_domains->lock);
+
+	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+	for (i = 0; i < power_domains->power_well_count; i++) {
+		struct i915_power_well *power_well;
+		enum intel_display_power_domain power_domain;
+
+		power_well = &power_domains->power_wells[i];
+		seq_printf(m, "%-25s %d\n", power_well->name,
+			   power_well->count);
+
+		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+		     power_domain++) {
+			if (!(BIT(power_domain) & power_well->domains))
+				continue;
+
+			seq_printf(m, "  %-23s %d\n",
+				 power_domain_str(power_domain),
+				 power_domains->domain_use_count[power_domain]);
+		}
+	}
+
+	mutex_unlock(&power_domains->lock);
+
+	return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+				 struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < tabs; i++)
+		seq_putc(m, '\t');
+
+	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
+		   mode->base.id, mode->name,
+		   mode->vrefresh, mode->clock,
+		   mode->hdisplay, mode->hsync_start,
+		   mode->hsync_end, mode->htotal,
+		   mode->vdisplay, mode->vsync_start,
+		   mode->vsync_end, mode->vtotal,
+		   mode->type, mode->flags);
+}
+
+static void intel_encoder_info(struct seq_file *m,
+			       struct intel_crtc *intel_crtc,
+			       struct intel_encoder *intel_encoder)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct intel_connector *intel_connector;
+	struct drm_encoder *encoder;
+
+	encoder = &intel_encoder->base;
+	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
+		   encoder->base.id, encoder->name);
+	for_each_connector_on_encoder(dev, encoder, intel_connector) {
+		struct drm_connector *connector = &intel_connector->base;
+		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
+			   connector->base.id,
+			   connector->name,
+			   drm_get_connector_status_name(connector->status));
+		if (connector->status == connector_status_connected) {
+			struct drm_display_mode *mode = &crtc->mode;
+			seq_printf(m, ", mode:\n");
+			intel_seq_print_mode(m, 2, mode);
+		} else {
+			seq_putc(m, '\n');
+		}
+	}
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct intel_encoder *intel_encoder;
+	struct drm_plane_state *plane_state = crtc->primary->state;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (fb)
+		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+			   fb->base.id, plane_state->src_x >> 16,
+			   plane_state->src_y >> 16, fb->width, fb->height);
+	else
+		seq_puts(m, "\tprimary plane disabled\n");
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		intel_encoder_info(m, intel_crtc, intel_encoder);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+	struct drm_display_mode *mode = panel->fixed_mode;
+
+	seq_printf(m, "\tfixed mode:\n");
+	intel_seq_print_mode(m, 2, mode);
+}
+
+static void intel_dp_info(struct seq_file *m,
+			  struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+	if (intel_encoder->type == INTEL_OUTPUT_EDP)
+		intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+}
+
+static void intel_lvds_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+				 struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct drm_display_mode *mode;
+
+	seq_printf(m, "connector %d: type %s, status: %s\n",
+		   connector->base.id, connector->name,
+		   drm_get_connector_status_name(connector->status));
+	if (connector->status == connector_status_connected) {
+		seq_printf(m, "\tname: %s\n", connector->display_info.name);
+		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+			   connector->display_info.width_mm,
+			   connector->display_info.height_mm);
+		seq_printf(m, "\tsubpixel order: %s\n",
+			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+		seq_printf(m, "\tCEA rev: %d\n",
+			   connector->display_info.cea_rev);
+	}
+	if (intel_encoder) {
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
+			intel_dp_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+			intel_hdmi_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+			intel_lvds_info(m, intel_connector);
+	}
+
+	seq_printf(m, "\tmodes:\n");
+	list_for_each_entry(mode, &connector->modes, head)
+		intel_seq_print_mode(m, 2, mode);
+}
+
+static bool cursor_active(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 state;
+
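+	/*
+	 * 845G/865G have a single cursor enable bit; everything newer
+	 * encodes the cursor mode in a multi-bit field, where any
+	 * non-zero mode means the cursor is active.
+	 */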
+	if (IS_845G(dev) || IS_I865G(dev))
+		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+	else
+		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+	return state;
+}
+
+static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pos;
+
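+	/*
+	 * Cursor position is stored as sign-magnitude rather than two's
+	 * complement: a magnitude field plus a separate sign bit per
+	 * coordinate.
+	 */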
+	pos = I915_READ(CURPOS(pipe));
+
+	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
+	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
+		*x = -*x;
+
+	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
+	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
+		*y = -*y;
+
+	return cursor_active(dev, pipe);
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	struct drm_connector *connector;
+
+	intel_runtime_pm_get(dev_priv);
+	drm_modeset_lock_all(dev);
+	seq_printf(m, "CRTC info\n");
+	seq_printf(m, "---------\n");
+	for_each_intel_crtc(dev, crtc) {
+		bool active;
+		struct intel_crtc_state *pipe_config;
+		int x, y;
+
+		pipe_config = to_intel_crtc_state(crtc->base.state);
+
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
+			   crtc->base.base.id, pipe_name(crtc->pipe),
+			   yesno(pipe_config->base.active),
+			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+		if (pipe_config->base.active) {
+			intel_crtc_info(m, crtc);
+
+			active = cursor_position(dev, crtc->pipe, &x, &y);
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
+				   yesno(crtc->cursor_base),
+				   x, y, crtc->base.cursor->state->crtc_w,
+				   crtc->base.cursor->state->crtc_h,
+				   crtc->cursor_addr, yesno(active));
+		}
+
+		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+			   yesno(!crtc->cpu_fifo_underrun_disabled),
+			   yesno(!crtc->pch_fifo_underrun_disabled));
+	}
+
+	seq_printf(m, "\n");
+	seq_printf(m, "Connector info\n");
+	seq_printf(m, "--------------\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		intel_connector_info(m, connector);
+	}
+	drm_modeset_unlock_all(dev);
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
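+		/*
+		 * On Broadwell the semaphore seqnos live as 64-bit values
+		 * in the first page of the semaphore object, laid out as a
+		 * per-ring signal/wait matrix.
+		 */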
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, "  Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, "  Last wait:  ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, "  Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+			   pll->config.crtc_mask, pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n",
+			   pll->config.hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+static int i915_wa_registers(struct seq_file *m, void *unused)
+{
+	int i;
+	int ret;
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	intel_runtime_pm_get(dev_priv);
+
+	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+		u32 addr, mask, value, read;
+		bool ok;
+
+		addr = dev_priv->workarounds.reg[i].addr;
+		mask = dev_priv->workarounds.reg[i].mask;
+		value = dev_priv->workarounds.reg[i].value;
+		read = I915_READ(addr);
+		ok = (value & mask) == (read & mask);
+		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+			   addr, value, mask, read, ok ? "OK" : "FAIL");
+	}
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_ddb_allocation *ddb;
+	struct skl_ddb_entry *entry;
+	enum pipe pipe;
+	int plane;
+
+	if (INTEL_INFO(dev)->gen < 9)
+		return 0;
+
+	drm_modeset_lock_all(dev);
+
+	ddb = &dev_priv->wm.skl_hw.ddb;
+
+	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+	for_each_pipe(dev_priv, pipe) {
+		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+		for_each_plane(dev_priv, pipe, plane) {
+			entry = &ddb->plane[pipe][plane];
+			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
+				   entry->start, entry->end,
+				   skl_ddb_entry_size(entry));
+		}
+
+		entry = &ddb->plane[pipe][PLANE_CURSOR];
+		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
+			   entry->end, skl_ddb_entry_size(entry));
+	}
+
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+static void drrs_status_per_crtc(struct seq_file *m,
+		struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+	struct intel_encoder *intel_encoder;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_drrs *drrs = &dev_priv->drrs;
+	int vrefresh = 0;
+
+	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+		/* Encoder connected on this CRTC */
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			seq_puts(m, "eDP:\n");
+			break;
+		case INTEL_OUTPUT_DSI:
+			seq_puts(m, "DSI:\n");
+			break;
+		case INTEL_OUTPUT_HDMI:
+			seq_puts(m, "HDMI:\n");
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			seq_puts(m, "DP:\n");
+			break;
+		default:
+			seq_printf(m, "Other encoder (type=%d).\n",
+						intel_encoder->type);
+			return;
+		}
+	}
+
+	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+		seq_puts(m, "\tVBT: DRRS_type: Static");
+	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+		seq_puts(m, "\tVBT: DRRS_type: Seamless");
+	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+		seq_puts(m, "\tVBT: DRRS_type: None");
+	else
+		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+	seq_puts(m, "\n\n");
+
+	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+		struct intel_panel *panel;
+
+		mutex_lock(&drrs->mutex);
+		/* DRRS Supported */
+		seq_puts(m, "\tDRRS Supported: Yes\n");
+
+		/* disable_drrs() will make drrs->dp NULL */
+		if (!drrs->dp) {
+			seq_puts(m, "Idleness DRRS: Disabled");
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+
+		panel = &drrs->dp->attached_connector->panel;
+		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+					drrs->busy_frontbuffer_bits);
+
+		seq_puts(m, "\n\t\t");
+		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+			vrefresh = panel->fixed_mode->vrefresh;
+		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+			vrefresh = panel->downclock_mode->vrefresh;
+		} else {
+			seq_printf(m, "DRRS_State: Unknown(%d)\n",
+						drrs->refresh_rate_type);
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+		seq_puts(m, "\n\t\t");
+		mutex_unlock(&drrs->mutex);
+	} else {
+		/* DRRS not supported. Print the VBT parameter*/
+		seq_puts(m, "\tDRRS Supported: No");
+	}
+	seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_crtc *intel_crtc;
+	int active_crtc_cnt = 0;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+		if (intel_crtc->base.state->active) {
+			active_crtc_cnt++;
+			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
+
+			drrs_status_per_crtc(m, dev, intel_crtc);
+		}
+
+		drm_modeset_unlock(&intel_crtc->base.mutex);
+	}
+
+	if (!active_crtc_cnt)
+		seq_puts(m, "No active crtc found\n");
+
+	return 0;
+}
+
+struct pipe_crc_info {
+	const char *name;
+	struct drm_device *dev;
+	enum pipe pipe;
+};
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_digital_port *intel_dig_port;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = to_intel_encoder(encoder);
+		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+			continue;
+		intel_dig_port = enc_to_dig_port(encoder);
+		if (!intel_dig_port->dp.can_mst)
+			continue;
+
+		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+	}
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+		return -ENODEV;
+
+	spin_lock_irq(&pipe_crc->lock);
+
+	if (pipe_crc->opened) {
+		spin_unlock_irq(&pipe_crc->lock);
+		return -EBUSY; /* already open */
+	}
+
+	pipe_crc->opened = true;
+	filep->private_data = inode->i_private;
+
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->opened = false;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
+/* account for the trailing '\0' */
+#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+	assert_spin_locked(&pipe_crc->lock);
+	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+		   loff_t *pos)
+{
+	struct pipe_crc_info *info = filep->private_data;
+	struct drm_device *dev = info->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+	char buf[PIPE_CRC_BUFFER_LEN];
+	int n_entries;
+	ssize_t bytes_read;
+
+	/*
+	 * Don't allow user space to provide buffers not big enough to hold
+	 * a line of data.
+	 */
+	if (count < PIPE_CRC_LINE_LEN)
+		return -EINVAL;
+
+	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+		return 0;
+
+	/* Nothing to read yet: bail for O_NONBLOCK, otherwise block. */
+	spin_lock_irq(&pipe_crc->lock);
+	while (pipe_crc_data_count(pipe_crc) == 0) {
+		int ret;
+
+		if (filep->f_flags & O_NONBLOCK) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return -EAGAIN;
+		}
+
+		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+		if (ret) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return ret;
+		}
+	}
+
+	/* We now have one or more entries to read */
+	n_entries = count / PIPE_CRC_LINE_LEN;
+
+	bytes_read = 0;
+	while (n_entries > 0) {
+		struct intel_pipe_crc_entry *entry =
+			&pipe_crc->entries[pipe_crc->tail];
+		int ret;
+
+		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+			break;
+
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
+		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+				       "%8u %8x %8x %8x %8x %8x\n",
+				       entry->frame, entry->crc[0],
+				       entry->crc[1], entry->crc[2],
+				       entry->crc[3], entry->crc[4]);
+
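+		/*
+		 * copy_to_user() may fault and sleep, so drop the spinlock
+		 * around it and retake it before touching the ring again.
+		 */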
+		spin_unlock_irq(&pipe_crc->lock);
+
+		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
+		if (ret)
+			return -EFAULT;
+
+		user_buf += PIPE_CRC_LINE_LEN;
+		n_entries--;
+
+		spin_lock_irq(&pipe_crc->lock);
+	}
+
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_pipe_crc_open,
+	.read = i915_pipe_crc_read,
+	.release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+	{
+		.name = "i915_pipe_A_crc",
+		.pipe = PIPE_A,
+	},
+	{
+		.name = "i915_pipe_B_crc",
+		.pipe = PIPE_B,
+	},
+	{
+		.name = "i915_pipe_C_crc",
+		.pipe = PIPE_C,
+	},
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+				enum pipe pipe)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+	info->dev = dev;
+	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+				  &i915_pipe_crc_fops);
+	if (!ent)
+		return -ENOMEM;
+
+	return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+	"none",
+	"plane1",
+	"plane2",
+	"pf",
+	"pipe",
+	"TV",
+	"DP-B",
+	"DP-C",
+	"DP-D",
+	"auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+	return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		seq_printf(m, "%c %s\n", pipe_name(i),
+			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+	return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+				     enum intel_pipe_crc_source *source)
+{
+	struct intel_encoder *encoder;
+	struct intel_crtc *crtc;
+	struct intel_digital_port *dig_port;
+	int ret = 0;
+
+	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	drm_modeset_lock_all(dev);
+	for_each_intel_encoder(dev, encoder) {
+		if (!encoder->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(encoder->base.crtc);
+
+		if (crtc->pipe != pipe)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_TVOUT:
+			*source = INTEL_PIPE_CRC_SOURCE_TV;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_EDP:
+			dig_port = enc_to_dig_port(&encoder->base);
+			switch (dig_port->port) {
+			case PORT_B:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
+				break;
+			case PORT_C:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
+				break;
+			case PORT_D:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
+				break;
+			default:
+				WARN(1, "nonexistent DP port %c\n",
+				     port_name(dig_port->port));
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_CHERRYVIEW(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		tmp |= DC_BALANCE_RESET_VLV;
+		switch (pipe) {
+		case PIPE_A:
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+			break;
+		case PIPE_B:
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+			break;
+		case PIPE_C:
+			tmp |= PIPE_C_SCRAMBLE_RESET;
+			break;
+		default:
+			return -EINVAL;
+		}
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
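+/*
+ * Gen3/4 variant: the TV tap requires TV-out hardware and the DP taps
+ * are G4X-only, again with the stable-symbols workaround.
+ */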
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+				 enum pipe pipe,
+				 enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_TV:
+		if (!SUPPORTS_TV(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
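+/*
+ * The undo helpers clear the per-pipe scramble-reset bit and, once no
+ * pipe needs it any more, drop the DC-balance reset as well.
+ */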
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	switch (pipe) {
+	case PIPE_A:
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+		break;
+	case PIPE_B:
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+		break;
+	case PIPE_C:
+		tmp &= ~PIPE_C_SCRAMBLE_RESET;
+		break;
+	default:
+		return;
+	}
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+		tmp &= ~DC_BALANCE_RESET_VLV;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+	}
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+	struct intel_crtc_state *pipe_config;
+	struct drm_atomic_state *state;
+	int ret = 0;
+
+	drm_modeset_lock_all(dev);
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+	pipe_config = intel_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(pipe_config)) {
+		ret = PTR_ERR(pipe_config);
+		goto out;
+	}
+
+	pipe_config->pch_pfit.force_thru = enable;
+	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+	    pipe_config->pch_pfit.enabled != enable)
+		pipe_config->base.connectors_changed = true;
+
+	ret = drm_atomic_commit(state);
+out:
+	drm_modeset_unlock_all(dev);
+	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+	if (ret)
+		drm_atomic_state_free(state);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PF;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev, true);
+
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
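+/*
+ * Arm or disarm CRC capture on a pipe. Source changes must go through
+ * "none": enabling allocates the CRC entry ring and disables IPS (whose
+ * dynamic toggling would perturb the CRCs), while disabling waits for a
+ * final vblank before freeing the ring and undoing any platform
+ * workarounds.
+ */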
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+			       enum intel_pipe_crc_source source)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+									pipe));
+	u32 val = 0; /* shut up gcc */
+	int ret;
+
+	if (pipe_crc->source == source)
+		return 0;
+
+	/* forbid changing the source without going back to 'none' */
+	if (pipe_crc->source && source)
+		return -EINVAL;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+		return -EIO;
+	}
+
+	if (IS_GEN2(dev))
+		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+	else if (INTEL_INFO(dev)->gen < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev))
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_GEN5(dev) || IS_GEN6(dev))
+		ret = ilk_pipe_crc_ctl_reg(&source, &val);
+	else
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+
+	if (ret != 0)
+		return ret;
+
+	/* none -> real source transition */
+	if (source) {
+		struct intel_pipe_crc_entry *entries;
+
+		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+				 pipe_name(pipe), pipe_crc_source_name(source));
+
+		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+				  sizeof(pipe_crc->entries[0]),
+				  GFP_KERNEL);
+		if (!entries)
+			return -ENOMEM;
+
+		/*
+		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+		 * enabled and disabled dynamically based on package C states,
+		 * user space can't make reliable use of the CRCs, so let's just
+		 * completely disable it.
+		 */
+		hsw_disable_ips(crtc);
+
+		spin_lock_irq(&pipe_crc->lock);
+		kfree(pipe_crc->entries);
+		pipe_crc->entries = entries;
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
+		spin_unlock_irq(&pipe_crc->lock);
+	}
+
+	pipe_crc->source = source;
+
+	I915_WRITE(PIPE_CRC_CTL(pipe), val);
+	POSTING_READ(PIPE_CRC_CTL(pipe));
+
+	/* real source -> none transition */
+	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+		struct intel_pipe_crc_entry *entries;
+		struct intel_crtc *crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+				 pipe_name(pipe));
+
+		drm_modeset_lock(&crtc->base.mutex, NULL);
+		if (crtc->base.state->active)
+			intel_wait_for_vblank(dev, pipe);
+		drm_modeset_unlock(&crtc->base.mutex);
+
+		spin_lock_irq(&pipe_crc->lock);
+		entries = pipe_crc->entries;
+		pipe_crc->entries = NULL;
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
+		spin_unlock_irq(&pipe_crc->lock);
+
+		kfree(entries);
+
+		if (IS_G4X(dev))
+			g4x_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_VALLEYVIEW(dev))
+			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev, false);
+
+		hsw_enable_ips(crtc);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf | pipe | TV | DP-B | DP-C | DP-D | auto)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+	int n_words = 0;
+
+	while (*buf) {
+		char *end;
+
+		/* skip leading white space */
+		buf = skip_spaces(buf);
+		if (!*buf)
+			break;	/* end of buffer */
+
+		/* find end of word */
+		for (end = buf; *end && !isspace(*end); end++)
+			;
+
+		if (n_words == max_words) {
+			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+					 max_words);
+			return -EINVAL;	/* ran out of words[] before bytes */
+		}
+
+		if (*end)
+			*end++ = '\0';
+		words[n_words++] = buf;
+		buf = end;
+	}
+
+	return n_words;
+}
+
+enum intel_pipe_crc_object {
+	PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+	"pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+		if (!strcmp(buf, pipe_crc_objects[i])) {
+			*o = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+	const char name = buf[0];
+
+	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+		return -EINVAL;
+
+	*pipe = name - 'A';
+
+	return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+		if (!strcmp(buf, pipe_crc_sources[i])) {
+			*s = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+	int n_words;
+	char *words[N_WORDS];
+	enum pipe pipe;
+	enum intel_pipe_crc_object object;
+	enum intel_pipe_crc_source source;
+
+	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+	if (n_words != N_WORDS) {
+		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+				 N_WORDS);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+		return -EINVAL;
+	}
+
+	return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	char *tmpbuf;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+				 PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(tmpbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	tmpbuf[len] = '\0';
+
+	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+	kfree(tmpbuf);
+	if (ret < 0)
+		return ret;
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = display_crc_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = display_crc_ctl_write
+};
+
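+/*
+ * DP compliance-test hooks: i915_dp_test_active arms or disarms the
+ * compliance code on every connected DP connector (only a written value
+ * of exactly 1 arms it), while i915_dp_test_data and i915_dp_test_type
+ * expose what the sink requested.
+ */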
+static ssize_t i915_displayport_test_active_write(struct file *file,
+					    const char __user *ubuf,
+					    size_t len, loff_t *offp)
+{
+	char *input_buffer;
+	int status = 0;
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	struct list_head *connector_list;
+	struct intel_dp *intel_dp;
+	int val = 0;
+
+	dev = ((struct seq_file *)file->private_data)->private;
+
+	connector_list = &dev->mode_config.connector_list;
+
+	if (len == 0)
+		return 0;
+
+	input_buffer = kmalloc(len + 1, GFP_KERNEL);
+	if (!input_buffer)
+		return -ENOMEM;
+
+	if (copy_from_user(input_buffer, ubuf, len)) {
+		status = -EFAULT;
+		goto out;
+	}
+
+	input_buffer[len] = '\0';
+	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+	list_for_each_entry(connector, connector_list, head) {
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		if (connector->status == connector_status_connected &&
+		    connector->encoder != NULL) {
+			intel_dp = enc_to_intel_dp(connector->encoder);
+			status = kstrtoint(input_buffer, 10, &val);
+			if (status < 0)
+				goto out;
+			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+			/* To prevent erroneous activation of the compliance
+			 * testing code, only accept an actual value of 1 here
+			 */
+			if (val == 1)
+				intel_dp->compliance_test_active = 1;
+			else
+				intel_dp->compliance_test_active = 0;
+		}
+	}
+out:
+	kfree(input_buffer);
+	if (status < 0)
+		return status;
+
+	*offp += len;
+	return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_connector *connector;
+	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct intel_dp *intel_dp;
+
+	list_for_each_entry(connector, connector_list, head) {
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		if (connector->status == connector_status_connected &&
+		    connector->encoder != NULL) {
+			intel_dp = enc_to_intel_dp(connector->encoder);
+			if (intel_dp->compliance_test_active)
+				seq_puts(m, "1");
+			else
+				seq_puts(m, "0");
+		} else
+			seq_puts(m, "0");
+	}
+
+	return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+				       struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, i915_displayport_test_active_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_displayport_test_active_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_connector *connector;
+	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct intel_dp *intel_dp;
+
+	list_for_each_entry(connector, connector_list, head) {
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		if (connector->status == connector_status_connected &&
+		    connector->encoder != NULL) {
+			intel_dp = enc_to_intel_dp(connector->encoder);
+			seq_printf(m, "%lx", intel_dp->compliance_test_data);
+		} else
+			seq_puts(m, "0");
+	}
+
+	return 0;
+}
+
+static int i915_displayport_test_data_open(struct inode *inode,
+				       struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, i915_displayport_test_data_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_data_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_displayport_test_data_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_connector *connector;
+	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct intel_dp *intel_dp;
+
+	list_for_each_entry(connector, connector_list, head) {
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		if (connector->status == connector_status_connected &&
+		    connector->encoder != NULL) {
+			intel_dp = enc_to_intel_dp(connector->encoder);
+			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
+		} else
+			seq_puts(m, "0");
+	}
+
+	return 0;
+}
+
+static int i915_displayport_test_type_open(struct inode *inode,
+				       struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, i915_displayport_test_type_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_type_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_displayport_test_type_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
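+/*
+ * Watermark latency debugfs files: one file each for the primary,
+ * sprite and cursor latencies, holding one value per watermark level.
+ * The show side prints both the raw value and the scaled microsecond
+ * figure.
+ */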
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
+{
+	struct drm_device *dev = m->private;
+	int level;
+	int num_levels;
+
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++) {
+		unsigned int latency = wm[level];
+
+		/*
+		 * - WM1+ latency values in 0.5us units
+		 * - latencies are in us on gen9/vlv/chv
+		 */
+		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
+			latency *= 10;
+		else if (level > 0)
+			latency *= 5;
+
+		seq_printf(m, "WM%d %u (%u.%u usec)\n",
+			   level, wm[level], latency / 10, latency % 10);
+	}
+
+	drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.pri_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.spr_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.cur_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (INTEL_INFO(dev)->gen < 5)
+		return -ENODEV;
+
+	return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (HAS_GMCH_DISPLAY(dev))
+		return -ENODEV;
+
+	return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (HAS_GMCH_DISPLAY(dev))
+		return -ENODEV;
+
+	return single_open(file, cur_wm_latency_show, dev);
+}
+
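+/*
+ * Writes must supply exactly one decimal value per watermark level,
+ * space separated; e.g. from a shell (illustrative, the required count
+ * depends on the platform's number of levels):
+ *   echo "12 12 12 12 12" > i915_pri_wm_latency
+ */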
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+				size_t len, loff_t *offp, uint16_t wm[8])
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	uint16_t new[8] = { 0 };
+	int num_levels;
+	int level;
+	int ret;
+	char tmp[32];
+
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+		     &new[0], &new[1], &new[2], &new[3],
+		     &new[4], &new[5], &new[6], &new[7]);
+	if (ret != num_levels)
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++)
+		wm[level] = new[level];
+
+	drm_modeset_unlock_all(dev);
+
+	return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.pri_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.spr_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.cur_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = pri_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = spr_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = cur_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = cur_wm_latency_write
+};
+
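+/*
+ * i915_wedged: reading returns the current reset counter, writing
+ * injects a manual GPU hang via i915_handle_error().
+ */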
+static int
+i915_wedged_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	return 0;
+}
+
+static int
+i915_wedged_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * There is no safeguard against this debugfs entry colliding
+	 * with the hangcheck calling same i915_handle_error() in
+	 * parallel, causing an explosion. For now we assume that the
+	 * test harness is responsible enough not to inject gpu hangs
+	 * while it is writing to 'i915_wedged'
+	 */
+
+	if (i915_reset_in_progress(&dev_priv->gpu_error))
+		return -EAGAIN;
+
+	intel_runtime_pm_get(dev_priv);
+
+	i915_handle_error(dev, val,
+			  "Manually setting wedged to %llu", val);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+			i915_wedged_get, i915_wedged_set,
+			"%llu\n");
+
+static int
+i915_ring_stop_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.stop_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_stop_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.stop_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+			i915_ring_stop_get, i915_ring_stop_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.missed_irq_rings;
+	return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	dev_priv->gpu_error.missed_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.test_irq_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.test_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+			i915_ring_test_irq_get, i915_ring_test_irq_set,
+			"0x%08llx\n");
+
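+/*
+ * i915_gem_drop_caches bitmask: BOUND/UNBOUND shrink the respective
+ * object lists, RETIRE retires completed requests and ACTIVE first
+ * idles the GPU. E.g. (illustrative):
+ *   echo 0xf > i915_gem_drop_caches	# DROP_ALL
+ */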
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+static int
+i915_drop_caches_get(void *data, u64 *val)
+{
+	*val = DROP_ALL;
+
+	return 0;
+}
+
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+
+	/* No need to check and wait for gpu resets; only libdrm
+	 * auto-restarts ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
+
+	if (val & DROP_UNBOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+			i915_drop_caches_get, i915_drop_caches_set,
+			"0x%08llx\n");
+
+static int
+i915_max_freq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+static int
+i915_max_freq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hw_max, hw_min;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * Turbo will still be enabled, but won't go above the set value.
+	 */
+	val = intel_freq_opcode(dev_priv, val);
+
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.max_freq_softlimit = val;
+
+	intel_set_rps(dev, val);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
+			i915_max_freq_get, i915_max_freq_set,
+			"%llu\n");
+
+static int
+i915_min_freq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+static int
+i915_min_freq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hw_max, hw_min;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * Turbo will still be enabled, but won't go below the set value.
+	 */
+	val = intel_freq_opcode(dev_priv, val);
+
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.min_freq_softlimit = val;
+
+	intel_set_rps(dev, val);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
+			i915_min_freq_get, i915_min_freq_set,
+			"%llu\n");
+
+static int
+i915_cache_sharing_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 snpcr;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
+
+	return 0;
+}
+
+static int
+i915_cache_sharing_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 snpcr;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	if (val > 3)
+		return -EINVAL;
+
+	intel_runtime_pm_get(dev_priv);
+	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+
+	/* Update the cache sharing policy here as well */
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr &= ~GEN6_MBC_SNPCR_MASK;
+	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	intel_runtime_pm_put(dev_priv);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
+			i915_cache_sharing_get, i915_cache_sharing_set,
+			"%llu\n");
+
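+/*
+ * SSEU (slice/subslice/EU) status, gathered from the power-gating ack
+ * registers on CHV and gen9, or derived from the fuse info on BDW.
+ */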
+struct sseu_dev_status {
+	unsigned int slice_total;
+	unsigned int subslice_total;
+	unsigned int subslice_per_slice;
+	unsigned int eu_total;
+	unsigned int eu_per_subslice;
+};
+
+static void cherryview_sseu_device_status(struct drm_device *dev,
+					  struct sseu_dev_status *stat)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ss_max = 2;
+	int ss;
+	u32 sig1[ss_max], sig2[ss_max];
+
+	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+	for (ss = 0; ss < ss_max; ss++) {
+		unsigned int eu_cnt;
+
+		if (sig1[ss] & CHV_SS_PG_ENABLE)
+			/* skip disabled subslice */
+			continue;
+
+		stat->slice_total = 1;
+		stat->subslice_per_slice++;
+		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+		stat->eu_total += eu_cnt;
+		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+	}
+	stat->subslice_total = stat->subslice_per_slice;
+}
+
+static void gen9_sseu_device_status(struct drm_device *dev,
+				    struct sseu_dev_status *stat)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int s_max = 3, ss_max = 4;
+	int s, ss;
+	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+	/* BXT has a single slice and at most 3 subslices. */
+	if (IS_BROXTON(dev)) {
+		s_max = 1;
+		ss_max = 3;
+	}
+
+	for (s = 0; s < s_max; s++) {
+		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
+		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
+		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
+	}
+
+	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+		     GEN9_PGCTL_SSA_EU19_ACK |
+		     GEN9_PGCTL_SSA_EU210_ACK |
+		     GEN9_PGCTL_SSA_EU311_ACK;
+	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+		     GEN9_PGCTL_SSB_EU19_ACK |
+		     GEN9_PGCTL_SSB_EU210_ACK |
+		     GEN9_PGCTL_SSB_EU311_ACK;
+
+	for (s = 0; s < s_max; s++) {
+		unsigned int ss_cnt = 0;
+
+		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+			/* skip disabled slice */
+			continue;
+
+		stat->slice_total++;
+
+		if (IS_SKYLAKE(dev))
+			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+
+		for (ss = 0; ss < ss_max; ss++) {
+			unsigned int eu_cnt;
+
+			if (IS_BROXTON(dev) &&
+			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+				/* skip disabled subslice */
+				continue;
+
+			if (IS_BROXTON(dev))
+				ss_cnt++;
+
+			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+					       eu_mask[ss%2]);
+			stat->eu_total += eu_cnt;
+			stat->eu_per_subslice = max(stat->eu_per_subslice,
+						    eu_cnt);
+		}
+
+		stat->subslice_total += ss_cnt;
+		stat->subslice_per_slice = max(stat->subslice_per_slice,
+					       ss_cnt);
+	}
+}
+
+static void broadwell_sseu_device_status(struct drm_device *dev,
+					 struct sseu_dev_status *stat)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int s;
+	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+
+	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+
+	if (stat->slice_total) {
+		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
+		stat->subslice_total = stat->slice_total *
+				       stat->subslice_per_slice;
+		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
+		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+
+		/* subtract fused off EU(s) from enabled slice(s) */
+		for (s = 0; s < stat->slice_total; s++) {
+			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+
+			stat->eu_total -= hweight8(subslice_7eu);
+		}
+	}
+}
+
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct sseu_dev_status stat;
+
+	if (INTEL_INFO(dev)->gen < 8)
+		return -ENODEV;
+
+	seq_puts(m, "SSEU Device Info\n");
+	seq_printf(m, "  Available Slice Total: %u\n",
+		   INTEL_INFO(dev)->slice_total);
+	seq_printf(m, "  Available Subslice Total: %u\n",
+		   INTEL_INFO(dev)->subslice_total);
+	seq_printf(m, "  Available Subslice Per Slice: %u\n",
+		   INTEL_INFO(dev)->subslice_per_slice);
+	seq_printf(m, "  Available EU Total: %u\n",
+		   INTEL_INFO(dev)->eu_total);
+	seq_printf(m, "  Available EU Per Subslice: %u\n",
+		   INTEL_INFO(dev)->eu_per_subslice);
+	seq_printf(m, "  Has Slice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_slice_pg));
+	seq_printf(m, "  Has Subslice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_subslice_pg));
+	seq_printf(m, "  Has EU Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_eu_pg));
+
+	seq_puts(m, "SSEU Device Status\n");
+	memset(&stat, 0, sizeof(stat));
+	if (IS_CHERRYVIEW(dev)) {
+		cherryview_sseu_device_status(dev, &stat);
+	} else if (IS_BROADWELL(dev)) {
+		broadwell_sseu_device_status(dev, &stat);
+	} else if (INTEL_INFO(dev)->gen >= 9) {
+		gen9_sseu_device_status(dev, &stat);
+	}
+	seq_printf(m, "  Enabled Slice Total: %u\n",
+		   stat.slice_total);
+	seq_printf(m, "  Enabled Subslice Total: %u\n",
+		   stat.subslice_total);
+	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
+		   stat.subslice_per_slice);
+	seq_printf(m, "  Enabled EU Total: %u\n",
+		   stat.eu_total);
+	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
+		   stat.eu_per_subslice);
+
+	return 0;
+}
+
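+/*
+ * i915_forcewake_user: holding this file open keeps all forcewake
+ * domains (plus a runtime-pm reference) grabbed on gen6+.
+ */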
+static int i915_forcewake_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	intel_runtime_pm_get(dev_priv);
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	return 0;
+}
+
+static int i915_forcewake_release(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+static const struct file_operations i915_forcewake_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_forcewake_open,
+	.release = i915_forcewake_release,
+};
+
+static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_forcewake_user",
+				  S_IRUSR,
+				  root, dev,
+				  &i915_forcewake_fops);
+	if (!ent)
+		return -ENOMEM;
+
+	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
+}
+
+static int i915_debugfs_create(struct dentry *root,
+			       struct drm_minor *minor,
+			       const char *name,
+			       const struct file_operations *fops)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file(name,
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  fops);
+	if (!ent)
+		return -ENOMEM;
+
+	return drm_add_fake_info_node(minor, ent, fops);
+}
+
+static const struct drm_info_list i915_debugfs_list[] = {
+	{"i915_capabilities", i915_capabilities, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
+	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_stolen", i915_gem_stolen_list_info },
+	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
+	{"i915_gem_request", i915_gem_request_info, 0},
+	{"i915_gem_seqno", i915_gem_seqno_info, 0},
+	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+	{"i915_gem_interrupt", i915_interrupt_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+	{"i915_guc_info", i915_guc_info, 0},
+	{"i915_guc_load_status", i915_guc_load_status_info, 0},
+	{"i915_guc_log_dump", i915_guc_log_dump, 0},
+	{"i915_frequency_info", i915_frequency_info, 0},
+	{"i915_hangcheck_info", i915_hangcheck_info, 0},
+	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, 0},
+	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_ips_status", i915_ips_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_opregion", i915_opregion, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+	{"i915_context_status", i915_context_status, 0},
+	{"i915_dump_lrc", i915_dump_lrc, 0},
+	{"i915_execlists", i915_execlists, 0},
+	{"i915_forcewake_domains", i915_forcewake_domains, 0},
+	{"i915_swizzle_info", i915_swizzle_info, 0},
+	{"i915_ppgtt_info", i915_ppgtt_info, 0},
+	{"i915_llc", i915_llc, 0},
+	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
+	{"i915_energy_uJ", i915_energy_uJ, 0},
+	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
+	{"i915_power_domain_info", i915_power_domain_info, 0},
+	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+	{"i915_dp_mst_info", i915_dp_mst_info, 0},
+	{"i915_wa_registers", i915_wa_registers, 0},
+	{"i915_ddb_info", i915_ddb_info, 0},
+	{"i915_sseu_status", i915_sseu_status, 0},
+	{"i915_drrs_status", i915_drrs_status, 0},
+	{"i915_rps_boost_info", i915_rps_boost_info, 0},
+};
+#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+
+static const struct i915_debugfs_files {
+	const char *name;
+	const struct file_operations *fops;
+} i915_debugfs_files[] = {
+	{"i915_wedged", &i915_wedged_fops},
+	{"i915_max_freq", &i915_max_freq_fops},
+	{"i915_min_freq", &i915_min_freq_fops},
+	{"i915_cache_sharing", &i915_cache_sharing_fops},
+	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
+	{"i915_gem_drop_caches", &i915_drop_caches_fops},
+	{"i915_error_state", &i915_error_state_fops},
+	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+	{"i915_fbc_false_color", &i915_fbc_fc_fops},
+	{"i915_dp_test_data", &i915_displayport_test_data_fops},
+	{"i915_dp_test_type", &i915_displayport_test_type_fops},
+	{"i915_dp_test_active", &i915_displayport_test_active_fops}
+};
+
+void intel_display_crc_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+
+	for_each_pipe(dev_priv, pipe) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+
+		pipe_crc->opened = false;
+		spin_lock_init(&pipe_crc->lock);
+		init_waitqueue_head(&pipe_crc->wq);
+	}
+}
+
+int i915_debugfs_init(struct drm_minor *minor)
+{
+	int ret, i;
+
+	ret = i915_forcewake_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		ret = i915_debugfs_create(minor->debugfs_root, minor,
+					  i915_debugfs_files[i].name,
+					  i915_debugfs_files[i].fops);
+		if (ret)
+			return ret;
+	}
+
+	return drm_debugfs_create_files(i915_debugfs_list,
+					I915_DEBUGFS_ENTRIES,
+					minor->debugfs_root, minor);
+}
+
+void i915_debugfs_cleanup(struct drm_minor *minor)
+{
+	int i;
+
+	drm_debugfs_remove_files(i915_debugfs_list,
+				 I915_DEBUGFS_ENTRIES, minor);
+
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+				 1, minor);
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *)&i915_pipe_crc_data[i];
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *) i915_debugfs_files[i].fops;
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+}
+
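+/*
+ * i915_dpcd: per-connector dump of selected DPCD ranges, read over the
+ * AUX channel while the connector is connected.
+ */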
+struct dpcd_block {
+	/* DPCD dump start address. */
+	unsigned int offset;
+	/* DPCD dump end address, inclusive. If unset, .size will be used. */
+	unsigned int end;
+	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
+	size_t size;
+	/* Only valid for eDP. */
+	bool edp;
+};
+
+static const struct dpcd_block i915_dpcd_debug[] = {
+	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
+	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
+	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
+	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
+	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
+	{ .offset = DP_SET_POWER },
+	{ .offset = DP_EDP_DPCD_REV },
+	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
+	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
+	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
+};
+
+static int i915_dpcd_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_dp *intel_dp =
+		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+	uint8_t buf[16];
+	ssize_t err;
+	int i;
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
+		const struct dpcd_block *b = &i915_dpcd_debug[i];
+		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
+
+		if (b->edp &&
+		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+			continue;
+
+		/* low tech for now */
+		if (WARN_ON(size > sizeof(buf)))
+			continue;
+
+		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
+		if (err <= 0) {
+			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
+				  size, b->offset, err);
+			continue;
+		}
+
+		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+	}
+
+	return 0;
+}
+
+static int i915_dpcd_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_dpcd_show, inode->i_private);
+}
+
+static const struct file_operations i915_dpcd_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_dpcd_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * i915_debugfs_connector_add - add i915 specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int i915_debugfs_connector_add(struct drm_connector *connector)
+{
+	struct dentry *root = connector->debugfs_entry;
+
+	/* The connector must have been registered beforehand. */
+	if (!root)
+		return -ENODEV;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
+				    &i915_dpcd_fops);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 0000000..61fcb3b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,1334 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/async.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_legacy.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_vgpu.h"
+#include "i915_trace.h"
+#include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
+#include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/oom.h>
+
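+/*
+ * GETPARAM ioctl: reports driver and hardware capabilities to
+ * userspace; the old UMS/DRI1 parameters are rejected outright.
+ */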
+static int i915_getparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	drm_i915_getparam_t *param = data;
+	int value;
+
+	switch (param->param) {
+	case I915_PARAM_IRQ_ACTIVE:
+	case I915_PARAM_ALLOW_BATCHBUFFER:
+	case I915_PARAM_LAST_DISPATCH:
+		/* Reject all old ums/dri params. */
+		return -ENODEV;
+	case I915_PARAM_CHIPSET_ID:
+		value = dev->pdev->device;
+		break;
+	case I915_PARAM_REVISION:
+		value = dev->pdev->revision;
+		break;
+	case I915_PARAM_HAS_GEM:
+		value = 1;
+		break;
+	case I915_PARAM_NUM_FENCES_AVAIL:
+		value = dev_priv->num_fence_regs;
+		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = 1;
+		break;
+	case I915_PARAM_HAS_BSD:
+		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		break;
+	case I915_PARAM_HAS_BLT:
+		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		break;
+	case I915_PARAM_HAS_VEBOX:
+		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		break;
+	case I915_PARAM_HAS_BSD2:
+		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+		break;
+	case I915_PARAM_HAS_RELAXED_FENCING:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_COHERENT_RINGS:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_CONSTANTS:
+		value = INTEL_INFO(dev)->gen >= 4;
+		break;
+	case I915_PARAM_HAS_RELAXED_DELTA:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_GEN7_SOL_RESET:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_LLC:
+		value = HAS_LLC(dev);
+		break;
+	case I915_PARAM_HAS_WT:
+		value = HAS_WT(dev);
+		break;
+	case I915_PARAM_HAS_ALIASING_PPGTT:
+		value = USES_PPGTT(dev);
+		break;
+	case I915_PARAM_HAS_WAIT_TIMEOUT:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_SECURE_BATCHES:
+		value = capable(CAP_SYS_ADMIN);
+		break;
+	case I915_PARAM_HAS_PINNED_BATCHES:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+		value = 1;
+		break;
+	case I915_PARAM_CMD_PARSER_VERSION:
+		value = i915_cmd_parser_get_version();
+		break;
+	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+		value = 1;
+		break;
+	case I915_PARAM_MMAP_VERSION:
+		value = 1;
+		break;
+	case I915_PARAM_SUBSLICE_TOTAL:
+		value = INTEL_INFO(dev)->subslice_total;
+		if (!value)
+			return -ENODEV;
+		break;
+	case I915_PARAM_EU_TOTAL:
+		value = INTEL_INFO(dev)->eu_total;
+		if (!value)
+			return -ENODEV;
+		break;
+	case I915_PARAM_HAS_GPU_RESET:
+		value = i915.enable_hangcheck &&
+			intel_has_gpu_reset(dev);
+		break;
+	case I915_PARAM_HAS_RESOURCE_STREAMER:
+		value = HAS_RESOURCE_STREAMER(dev);
+		break;
+	default:
+		DRM_DEBUG("Unknown parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (copy_to_user(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!dev_priv->bridge_dev) {
+		DRM_ERROR("bridge device not found\n");
+		return -1;
+	}
+	return 0;
+}
+
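+/*
+ * MCHBAR is the memory controller hub's register window. If the BIOS
+ * left it unmapped we allocate PCI space for it ourselves and flip the
+ * enable bit, undoing both again on teardown.
+ */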
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp_lo, temp_hi = 0;
+	u64 mchbar_addr;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+	if (mchbar_addr &&
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+		return 0;
+#endif
+
+	/* Get some space for it */
+	dev_priv->mch_res.name = "i915 MCHBAR";
+	dev_priv->mch_res.flags = IORESOURCE_MEM;
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+				     &dev_priv->mch_res,
+				     MCHBAR_SIZE, MCHBAR_SIZE,
+				     PCIBIOS_MIN_MEM,
+				     0, pcibios_align_resource,
+				     dev_priv->bridge_dev);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		dev_priv->mch_res.start = 0;
+		return ret;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+				       upper_32_bits(dev_priv->mch_res.start));
+
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
+			       lower_32_bits(dev_priv->mch_res.start));
+	return 0;
+}
+
+/* Set up MCHBAR if possible; note in mchbar_need_disable whether we must undo it on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+	bool enabled;
+
+	if (IS_VALLEYVIEW(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = false;
+
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		enabled = !!(temp & DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		enabled = temp & 1;
+	}
+
+	/* If it's already enabled, don't have to do anything */
+	if (enabled)
+		return;
+
+	if (intel_alloc_mchbar_resource(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = true;
+
+	/* Space is allocated or reserved, so enable it. */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+				       temp | DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+	}
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+	struct drm_device *dev = cookie;
+
+	intel_modeset_vga_set_state(dev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+	if (state == VGA_SWITCHEROO_ON) {
+		pr_info("switched on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		/* i915 resume handler doesn't set to D0 */
+		pci_set_power_state(dev->pdev, PCI_D0);
+		i915_resume_switcheroo(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	} else {
+		pr_err("switched off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		i915_suspend_switcheroo(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	/*
+	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
+	 * locking inversion with the driver load path. And the access here is
+	 * completely racy anyway. So don't bother with locking for now.
+	 */
+	return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+	.set_gpu_state = i915_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = i915_switcheroo_can_switch,
+};
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_parse_bios(dev);
+	if (ret)
+		DRM_INFO("failed to find VBIOS tables\n");
+
+	/* If we have more than one VGA card, then we need to arbitrate
+	 * access to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and
+	 * vga_client_register() fails with -ENODEV.
+	 */
+	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+	if (ret && ret != -ENODEV)
+		goto out;
+
+	intel_register_dsm_handler();
+
+	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+	if (ret)
+		goto cleanup_vga_client;
+
+	/* Initialise stolen first so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto cleanup_vga_switcheroo;
+
+	intel_power_domains_init_hw(dev_priv);
+
+	ret = intel_irq_install(dev_priv);
+	if (ret)
+		goto cleanup_gem_stolen;
+
+	intel_setup_gmbus(dev);
+
+	/* Important: The output setup functions called by modeset_init need
+	 * working irqs for e.g. gmbus and dp aux transfers. */
+	intel_modeset_init(dev);
+
+	intel_guc_ucode_init(dev);
+
+	ret = i915_gem_init(dev);
+	if (ret)
+		goto cleanup_irq;
+
+	intel_modeset_gem_init(dev);
+
+	/* Always safe in the mode setting case. */
+	/* FIXME: do pre/post-mode set stuff in core KMS code */
+	dev->vblank_disable_allowed = true;
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return 0;
+
+	ret = intel_fbdev_init(dev);
+	if (ret)
+		goto cleanup_gem;
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	intel_hpd_init(dev_priv);
+
+	/*
+	 * Some ports require correctly set-up hpd registers for detection to
+	 * work properly (otherwise connectors show up as ghost connected),
+	 * e.g. VGA on gm45.  Hence we can only set up the initial fbdev
+	 * config after hpd irqs are fully enabled. Now we should scan for
+	 * the initial config only once hotplug handling is enabled, but due
+	 * to screwed-up locking around kms/fbdev init we can't protect the
+	 * fbdev initial config scanning against hotplug events. Hence do
+	 * this first and ignore the tiny window where we will lose hotplug
+	 * notifications.
+	 */
+	async_schedule(intel_fbdev_initial_config, dev_priv);
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+cleanup_gem:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_context_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
+cleanup_irq:
+	intel_guc_ucode_fini(dev);
+	drm_irq_uninstall(dev);
+	intel_teardown_gmbus(dev);
+cleanup_gem_stolen:
+	i915_gem_cleanup_stolen(dev);
+cleanup_vga_switcheroo:
+	vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_FB)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	struct apertures_struct *ap;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	bool primary;
+	int ret;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
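+	/*
+	 * Claim the whole mappable GTT aperture, so that any firmware
+	 * framebuffer located inside it is kicked out.
+	 */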
+	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end;
+
+	primary =
+		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+	kfree(ap);
+
+	return ret;
+}
+#else
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	int ret = 0;
+
+	DRM_INFO("Replacing VGA console driver\n");
+
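+	/*
+	 * Bind the dummy console in place of vgacon before unregistering
+	 * vgacon; both steps must run under the console lock.
+	 */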
+	console_lock();
+	if (con_is_bound(&vga_con))
+		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+	if (ret == 0) {
+		ret = do_unregister_con_driver(&vga_con);
+
+		/* Ignore "already unregistered". */
+		if (ret == -ENODEV)
+			ret = 0;
+	}
+	console_unlock();
+
+	return ret;
+}
+#endif
+
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
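+	/*
+	 * DEV_INFO_FOR_EACH_FLAG is expanded twice: once with PRINT_S to
+	 * build a "%s%s..." format string at compile time, and once with
+	 * PRINT_FLAG to supply the matching "<name>," or "" argument for
+	 * each device flag.
+	 */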
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 dev_priv->dev->pdev->revision,
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
+static void cherryview_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	u32 fuse, eu_dis;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse = I915_READ(CHV_FUSE_GT);
+
+	info->slice_total = 1;
+
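+	/*
+	 * CHV has a single slice with up to two subslices of 8 EUs each;
+	 * count a subslice only when its fuse disable bit is clear, and
+	 * subtract the EUs fused off in its per-subslice disable mask.
+	 */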
+	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+				 CHV_FGT_EU_DIS_SS0_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+				 CHV_FGT_EU_DIS_SS1_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	info->subslice_total = info->subslice_per_slice;
+	/*
+	 * CHV is expected to always have a uniform distribution of EUs
+	 * across subslices.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				info->eu_total / info->subslice_total :
+				0;
+	/*
+	 * CHV supports subslice power gating on devices with more than
+	 * one subslice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice.
+	 */
+	info->has_slice_pg = 0;
+	info->has_subslice_pg = (info->subslice_total > 1);
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	int s_max = 3, ss_max = 4, eu_max = 8;
+	int s, ss;
+	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u8 eu_mask = 0xff;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+		   GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+		     GEN9_F2_SS_DIS_SHIFT;
+
+	info->slice_total = hweight32(s_enable);
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total *
+			       info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
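+		/* Each subslice owns an 8-bit EU-disable field in this register. */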
+		for (ss = 0; ss < ss_max; ss++) {
+			int eu_per_ss;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+						      eu_mask);
+
+			/*
+			 * Record which subslice(s) have 7 EUs, so we
+			 * can tune the hash used to spread work among
+			 * subslices if they are unbalanced.
+			 */
+			if (eu_per_ss == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_per_ss;
+		}
+	}
+
+	/*
+	 * SKL is expected to always have a uniform distribution
+	 * of EU across subslices with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery. BXT is expected to be perfectly uniform in EU
+	 * distribution.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				DIV_ROUND_UP(info->eu_total,
+					     info->subslice_total) : 0;
+	/*
+	 * SKL supports slice power gating on devices with more than
+	 * one slice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice. BXT supports subslice
+	 * power gating on devices with more than one subslice, and
+	 * supports EU power gating on devices with more than one EU
+	 * pair per subslice.
+	 */
+	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void broadwell_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	const int s_max = 3, ss_max = 3, eu_max = 8;
+	int s, ss;
+	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
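+	/*
+	 * The per-slice EU-disable masks straddle the GEN8_EU_DISABLE0..2
+	 * registers; stitch each slice's mask back together from the
+	 * fields of adjacent registers.
+	 */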
+	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+			 (32 - GEN8_EU_DIS0_S1_SHIFT));
+	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+			 (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	info->slice_total = hweight32(s_enable);
+
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		for (ss = 0; ss < ss_max; ss++) {
+			u32 n_disabled;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+			/*
+			 * Record which subslices have 7 EUs.
+			 */
+			if (eu_max - n_disabled == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_max - n_disabled;
+		}
+	}
+
+	/*
+	 * BDW is expected to always have a uniform distribution of EU across
+	 * subslices with the exception that any one EU in any one subslice may
+	 * be fused off for die recovery.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+	/*
+	 * BDW supports slice power gating on devices with more than
+	 * one slice.
+	 */
+	info->has_slice_pg = (info->slice_total > 1);
+	info->has_subslice_pg = 0;
+	info->has_eu_pg = 0;
+}
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been setup as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	enum pipe pipe;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
+	if (IS_BROXTON(dev)) {
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev))
+		for_each_pipe(dev_priv, pipe)
+			info->num_sprites[pipe] = 2;
+	else
+		for_each_pipe(dev_priv, pipe)
+			info->num_sprites[pipe] = 1;
+
+	if (i915.disable_display) {
+		DRM_INFO("Display disabled (module parameter)\n");
+		info->num_pipes = 0;
+	} else if (info->num_pipes > 0 &&
+		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+		   !IS_VALLEYVIEW(dev)) {
+		u32 fuse_strap = I915_READ(FUSE_STRAP);
+		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+		/*
+		 * SFUSE_STRAP is supposed to have a bit signalling the display
+		 * is fused off. Unfortunately it seems that, at least in
+		 * certain cases, fused off display means that PCH display
+		 * reads don't land anywhere. In that case, we read 0s.
+		 *
+		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+		 * should be set when taking over after the firmware.
+		 */
+		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+		    (dev_priv->pch_type == PCH_CPT &&
+		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+			DRM_INFO("Display fused off, disabling\n");
+			info->num_pipes = 0;
+		}
+	}
+
+	/* Initialize slice/subslice/EU info */
+	if (IS_CHERRYVIEW(dev))
+		cherryview_sseu_info_init(dev);
+	else if (IS_BROADWELL(dev))
+		broadwell_sseu_info_init(dev);
+	else if (INTEL_INFO(dev)->gen >= 9)
+		gen9_sseu_info_init(dev);
+
+	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+			 info->has_slice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+			 info->has_subslice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+			 info->has_eu_pg ? "y" : "n");
+}
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/*
+	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+	 * CHV x1 PHY (DP/HDMI D)
+	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+	 */
+	if (IS_CHERRYVIEW(dev_priv)) {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+	} else {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+	}
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_i915_private *dev_priv;
+	struct intel_device_info *info, *device_info;
+	int ret = 0, mmio_bar, mmio_size;
+	uint32_t aperture_size;
+
+	info = (struct intel_device_info *) flags;
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = dev_priv;
+	dev_priv->dev = dev;
+
+	/* Setup the write-once "constant" device info */
+	device_info = (struct intel_device_info *)&dev_priv->info;
+	memcpy(device_info, info, sizeof(dev_priv->info));
+	device_info->device_id = dev->pdev->device;
+
+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	mutex_init(&dev_priv->backlight_lock);
+	spin_lock_init(&dev_priv->uncore.lock);
+	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	spin_lock_init(&dev_priv->mmio_flip_lock);
+	mutex_init(&dev_priv->sb_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+	mutex_init(&dev_priv->csr_lock);
+	mutex_init(&dev_priv->av_mutex);
+
+	intel_pm_setup(dev);
+
+	intel_display_crc_init(dev);
+
+	i915_dump_device_info(dev_priv);
+
+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
+	if (i915_get_bridge_dev(dev)) {
+		ret = -EIO;
+		goto free_priv;
+	}
+
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want mapped with ioremap_wc instead.
+	 * Fortunately, the register BAR remains the same size for all the
+	 * earlier generations up to Ironlake.
+	 */
+	if (info->gen < 5)
+		mmio_size = 512*1024;
+	else
+		mmio_size = 2*1024*1024;
+
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (!dev_priv->regs) {
+		DRM_ERROR("failed to map registers\n");
+		ret = -EIO;
+		goto put_bridge;
+	}
+
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_uncore_init(dev);
+
+	/* Load CSR Firmware for SKL */
+	intel_csr_ucode_init(dev);
+
+	ret = i915_gem_gtt_init(dev);
+	if (ret)
+		goto out_freecsr;
+
+	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
+	 * otherwise the vga fbdev driver falls over. */
+	ret = i915_kick_out_firmware_fb(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+		goto out_gtt;
+	}
+
+	ret = i915_kick_out_vgacon(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to remove conflicting VGA console\n");
+		goto out_gtt;
+	}
+
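+	/* Enable PCI bus mastering so the device can issue DMA. */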
+	pci_set_master(dev->pdev);
+
+	/* overlay on gen2 is broken and can't address above 1G */
+	if (IS_GEN2(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+	aperture_size = dev_priv->gtt.mappable_end;
+
+	dev_priv->gtt.mappable =
+		io_mapping_create_wc(dev_priv->gtt.mappable_base,
+				     aperture_size);
+	if (dev_priv->gtt.mappable == NULL) {
+		ret = -EIO;
+		goto out_gtt;
+	}
+
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);
+
+	/* The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and recording error state.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time.  Use an ordered one.
+	 */
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+	if (dev_priv->wq == NULL) {
+		DRM_ERROR("Failed to create our workqueue.\n");
+		ret = -ENOMEM;
+		goto out_mtrrfree;
+	}
+
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL) {
+		DRM_ERROR("Failed to create our dp workqueue.\n");
+		ret = -ENOMEM;
+		goto out_freewq;
+	}
+
+	dev_priv->gpu_error.hangcheck_wq =
+		alloc_ordered_workqueue("i915-hangcheck", 0);
+	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
+		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
+		ret = -ENOMEM;
+		goto out_freedpwq;
+	}
+
+	intel_irq_init(dev_priv);
+	intel_uncore_sanitize(dev);
+
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+	intel_opregion_setup(dev);
+
+	i915_gem_load(dev);
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 *
+	 * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		pci_enable_msi(dev->pdev);
+
+	intel_device_info_runtime_init(dev);
+
+	intel_init_dpio(dev_priv);
+
+	if (INTEL_INFO(dev)->num_pipes) {
+		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+		if (ret)
+			goto out_gem_unload;
+	}
+
+	intel_power_domains_init(dev_priv);
+
+	ret = i915_load_modeset_init(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to init modeset\n");
+		goto out_power_well;
+	}
+
+	/*
+	 * Notify a valid surface after modesetting,
+	 * when running inside a VM.
+	 */
+	if (intel_vgpu_active(dev))
+		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
+	i915_setup_sysfs(dev);
+
+	if (INTEL_INFO(dev)->num_pipes) {
+		/* Must be done after probing outputs */
+		intel_opregion_init(dev);
+		acpi_video_register();
+	}
+
+	if (IS_GEN5(dev))
+		intel_gpu_ips_init(dev_priv);
+
+	intel_runtime_pm_enable(dev_priv);
+
+	i915_audio_component_init(dev_priv);
+
+	return 0;
+
+out_power_well:
+	intel_power_domains_fini(dev_priv);
+	drm_vblank_cleanup(dev);
+out_gem_unload:
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);
+
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_teardown_mchbar(dev);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+out_freedpwq:
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_freewq:
+	destroy_workqueue(dev_priv->wq);
+out_mtrrfree:
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
+	io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+	i915_global_gtt_cleanup(dev);
+out_freecsr:
+	intel_csr_ucode_fini(dev);
+	intel_uncore_fini(dev);
+	pci_iounmap(dev->pdev, dev_priv->regs);
+put_bridge:
+	pci_dev_put(dev_priv->bridge_dev);
+free_priv:
+	kmem_cache_destroy(dev_priv->requests);
+	kmem_cache_destroy(dev_priv->vmas);
+	kmem_cache_destroy(dev_priv->objects);
+	kfree(dev_priv);
+	return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	i915_audio_component_cleanup(dev_priv);
+
+	ret = i915_gem_suspend(dev);
+	if (ret) {
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+		return ret;
+	}
+
+	intel_power_domains_fini(dev_priv);
+
+	intel_gpu_ips_teardown();
+
+	i915_teardown_sysfs(dev);
+
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);
+
+	io_mapping_free(dev_priv->gtt.mappable);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
+
+	acpi_video_unregister();
+
+	intel_fbdev_fini(dev);
+
+	drm_vblank_cleanup(dev);
+
+	intel_modeset_cleanup(dev);
+
+	/*
+	 * free the memory space allocated for the child device
+	 * config parsed from VBT
+	 */
+	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+		kfree(dev_priv->vbt.child_dev);
+		dev_priv->vbt.child_dev = NULL;
+		dev_priv->vbt.child_dev_num = 0;
+	}
+	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+
+	vga_switcheroo_unregister_client(dev->pdev);
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+	/* Free error state after interrupts are fully disabled. */
+	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+	i915_destroy_error_state(dev);
+
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_opregion_fini(dev);
+
+	/* Flush any outstanding unpin_work. */
+	flush_workqueue(dev_priv->wq);
+
+	intel_guc_ucode_fini(dev);
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_context_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_cleanup_cfb(dev_priv);
+	i915_gem_cleanup_stolen(dev);
+
+	intel_csr_ucode_fini(dev);
+
+	intel_teardown_mchbar(dev);
+
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+	destroy_workqueue(dev_priv->wq);
+	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+
+	i915_global_gtt_cleanup(dev);
+
+	intel_uncore_fini(dev);
+	if (dev_priv->regs != NULL)
+		pci_iounmap(dev->pdev, dev_priv->regs);
+
+	kmem_cache_destroy(dev_priv->requests);
+	kmem_cache_destroy(dev_priv->vmas);
+	kmem_cache_destroy(dev_priv->objects);
+	pci_dev_put(dev_priv->bridge_dev);
+	kfree(dev_priv);
+
+	return 0;
+}
+
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	return i915_gem_open(dev, file);
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+void i915_driver_lastclose(struct drm_device *dev)
+{
+	intel_fbdev_restore_mode(dev);
+	vga_switcheroo_process_delayed_switch();
+}
+
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_modeset_preclose(dev, file);
+}
+
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	return -ENODEV;
+}
+
+const struct drm_ioctl_desc i915_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+};
+
+int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 0000000..a6ad938
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,1786 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+
+static struct drm_driver driver;
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  CHV_PIPE_C_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   CHV_TRANSCODER_C_OFFSET, }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+			     CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+static const struct intel_device_info intel_i830_info = {
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+	.gen = 3, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
+	.has_hotplug = 1,
+	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+	.gen = 4, .is_crestline = 1, .num_pipes = 2,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+	.gen = 3, .is_g33 = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+	.gen = 4, .is_g4x = 1, .num_pipes = 2,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+	.gen = 5, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+	.gen = 5, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+	.gen = 6, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+	.gen = 6, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES  \
+	.gen = 7, .num_pipes = 3, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_fbc = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.is_mobile = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.num_pipes = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+	GEN7_FEATURES,
+	.is_mobile = 1,
+	.num_pipes = 2,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
+	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+	GEN7_FEATURES,
+	.num_pipes = 2,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
+	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+	GEN7_FEATURES,
+	.is_haswell = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+	GEN7_FEATURES,
+	.is_haswell = 1,
+	.is_mobile = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_d_info = {
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	GEN_CHV_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+	.is_preliminary = 1,
+	.gen = 9,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.num_pipes = 3,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+/*
+ * Make sure the device matches below are ordered from most specific to
+ * most general.  For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+	INTEL_I830_IDS(&intel_i830_info),	\
+	INTEL_I845G_IDS(&intel_845g_info),	\
+	INTEL_I85X_IDS(&intel_i85x_info),	\
+	INTEL_I865G_IDS(&intel_i865g_info),	\
+	INTEL_I915G_IDS(&intel_i915g_info),	\
+	INTEL_I915GM_IDS(&intel_i915gm_info),	\
+	INTEL_I945G_IDS(&intel_i945g_info),	\
+	INTEL_I945GM_IDS(&intel_i945gm_info),	\
+	INTEL_I965G_IDS(&intel_i965g_info),	\
+	INTEL_G33_IDS(&intel_g33_info),		\
+	INTEL_I965GM_IDS(&intel_i965gm_info),	\
+	INTEL_GM45_IDS(&intel_gm45_info), 	\
+	INTEL_G45_IDS(&intel_g45_info), 	\
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
+	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
+	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
+	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
+	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
+	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
+	INTEL_CHV_IDS(&intel_cherryview_info),	\
+	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
+	INTEL_BXT_IDS(&intel_broxton_info)
+
+static const struct pci_device_id pciidlist[] = {
+	INTEL_PCI_IDS,
+	{0, 0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+{
+	enum intel_pch ret = PCH_NOP;
+
+	/*
+	 * In a virtualized passthrough environment we can be in a
+	 * setup where the ISA bridge is not able to be passed through.
+	 * In this case, a south bridge can be emulated and we have to
+	 * make an educated guess as to which PCH is really there.
+	 */
+
+	if (IS_GEN5(dev)) {
+		ret = PCH_IBX;
+		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
+	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+		ret = PCH_CPT;
+		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = PCH_LPT;
+		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
+	} else if (IS_SKYLAKE(dev)) {
+		ret = PCH_SPT;
+		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+	}
+
+	return ret;
+}
+
+void intel_detect_pch(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pch = NULL;
+
+	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+	 * (which really amounts to a PCH but no South Display).
+	 */
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		dev_priv->pch_type = PCH_NOP;
+		return;
+	}
+
+	/*
+	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+	 * make graphics device passthrough easy for the VMM: it only needs
+	 * to expose the ISA bridge to let the driver know the real hardware
+	 * underneath. This is a requirement from the virtualization team.
+	 *
+	 * In some virtualized environments (e.g. XEN), there may be an
+	 * irrelevant ISA bridge in the system. To work reliably, we should
+	 * scan through all the ISA bridge devices and check for the first
+	 * match, instead of only checking the first one.
+	 */
+	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			dev_priv->pch_id = id;
+
+			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_IBX;
+				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+				WARN_ON(!IS_GEN5(dev));
+			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_CPT;
+				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+				/* PantherPoint is CPT compatible */
+				dev_priv->pch_type = PCH_CPT;
+				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_SPT;
+				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+				WARN_ON(!IS_SKYLAKE(dev));
+			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_SPT;
+				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+				WARN_ON(!IS_SKYLAKE(dev));
+			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+				    pch->subsystem_vendor == 0x1af4 &&
+				    pch->subsystem_device == 0x1100)) {
+				dev_priv->pch_type = intel_virt_detect_pch(dev);
+			} else
+				continue;
+
+			break;
+		}
+	}
+	if (!pch)
+		DRM_DEBUG_KMS("No PCH found.\n");
+
+	pci_dev_put(pch);
+}
+
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return false;
+
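+	/* A non-negative module parameter overrides the heuristics below. */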
+	if (i915.semaphores >= 0)
+		return i915.semaphores;
+
+	/* TODO: make semaphores and Execlists play nicely together */
+	if (i915.enable_execlists)
+		return false;
+
+	/* Until we get further testing... */
+	if (IS_GEN8(dev))
+		return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Enable semaphores on SNB when IO remapping is off */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+void i915_firmware_load_error_print(const char *fw_path, int err)
+{
+	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
+
+	/*
+	 * If the reason is not known, assume -ENOENT since that's the most
+	 * usual failure mode.
+	 */
+	if (!err)
+		err = -ENOENT;
+
+	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
+		return;
+
+	DRM_ERROR(
+	  "The driver is built-in, so to load the firmware you need to\n"
+	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
+	  "in your initrd/initramfs image.\n");
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+		if (intel_encoder->suspend)
+			intel_encoder->suspend(intel_encoder);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+			      bool rpm_resume);
+static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+
+static int i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	pci_power_t opregion_target_state;
+	int error;
+
+	/* ignore lid events during suspend */
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	dev_priv->modeset_restore = MODESET_SUSPENDED;
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+
+	/* We do a lot of poking in a lot of registers, so make sure they
+	 * work properly. */
+	intel_display_set_init_power(dev_priv, true);
+
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	error = i915_gem_suspend(dev);
+	if (error) {
+		dev_err(&dev->pdev->dev,
+			"GEM idle failed, resume might fail\n");
+		return error;
+	}
+
+	intel_guc_suspend(dev);
+
+	intel_suspend_gt_powersave(dev);
+
+	/*
+	 * Disable CRTCs directly since we want to preserve sw state
+	 * for _thaw. Also, power gate the CRTC power wells.
+	 */
+	drm_modeset_lock_all(dev);
+	intel_display_suspend(dev);
+	drm_modeset_unlock_all(dev);
+
+	intel_dp_mst_suspend(dev);
+
+	intel_runtime_pm_disable_interrupts(dev_priv);
+	intel_hpd_cancel_work(dev_priv);
+
+	intel_suspend_encoders(dev_priv);
+
+	intel_suspend_hw(dev);
+
+	i915_gem_suspend_gtt_mappings(dev);
+
+	i915_save_state(dev);
+
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
+		opregion_target_state = PCI_D1;
+#endif
+	intel_opregion_notify_adapter(dev, opregion_target_state);
+
+	intel_uncore_forcewake_reset(dev, false);
+	intel_opregion_fini(dev);
+
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+
+	dev_priv->suspend_count++;
+
+	intel_display_set_init_power(dev_priv, false);
+
+	return 0;
+}
+
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+{
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	int ret;
+
+	ret = intel_suspend_complete(dev_priv);
+
+	if (ret) {
+		DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+		return ret;
+	}
+
+	pci_disable_device(drm_dev->pdev);
+	/*
+	 * During hibernation on some platforms the BIOS may try to access
+	 * the device even though it's already in D3 and hang the machine. So
+	 * leave the device in D0 on those platforms and hope the BIOS will
+	 * power down the device properly. The issue was seen on multiple old
+	 * GENs with different BIOS vendors, so having an explicit blacklist
+	 * is impractical; apply the workaround on everything pre GEN6. The
+	 * platforms where the issue was seen:
+	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
+	 * Fujitsu FSC S7110
+	 * Acer Aspire 1830T
+	 */
+	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+	return 0;
+}
+
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+			 state.event != PM_EVENT_FREEZE))
+		return -EINVAL;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	error = i915_drm_suspend(dev);
+	if (error)
+		return error;
+
+	return i915_drm_suspend_late(dev, false);
+}
+
+static int i915_drm_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_restore_gtt_mappings(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	i915_restore_state(dev);
+	intel_opregion_setup(dev);
+
+	intel_init_pch_refclk(dev);
+	drm_mode_config_reset(dev);
+
+	/*
+	 * Interrupts have to be enabled before any batches are run. If not the
+	 * GPU will hang. i915_gem_init_hw() will initiate batches to
+	 * update/restore the context.
+	 *
+	 * Modeset enabling in intel_modeset_init_hw() also needs working
+	 * interrupts.
+	 */
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	if (i915_gem_init_hw(dev)) {
+		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_guc_resume(dev);
+
+	intel_modeset_init_hw(dev);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	drm_modeset_lock_all(dev);
+	intel_display_resume(dev);
+	drm_modeset_unlock_all(dev);
+
+	intel_dp_mst_resume(dev);
+
+	/*
+	 * ... but also need to make sure that hotplug processing
+	 * doesn't cause havoc. Like in the driver load code we don't
+	 * bother with the tiny race here where we might lose hotplug
+	 * notifications.
+	 */
+	intel_hpd_init(dev_priv);
+	/* Config may have changed between suspend and resume */
+	drm_helper_hpd_irq_event(dev);
+
+	intel_opregion_init(dev);
+
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	dev_priv->modeset_restore = MODESET_DONE;
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+
+	intel_opregion_notify_adapter(dev, PCI_D0);
+
+	drm_kms_helper_poll_enable(dev);
+
+	return 0;
+}
+
+static int i915_drm_resume_early(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	/*
+	 * We have a resume ordering issue with the snd-hda driver also
+	 * requiring our device to be power up. Due to the lack of a
+	 * parent/child relationship we currently solve this with an early
+	 * resume hook.
+	 *
+	 * FIXME: This should be solved with a special hdmi sink device or
+	 * similar so that power domains can be employed.
+	 */
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	if (IS_VALLEYVIEW(dev_priv))
+		ret = vlv_resume_prepare(dev_priv, false);
+	if (ret)
+		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+			  ret);
+
+	intel_uncore_early_sanitize(dev, true);
+
+	if (IS_BROXTON(dev))
+		ret = bxt_resume_prepare(dev_priv);
+	else if (IS_SKYLAKE(dev_priv))
+		ret = skl_resume_prepare(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_disable_pc8(dev_priv);
+
+	intel_uncore_sanitize(dev);
+	intel_power_domains_init_hw(dev_priv);
+
+	return ret;
+}
+
+int i915_resume_switcheroo(struct drm_device *dev)
+{
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	ret = i915_drm_resume_early(dev);
+	if (ret)
+		return ret;
+
+	return i915_drm_resume(dev);
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @dev: drm device to reset
+ *
+ * Reset the chip.  Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ *   - reset the chip using the reset reg
+ *   - re-init context state
+ *   - re-init hardware status page
+ *   - re-init ring buffer
+ *   - re-init interrupt state
+ *   - re-init display
+ */
+int i915_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool simulated;
+	int ret;
+
+	intel_reset_gt_powersave(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_reset(dev);
+
+	simulated = dev_priv->gpu_error.stop_rings != 0;
+
+	ret = intel_gpu_reset(dev);
+
+	/* Also reset the gpu hangman. */
+	if (simulated) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_INFO("Reset not implemented, but ignoring "
+				 "error for simulated gpu hangs\n");
+			ret = 0;
+		}
+	}
+
+	if (i915_stop_ring_allow_warn(dev_priv))
+		pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+	if (ret) {
+		DRM_ERROR("Failed to reset chip: %i\n", ret);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	intel_overlay_reset(dev_priv);
+
+	/* Ok, now get things going again... */
+
+	/*
+	 * Everything depends on having the GTT running, so we need to start
+	 * there.  Fortunately we don't need to do this unless we reset the
+	 * chip at a PCI level.
+	 *
+	 * Next we need to restore the context, but we don't use those
+	 * yet either...
+	 *
+	 * Ring buffer needs to be re-initialized in the KMS case, or if X
+	 * was running at the time of the reset (i.e. we weren't VT
+	 * switched away).
+	 */
+
+	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+	dev_priv->gpu_error.reload_in_reset = true;
+
+	ret = i915_gem_init_hw(dev);
+
+	dev_priv->gpu_error.reload_in_reset = false;
+
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		DRM_ERROR("Failed hw init on reset %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * rps/rc6 re-init is necessary to restore state lost after the
+	 * reset and the re-install of gt irqs. Skip for ironlake per
+	 * previous concerns that it doesn't respond well to some forms
+	 * of re-init after reset.
+	 */
+	if (INTEL_INFO(dev)->gen > 5)
+		intel_enable_gt_powersave(dev);
+
+	return 0;
+}
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct intel_device_info *intel_info =
+		(struct intel_device_info *) ent->driver_data;
+
+	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+i915_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
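+/*
+ * dev_pm_ops callbacks: thin wrappers that bail out if the DRM device isn't
+ * initialized or is switched off via vga_switcheroo, and otherwise delegate
+ * to the i915_drm_suspend/resume helpers.
+ */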
+static int i915_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_suspend(drm_dev);
+}
+
+static int i915_pm_suspend_late(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+	/*
+	 * We have a suspend ordering issue with the snd-hda driver also
+	 * requiring our device to be powered up. Due to the lack of a
+	 * parent/child relationship we currently solve this with a late
+	 * suspend hook.
+	 *
+	 * FIXME: This should be solved with a special hdmi sink device or
+	 * similar so that power domains can be employed.
+	 */
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_suspend_late(drm_dev, false);
+}
+
+static int i915_pm_poweroff_late(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_suspend_late(drm_dev, true);
+}
+
+static int i915_pm_resume_early(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_resume_early(drm_dev);
+}
+
+static int i915_pm_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_resume(drm_dev);
+}
+
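+/*
+ * Platform-specific suspend/resume helpers, dispatched from
+ * intel_suspend_complete() and the early resume paths based on
+ * IS_BROXTON/IS_SKYLAKE/IS_HASWELL/... checks.
+ */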
+static int skl_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
+
+	skl_uninit_cdclk(dev_priv);
+
+	return 0;
+}
+
+static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	hsw_enable_pc8(dev_priv);
+
+	return 0;
+}
+
+static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	/* TODO: when DC5 support is added disable DC5 here. */
+
+	broxton_ddi_phy_uninit(dev);
+	broxton_uninit_cdclk(dev);
+	bxt_enable_dc9(dev_priv);
+
+	return 0;
+}
+
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	/* TODO: when CSR FW support is added make sure the FW is loaded */
+
+	bxt_disable_dc9(dev_priv);
+
+	/*
+	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
+	 * is available.
+	 */
+	broxton_init_cdclk(dev);
+	broxton_ddi_phy_init(dev);
+	intel_prepare_ddi(dev);
+
+	return 0;
+}
+
+static int skl_resume_prepare(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	skl_init_cdclk(dev_priv);
+	intel_csr_load_program(dev);
+
+	return 0;
+}
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ *                    used internally by the HW in a way that doesn't depend on
+ *                    keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ *   These have no effect on the driver's operation, so we don't save/restore
+ *   them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ *   the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that based on the above 3 criteria can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	int i;
+
+	/* GAM 0x4000-0x4770 */
+	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
+	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
+	s->arb_mode		= I915_READ(ARB_MODE);
+	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
+	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
+
+	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+
+	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
+	s->ecochk		= I915_READ(GAM_ECOCHK);
+	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
+	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
+
+	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	s->g3dctl		= I915_READ(VLV_G3DCTL);
+	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
+	s->mbctl		= I915_READ(GEN6_MBCTL);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
+	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
+	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
+	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
+	s->rstctl		= I915_READ(GEN6_RSTCTL);
+	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
+	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
+	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
+	s->ecobus		= I915_READ(ECOBUS);
+	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
+	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
+	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
+	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
+	s->rcedata		= I915_READ(VLV_RCEDATA);
+	s->spare2gh		= I915_READ(VLV_SPAREG2H);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	s->gt_imr		= I915_READ(GTIMR);
+	s->gt_ier		= I915_READ(GTIER);
+	s->pm_imr		= I915_READ(GEN6_PMIMR);
+	s->pm_ier		= I915_READ(GEN6_PMIER);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	s->tilectl		= I915_READ(TILECTL);
+	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
+	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
+	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
+	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
+	s->pcbr			= I915_READ(VLV_PCBR);
+	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
+
+	/*
+	 * Not saving any of:
+	 * DFT,		0x9800-0x9EC0
+	 * SARB,	0xB000-0xB1FC
+	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
+	 * PCI CFG
+	 */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	u32 val;
+	int i;
+
+	/* GAM 0x4000-0x4770 */
+	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
+	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
+	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
+	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
+	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
+	I915_WRITE(GAM_ECOCHK,		s->ecochk);
+	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
+	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
+
+	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
+	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
+	I915_WRITE(GEN6_MBCTL,		s->mbctl);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
+	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
+	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
+	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
+	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
+	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
+	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
+	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
+	I915_WRITE(ECOBUS,		s->ecobus);
+	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
+	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
+	I915_WRITE(VLV_RCEDATA,		s->rcedata);
+	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	I915_WRITE(GTIMR,		s->gt_imr);
+	I915_WRITE(GTIER,		s->gt_ier);
+	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
+	I915_WRITE(GEN6_PMIER,		s->pm_ier);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	I915_WRITE(TILECTL,			s->tilectl);
+	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
+	/*
+	 * Preserve the GT allow wake and GFX force clock bits; they are not
+	 * restored here, as they are used to control the s0ix suspend/resume
+	 * sequence by the caller.
+	 */
+	val = I915_READ(VLV_GTLC_WAKE_CTRL);
+	val &= VLV_GTLC_ALLOWWAKEREQ;
+	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+
+	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	val &= VLV_GFX_CLK_FORCE_ON_BIT;
+	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
+	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
+	I915_WRITE(VLV_PCBR,			s->pcbr);
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
+}
+
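+/*
+ * Force the Gunit graphics clock on or off. When forcing it on, wait up to
+ * 20 ms for the clock status bit to confirm; forcing it off is
+ * fire-and-forget. The forced-on clock keeps the Gunit registers accessible
+ * for the save/restore helpers above.
+ */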
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+{
+	u32 val;
+	int err;
+
+#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+
+	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+	if (force_on)
+		val |= VLV_GFX_CLK_FORCE_ON_BIT;
+	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+	if (!force_on)
+		return 0;
+
+	err = wait_for(COND, 20);
+	if (err)
+		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
+			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+
+	return err;
+#undef COND
+}
+
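+/*
+ * Allow or disallow GT wake requests, waiting up to 1 ms for the
+ * ALLOWWAKEACK status bit to match the requested state.
+ */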
+static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+{
+	u32 val;
+	int err = 0;
+
+	val = I915_READ(VLV_GTLC_WAKE_CTRL);
+	val &= ~VLV_GTLC_ALLOWWAKEREQ;
+	if (allow)
+		val |= VLV_GTLC_ALLOWWAKEREQ;
+	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+	POSTING_READ(VLV_GTLC_WAKE_CTRL);
+
+#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
+	      allow)
+	err = wait_for(COND, 1);
+	if (err)
+		DRM_ERROR("timeout disabling GT waking\n");
+	return err;
+#undef COND
+}
+
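+/*
+ * Wait for the render and media power wells to reach the requested on/off
+ * state. RC6 transitions can take up to 2 ms, hence the 3 ms timeout used
+ * below.
+ */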
+static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+				 bool wait_for_on)
+{
+	u32 mask;
+	u32 val;
+	int err;
+
+	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+	val = wait_for_on ? mask : 0;
+#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
+	if (COND)
+		return 0;
+
+	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
+			wait_for_on ? "on" : "off",
+			I915_READ(VLV_GTLC_PW_STATUS));
+
+	/*
+	 * RC6 transitioning can be delayed up to 2 msec (see
+	 * valleyview_enable_rps), use 3 msec for safety.
+	 */
+	err = wait_for(COND, 3);
+	if (err)
+		DRM_ERROR("timeout waiting for GT wells to go %s\n",
+			  wait_for_on ? "on" : "off");
+
+	return err;
+#undef COND
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
+{
+	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+		return;
+
+	DRM_ERROR("GT register access while GT waking disabled\n");
+	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
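+/*
+ * s0ix entry sequence: wait for the GT wells to turn off (debug-only check),
+ * force the gfx clock on so the Gunit registers stay accessible, disallow GT
+ * wake requests, save the Gunit state (skipped on Cherryview) and finally
+ * release the clock force. On failure, waking is re-allowed and the clock
+ * force released for safety.
+ */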
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	u32 mask;
+	int err;
+
+	/*
+	 * Bspec defines the following GT power well "on" flags as debug only,
+	 * so don't treat them as hard failures.
+	 */
+	(void)vlv_wait_for_gt_wells(dev_priv, false);
+
+	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+	vlv_check_no_gt_access(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, true);
+	if (err)
+		goto err1;
+
+	err = vlv_allow_gt_wake(dev_priv, false);
+	if (err)
+		goto err2;
+
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_save_gunit_s0ix_state(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (err)
+		goto err2;
+
+	return 0;
+
+err2:
+	/* For safety always re-enable waking and disable gfx clock forcing */
+	vlv_allow_gt_wake(dev_priv, true);
+err1:
+	vlv_force_gfx_clock(dev_priv, false);
+
+	return err;
+}
+
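+/*
+ * Inverse of vlv_suspend_complete(): force the gfx clock on, restore the
+ * Gunit state (skipped on Cherryview), re-allow GT waking and release the
+ * clock force. Errors don't abort the sequence; the first one is returned.
+ */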
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+				bool rpm_resume)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int err;
+	int ret;
+
+	/*
+	 * If any of the steps fail just try to continue, that's the best we
+	 * can do at this point. Return the first error code (which will also
+	 * leave RPM permanently disabled).
+	 */
+	ret = vlv_force_gfx_clock(dev_priv, true);
+
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_restore_gunit_s0ix_state(dev_priv);
+
+	err = vlv_allow_gt_wake(dev_priv, true);
+	if (!ret)
+		ret = err;
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (!ret)
+		ret = err;
+
+	vlv_check_no_gt_access(dev_priv);
+
+	if (rpm_resume) {
+		intel_init_clock_gating(dev);
+		i915_gem_restore_fences(dev);
+	}
+
+	return ret;
+}
+
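+/*
+ * Runtime PM (D3/S0ix) entry point. Unlike system suspend this can race with
+ * other struct_mutex users, hence the trylock and the -EAGAIN rescheduling
+ * below.
+ */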
+static int intel_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+		return -ENODEV;
+
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+		return -ENODEV;
+
+	DRM_DEBUG_KMS("Suspending device\n");
+
+	/*
+	 * We could deadlock here in case another thread holding struct_mutex
+	 * calls RPM suspend concurrently, since the RPM suspend will wait
+	 * first for this RPM suspend to finish. In this case the concurrent
+	 * RPM resume will be followed by its RPM suspend counterpart. Still
+	 * for consistency return -EAGAIN, which will reschedule this suspend.
+	 */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
+		/*
+		 * Bump the expiration timestamp, otherwise the suspend won't
+		 * be rescheduled.
+		 */
+		pm_runtime_mark_last_busy(device);
+
+		return -EAGAIN;
+	}
+	/*
+	 * We are safe here against re-faults, since the fault handler takes
+	 * an RPM reference.
+	 */
+	i915_gem_release_all_mmaps(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_guc_suspend(dev);
+
+	intel_suspend_gt_powersave(dev);
+	intel_runtime_pm_disable_interrupts(dev_priv);
+
+	ret = intel_suspend_complete(dev_priv);
+	if (ret) {
+		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+		intel_runtime_pm_enable_interrupts(dev_priv);
+
+		return ret;
+	}
+
+	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+	intel_uncore_forcewake_reset(dev, false);
+	dev_priv->pm.suspended = true;
+
+	/*
+	 * FIXME: We really should find a document that references the arguments
+	 * used below!
+	 */
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+		 * being detected, and the call we do at intel_runtime_resume()
+		 * won't be able to restore them. Since PCI_D3hot matches the
+		 * actual specification and appears to be working, use it.
+		 */
+		intel_opregion_notify_adapter(dev, PCI_D3hot);
+	} else {
+		/*
+		 * current versions of firmware which depend on this opregion
+		 * notification have repurposed the D1 definition to mean
+		 * "runtime suspended" vs. what you would normally expect (D3)
+		 * to distinguish it from notifications that might be sent via
+		 * the suspend path.
+		 */
+		intel_opregion_notify_adapter(dev, PCI_D1);
+	}
+
+	assert_forcewakes_inactive(dev_priv);
+
+	DRM_DEBUG_KMS("Device suspended\n");
+	return 0;
+}
+
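+/*
+ * Runtime PM resume counterpart of intel_runtime_suspend(): notify the
+ * opregion, undo the platform-specific suspend_complete() steps and restore
+ * interrupts, swizzling and GT powersave state.
+ */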
+static int intel_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+		return -ENODEV;
+
+	DRM_DEBUG_KMS("Resuming device\n");
+
+	intel_opregion_notify_adapter(dev, PCI_D0);
+	dev_priv->pm.suspended = false;
+
+	intel_guc_resume(dev);
+
+	if (IS_GEN6(dev_priv))
+		intel_init_pch_refclk(dev);
+
+	if (IS_BROXTON(dev))
+		ret = bxt_resume_prepare(dev_priv);
+	else if (IS_SKYLAKE(dev))
+		ret = skl_resume_prepare(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_disable_pc8(dev_priv);
+	else if (IS_VALLEYVIEW(dev_priv))
+		ret = vlv_resume_prepare(dev_priv, true);
+
+	/*
+	 * No point in rolling back things in case of an error, as the best
+	 * we can do is to hope that things will still work (and disable RPM).
+	 */
+	i915_gem_init_swizzling(dev);
+	gen6_update_ring_freq(dev);
+
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	/*
+	 * On VLV/CHV display interrupts are part of the display
+	 * power well, so hpd is reinitialized from there. For
+	 * everyone else do it here.
+	 */
+	if (!IS_VALLEYVIEW(dev_priv))
+		intel_hpd_init(dev_priv);
+
+	intel_enable_gt_powersave(dev);
+
+	if (ret)
+		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+	else
+		DRM_DEBUG_KMS("Device resumed\n");
+
+	return ret;
+}
+
+/*
+ * This function implements common functionality of runtime and system
+ * suspend sequence.
+ */
+static int intel_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	if (IS_BROXTON(dev_priv))
+		ret = bxt_suspend_complete(dev_priv);
+	else if (IS_SKYLAKE(dev_priv))
+		ret = skl_suspend_complete(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		ret = hsw_suspend_complete(dev_priv);
+	else if (IS_VALLEYVIEW(dev_priv))
+		ret = vlv_suspend_complete(dev_priv);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static const struct dev_pm_ops i915_pm_ops = {
+	/*
+	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+	 * PMSG_RESUME]
+	 */
+	.suspend = i915_pm_suspend,
+	.suspend_late = i915_pm_suspend_late,
+	.resume_early = i915_pm_resume_early,
+	.resume = i915_pm_resume,
+
+	/*
+	 * S4 event handlers
+	 * @freeze, @freeze_late    : called (1) before creating the
+	 *                            hibernation image [PMSG_FREEZE] and
+	 *                            (2) after rebooting, before restoring
+	 *                            the image [PMSG_QUIESCE]
+	 * @thaw, @thaw_early       : called (1) after creating the hibernation
+	 *                            image, before writing it [PMSG_THAW]
+	 *                            and (2) after failing to create or
+	 *                            restore the image [PMSG_RECOVER]
+	 * @poweroff, @poweroff_late: called after writing the hibernation
+	 *                            image, before rebooting [PMSG_HIBERNATE]
+	 * @restore, @restore_early : called after rebooting and restoring the
+	 *                            hibernation image [PMSG_RESTORE]
+	 */
+	.freeze = i915_pm_suspend,
+	.freeze_late = i915_pm_suspend_late,
+	.thaw_early = i915_pm_resume_early,
+	.thaw = i915_pm_resume,
+	.poweroff = i915_pm_suspend,
+	.poweroff_late = i915_pm_poweroff_late,
+	.restore_early = i915_pm_resume_early,
+	.restore = i915_pm_resume,
+
+	/* S0ix (via runtime suspend) event handlers */
+	.runtime_suspend = intel_runtime_suspend,
+	.runtime_resume = intel_runtime_resume,
+};
+
+static const struct vm_operations_struct i915_gem_vm_ops = {
+	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations i915_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_gem_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = i915_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	/* Don't use MTRRs here; the Xserver or userspace app should
+	 * deal with them for Intel hardware.
+	 */
+	.driver_features =
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+	    DRIVER_RENDER | DRIVER_MODESET,
+	.load = i915_driver_load,
+	.unload = i915_driver_unload,
+	.open = i915_driver_open,
+	.lastclose = i915_driver_lastclose,
+	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
+	.set_busid = drm_pci_set_busid,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = i915_debugfs_init,
+	.debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+	.gem_free_object = i915_gem_free_object,
+	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
+	.dumb_create = i915_gem_dumb_create,
+	.dumb_map_offset = i915_gem_mmap_gtt,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.ioctls = i915_ioctls,
+	.fops = &i915_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver i915_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = i915_pci_probe,
+	.remove = i915_pci_remove,
+	.driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+	driver.num_ioctls = i915_max_ioctl;
+
+	/*
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or by the
+	 * vga_text_mode_force boot option.
+	 */
+
+	if (i915.modeset == 0)
+		driver.driver_features &= ~DRIVER_MODESET;
+
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && i915.modeset == -1)
+		driver.driver_features &= ~DRIVER_MODESET;
+#endif
+
+	if (!(driver.driver_features & DRIVER_MODESET)) {
+		/* Silently fail loading to not upset userspace. */
+		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
+		return 0;
+	}
+
+	if (i915.nuclear_pageflip)
+		driver.driver_features |= DRIVER_ATOMIC;
+
+	return drm_pci_init(&driver, &i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+	if (!(driver.driver_features & DRIVER_MODESET))
+		return; /* Never loaded a driver. */
+
+	drm_pci_exit(&driver, &i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 0000000..5044f22
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,3537 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include <uapi/drm/i915_drm.h>
+#include <uapi/drm/drm_fourcc.h>
+
+#include "i915_reg.h"
+#include "intel_bios.h"
+#include "intel_ringbuffer.h"
+#include "intel_lrc.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
+#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+#include <linux/backlight.h>
+#include <linux/hashtable.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
+#include <linux/pm_qos.h>
+#include "intel_guc.h"
+
+/* General customization:
+ */
+
+#define DRIVER_NAME		"i915"
+#define DRIVER_DESC		"Intel Graphics"
+#define DRIVER_DATE		"20151010"
+
+#undef WARN_ON
+/* Many gcc versions seem not to see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+	bool __i915_warn_cond = (x); \
+	if (__builtin_constant_p(__i915_warn_cond)) \
+		BUILD_BUG_ON(__i915_warn_cond); \
+	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#endif
+
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+			     (long) (x), __func__)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem.  This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({			\
+	int __ret_warn_on = !!(condition);				\
+	if (unlikely(__ret_warn_on)) {					\
+		if (i915.verbose_state_checks)				\
+			WARN(1, format);				\
+		else 							\
+			DRM_ERROR(format);				\
+	}								\
+	unlikely(__ret_warn_on);					\
+})
+
+#define I915_STATE_WARN_ON(condition) ({				\
+	int __ret_warn_on = !!(condition);				\
+	if (unlikely(__ret_warn_on)) {					\
+		if (i915.verbose_state_checks)				\
+			WARN(1, "WARN_ON(" #condition ")\n");		\
+		else 							\
+			DRM_ERROR("WARN_ON(" #condition ")\n");		\
+	}								\
+	unlikely(__ret_warn_on);					\
+})
+
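+/*
+ * Illustrative use of the state-check macros above (hypothetical condition
+ * and message, not taken from any call site):
+ *
+ *	I915_STATE_WARN(enabled, "pipe %c still enabled\n", pipe_name(pipe));
+ */
+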
+static inline const char *yesno(bool v)
+{
+	return v ? "yes" : "no";
+}
+
+enum pipe {
+	INVALID_PIPE = -1,
+	PIPE_A = 0,
+	PIPE_B,
+	PIPE_C,
+	_PIPE_EDP,
+	I915_MAX_PIPES = _PIPE_EDP
+};
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+	TRANSCODER_A = 0,
+	TRANSCODER_B,
+	TRANSCODER_C,
+	TRANSCODER_EDP,
+	I915_MAX_TRANSCODERS
+};
+#define transcoder_name(t) ((t) + 'A')
+
+/*
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC.  Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ */
+enum plane {
+	PLANE_A = 0,
+	PLANE_B,
+	PLANE_C,
+	PLANE_CURSOR,
+	I915_MAX_PLANES,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+
+enum port {
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+	I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
+#define I915_NUM_PHYS_VLV 2
+
+enum dpio_channel {
+	DPIO_CH0,
+	DPIO_CH1
+};
+
+enum dpio_phy {
+	DPIO_PHY0,
+	DPIO_PHY1
+};
+
+enum intel_display_power_domain {
+	POWER_DOMAIN_PIPE_A,
+	POWER_DOMAIN_PIPE_B,
+	POWER_DOMAIN_PIPE_C,
+	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+	POWER_DOMAIN_TRANSCODER_A,
+	POWER_DOMAIN_TRANSCODER_B,
+	POWER_DOMAIN_TRANSCODER_C,
+	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_PORT_DDI_A_2_LANES,
+	POWER_DOMAIN_PORT_DDI_A_4_LANES,
+	POWER_DOMAIN_PORT_DDI_B_2_LANES,
+	POWER_DOMAIN_PORT_DDI_B_4_LANES,
+	POWER_DOMAIN_PORT_DDI_C_2_LANES,
+	POWER_DOMAIN_PORT_DDI_C_4_LANES,
+	POWER_DOMAIN_PORT_DDI_D_2_LANES,
+	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DDI_E_2_LANES,
+	POWER_DOMAIN_PORT_DSI,
+	POWER_DOMAIN_PORT_CRT,
+	POWER_DOMAIN_PORT_OTHER,
+	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
+	POWER_DOMAIN_AUX_A,
+	POWER_DOMAIN_AUX_B,
+	POWER_DOMAIN_AUX_C,
+	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
+	POWER_DOMAIN_INIT,
+
+	POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+	 (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+enum hpd_pin {
+	HPD_NONE = 0,
+	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
+	HPD_CRT,
+	HPD_SDVO_B,
+	HPD_SDVO_C,
+	HPD_PORT_A,
+	HPD_PORT_B,
+	HPD_PORT_C,
+	HPD_PORT_D,
+	HPD_PORT_E,
+	HPD_NUM_PINS
+};
+
+#define for_each_hpd_pin(__pin) \
+	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+	struct work_struct hotplug_work;
+
+	struct {
+		unsigned long last_jiffies;
+		int count;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} state;
+	} stats[HPD_NUM_PINS];
+	u32 event_bits;
+	struct delayed_work reenable_work;
+
+	struct intel_digital_port *irq_port[I915_MAX_PORTS];
+	u32 long_port_mask;
+	u32 short_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * If we get a HPD irq from DP and a HPD irq from non-DP, the
+	 * non-DP HPD could block the workqueue on a mode config mutex
+	 * that userspace may have taken. However, userspace is waiting
+	 * on the DP workqueue to run, which is blocked behind the
+	 * non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+};
+
+#define I915_GEM_GPU_DOMAINS \
+	(I915_GEM_DOMAIN_RENDER | \
+	 I915_GEM_DOMAIN_SAMPLER | \
+	 I915_GEM_DOMAIN_COMMAND | \
+	 I915_GEM_DOMAIN_INSTRUCTION | \
+	 I915_GEM_DOMAIN_VERTEX)
+
+#define for_each_pipe(__dev_priv, __p) \
+	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_plane(__dev_priv, __pipe, __p)				\
+	for ((__p) = 0;							\
+	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
+	     (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s)				\
+	for ((__s) = 0;							\
+	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
+	     (__s)++)
+
+#define for_each_crtc(dev, crtc) \
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+	list_for_each_entry(intel_plane,			\
+			    &dev->mode_config.plane_list,	\
+			    base.head)
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
+	list_for_each_entry(intel_plane,				\
+			    &(dev)->mode_config.plane_list,		\
+			    base.head)					\
+		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+
+#define for_each_intel_encoder(dev, intel_encoder)		\
+	list_for_each_entry(intel_encoder,			\
+			    &(dev)->mode_config.encoder_list,	\
+			    base.head)
+
+#define for_each_intel_connector(dev, intel_connector)		\
+	list_for_each_entry(intel_connector,			\
+			    &dev->mode_config.connector_list,	\
+			    base.head)
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+		if ((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+		if ((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
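+/*
+ * Illustrative use of the iterator above, e.g. grabbing a reference on every
+ * domain in a mask (intel_display_power_get() is declared elsewhere in the
+ * driver):
+ *
+ *	for_each_power_domain(domain, mask)
+ *		intel_display_power_get(dev_priv, domain);
+ */
+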
+struct drm_i915_private;
+struct i915_mm_struct;
+struct i915_mmu_object;
+
+struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+	struct drm_file *file;
+
+	struct {
+		spinlock_t lock;
+		struct list_head request_list;
+/* 20ms is a fairly arbitrary limit (greater than the average frame time)
+ * chosen to prevent the CPU getting more than a frame ahead of the GPU
+ * (when using lax throttling for the frontbuffer). We also use it to
+ * offer free GPU waitboosts for severely congested workloads.
+ */
+#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
+	} mm;
+	struct idr context_idr;
+
+	struct intel_rps_client {
+		struct list_head link;
+		unsigned boosts;
+	} rps;
+
+	struct intel_engine_cs *bsd_ring;
+};
+
+enum intel_dpll_id {
+	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+	/* real shared dpll ids must be >= 0 */
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	/* hsw/bdw */
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+
+	/* skl */
+	DPLL_ID_SKL_DPLL1 = 0,
+	DPLL_ID_SKL_DPLL2 = 1,
+	DPLL_ID_SKL_DPLL3 = 2,
+};
+#define I915_NUM_PLLS 3
+
+struct intel_dpll_hw_state {
+	/* i9xx, pch plls */
+	uint32_t dpll;
+	uint32_t dpll_md;
+	uint32_t fp0;
+	uint32_t fp1;
+
+	/* hsw, bdw */
+	uint32_t wrpll;
+	uint32_t spll;
+
+	/* skl */
+	/*
+	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+	 * lower part of ctrl1 and they get shifted into position when writing
+	 * the register.  This allows us to easily compare the state to share
+	 * the DPLL.
+	 */
+	uint32_t ctrl1;
+	/* HDMI only, 0 when used for DP */
+	uint32_t cfgcr1, cfgcr2;
+
+	/* bxt */
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
+};
+
+struct intel_shared_dpll_config {
+	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+	struct intel_dpll_hw_state hw_state;
+};
+
+struct intel_shared_dpll {
+	struct intel_shared_dpll_config config;
+
+	int active; /* number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	const char *name;
+	/* should match the index in the dev_priv->shared_dplls array */
+	enum intel_dpll_id id;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
+	void (*mode_set)(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll);
+	void (*enable)(struct drm_i915_private *dev_priv,
+		       struct intel_shared_dpll *pll);
+	void (*disable)(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll);
+	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+			     struct intel_shared_dpll *pll,
+			     struct intel_dpll_hw_state *hw_state);
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n);
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		6
+#define DRIVER_PATCHLEVEL	0
+
+#define WATCH_LISTS	0
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+	struct opregion_header *header;
+	struct opregion_acpi *acpi;
+	struct opregion_swsci *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
+	struct opregion_asle *asle;
+	void *vbt;
+	u32 *lid_state;
+	struct work_struct asle_work;
+};
+#define OPREGION_SIZE            (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+struct drm_i915_fence_reg {
+	struct list_head lru_list;
+	struct drm_i915_gem_object *obj;
+	int pin_count;
+};
+
+struct sdvo_device_mapping {
+	u8 initialized;
+	u8 dvo_port;
+	u8 slave_addr;
+	u8 dvo_wiring;
+	u8 i2c_pin;
+	u8 ddc_pin;
+};
+
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+	struct kref ref;
+	struct timeval time;
+
+	char error_msg[128];
+	int iommu;
+	u32 reset_count;
+	u32 suspend_count;
+
+	/* Generic register state */
+	u32 eir;
+	u32 pgtbl_er;
+	u32 ier;
+	u32 gtier[4];
+	u32 ccid;
+	u32 derrmr;
+	u32 forcewake;
+	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
+	u32 fault_data0; /* gen8, gen9 */
+	u32 fault_data1; /* gen8, gen9 */
+	u32 done_reg;
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u64 fence[I915_MAX_NUM_FENCES];
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore_obj;
+
+	struct drm_i915_error_ring {
+		bool valid;
+		/* Software tracked state */
+		bool waiting;
+		int hangcheck_score;
+		enum intel_ring_hangcheck_action hangcheck_action;
+		int num_requests;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+		/* Register state */
+		u32 start;
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 instdone;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u64 acthd;
+		u32 fault_reg;
+		u64 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
+		struct drm_i915_error_object {
+			int page_count;
+			u64 gtt_offset;
+			u32 *pages[0];
+		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+		struct drm_i915_error_request {
+			long jiffies;
+			u32 seqno;
+			u32 tail;
+		} *requests;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
+
+		pid_t pid;
+		char comm[TASK_COMM_LEN];
+	} ring[I915_NUM_RINGS];
+
+	struct drm_i915_error_buffer {
+		u32 size;
+		u32 name;
+		u32 rseqno[I915_NUM_RINGS], wseqno;
+		u64 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+		s32 pinned:2;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+		u32 userptr:1;
+		s32 ring:4;
+		u32 cache_level:3;
+	} **active_bo, **pinned_bo;
+
+	u32 *active_bo_count, *pinned_bo_count;
+	u32 vm_count;
+};
+
+struct intel_connector;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_initial_plane_config;
+struct intel_crtc;
+struct intel_limit;
+struct dpll;
+
+struct drm_i915_display_funcs {
+	int (*get_display_clock_speed)(struct drm_device *dev);
+	int (*get_fifo_size)(struct drm_device *dev, int plane);
+	/**
+	 * find_dpll() - Find the best values for the PLL
+	 * @limit: limits for the PLL
+	 * @crtc_state: state of the CRTC being configured
+	 * @target: target frequency in kHz
+	 * @refclk: reference clock frequency in kHz
+	 * @match_clock: if provided, @best_clock P divider must
+	 *               match the P divider from @match_clock
+	 *               used for LVDS downclocking
+	 * @best_clock: best PLL values found
+	 *
+	 * Returns true on success, false on failure.
+	 */
+	bool (*find_dpll)(const struct intel_limit *limit,
+			  struct intel_crtc_state *crtc_state,
+			  int target, int refclk,
+			  struct dpll *match_clock,
+			  struct dpll *best_clock);
+	void (*update_wm)(struct drm_crtc *crtc);
+	void (*update_sprite_wm)(struct drm_plane *plane,
+				 struct drm_crtc *crtc,
+				 uint32_t sprite_width, uint32_t sprite_height,
+				 int pixel_size, bool enable, bool scaled);
+	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
+	/* Returns the active state of the crtc, and if the crtc is active,
+	 * fills out the pipe-config with the hw state. */
+	bool (*get_pipe_config)(struct intel_crtc *,
+				struct intel_crtc_state *);
+	void (*get_initial_plane_config)(struct intel_crtc *,
+					 struct intel_initial_plane_config *);
+	int (*crtc_compute_clock)(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state);
+	void (*crtc_enable)(struct drm_crtc *crtc);
+	void (*crtc_disable)(struct drm_crtc *crtc);
+	void (*audio_codec_enable)(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   const struct drm_display_mode *adjusted_mode);
+	void (*audio_codec_disable)(struct intel_encoder *encoder);
+	void (*fdi_link_train)(struct drm_crtc *crtc);
+	void (*init_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj,
+			  struct drm_i915_gem_request *req,
+			  uint32_t flags);
+	void (*update_primary_plane)(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int x, int y);
+	void (*hpd_irq_setup)(struct drm_device *dev);
+	/* clock updates for mode set */
+	/* cursor updates */
+	/* render clock increase/decrease */
+	/* display clock increase/decrease */
+	/* pll clock increase/decrease */
+};
+
+enum forcewake_domain_id {
+	FW_DOMAIN_ID_RENDER = 0,
+	FW_DOMAIN_ID_BLITTER,
+	FW_DOMAIN_ID_MEDIA,
+
+	FW_DOMAIN_ID_COUNT
+};
+
+enum forcewake_domains {
+	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
+	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
+	FORCEWAKE_MEDIA	= (1 << FW_DOMAIN_ID_MEDIA),
+	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
+			 FORCEWAKE_BLITTER |
+			 FORCEWAKE_MEDIA)
+};
+
+struct intel_uncore_funcs {
+	void (*force_wake_get)(struct drm_i915_private *dev_priv,
+							enum forcewake_domains domains);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv,
+							enum forcewake_domains domains);
+
+	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+				uint8_t val, bool trace);
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+				uint16_t val, bool trace);
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+				uint32_t val, bool trace);
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+				uint64_t val, bool trace);
+};
+
+struct intel_uncore {
+	spinlock_t lock; /** lock is also taken in irq contexts. */
+
+	struct intel_uncore_funcs funcs;
+
+	unsigned fifo_count;
+	enum forcewake_domains fw_domains;
+
+	struct intel_uncore_forcewake_domain {
+		struct drm_i915_private *i915;
+		enum forcewake_domain_id id;
+		unsigned wake_count;
+		struct timer_list timer;
+		u32 reg_set;
+		u32 val_set;
+		u32 val_clear;
+		u32 reg_ack;
+		u32 reg_post;
+		u32 val_reset;
+	} fw_domain[FW_DOMAIN_ID_COUNT];
+};
+
+/* Iterate over initialised fw domains */
+#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
+	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+	     (i__) < FW_DOMAIN_ID_COUNT; \
+	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
+		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+
+#define for_each_fw_domain(domain__, dev_priv__, i__) \
+	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+
+enum csr_state {
+	FW_UNINITIALIZED = 0,
+	FW_LOADED,
+	FW_FAILED
+};
+
+struct intel_csr {
+	const char *fw_path;
+	uint32_t *dmc_payload;
+	uint32_t dmc_fw_size;
+	uint32_t mmio_count;
+	uint32_t mmioaddr[8];
+	uint32_t mmiodata[8];
+	enum csr_state state;
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
+	func(is_mobile) sep \
+	func(is_i85x) sep \
+	func(is_i915g) sep \
+	func(is_i945gm) sep \
+	func(is_g33) sep \
+	func(need_gfx_hws) sep \
+	func(is_g4x) sep \
+	func(is_pineview) sep \
+	func(is_broadwater) sep \
+	func(is_crestline) sep \
+	func(is_ivybridge) sep \
+	func(is_valleyview) sep \
+	func(is_haswell) sep \
+	func(is_skylake) sep \
+	func(is_preliminary) sep \
+	func(has_fbc) sep \
+	func(has_pipe_cxsr) sep \
+	func(has_hotplug) sep \
+	func(cursor_needs_physical) sep \
+	func(has_overlay) sep \
+	func(overlay_needs_physical) sep \
+	func(supports_tv) sep \
+	func(has_llc) sep \
+	func(has_ddi) sep \
+	func(has_fpga_dbg)
+
+#define DEFINE_FLAG(name) u8 name:1
+#define SEP_SEMICOLON ;
+
+struct intel_device_info {
+	u32 display_mmio_offset;
+	u16 device_id;
+	u8 num_pipes:3;
+	u8 num_sprites[I915_MAX_PIPES];
+	u8 gen;
+	u8 ring_mask; /* Rings supported by the HW */
+	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	/* Register offsets for the various display pipes and transcoders */
+	int pipe_offsets[I915_MAX_TRANSCODERS];
+	int trans_offsets[I915_MAX_TRANSCODERS];
+	int palette_offsets[I915_MAX_PIPES];
+	int cursor_offsets[I915_MAX_PIPES];
+
+	/* Slice/subslice/EU info */
+	u8 slice_total;
+	u8 subslice_total;
+	u8 subslice_per_slice;
+	u8 eu_total;
+	u8 eu_per_subslice;
+	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+	u8 subslice_7eu[3];
+	u8 has_slice_pg:1;
+	u8 has_subslice_pg:1;
+	u8 has_eu_pg:1;
+};
+
+#undef DEFINE_FLAG
+#undef SEP_SEMICOLON
+
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
+			      caches, eg sampler/render caches, and the
+			      large Last-Level-Cache. LLC is coherent with
+			      the CPU, but L3 is only visible to the GPU. */
+	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
+};
+
+struct i915_ctx_hang_stats {
+	/* This context had batch pending when hang was declared */
+	unsigned batch_pending;
+
+	/* This context had batch active when hang was declared */
+	unsigned batch_active;
+
+	/* Time when this context was last blamed for a GPU reset */
+	unsigned long guilty_ts;
+
+	/* If the contexts causes a second GPU hang within this time,
+	 * it is permanently banned from submitting any more work.
+	 */
+	unsigned long ban_period_seconds;
+
+	/* This context is banned to submit more work */
+	bool banned;
+};
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @ppgtt: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *                initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
+struct intel_context {
+	struct kref ref;
+	int user_handle;
+	uint8_t remap_slice;
+	struct drm_i915_private *i915;
+	int flags;
+	struct drm_i915_file_private *file_priv;
+	struct i915_ctx_hang_stats hang_stats;
+	struct i915_hw_ppgtt *ppgtt;
+
+	/* Legacy ring buffer submission */
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
+	/* Execlists */
+	struct {
+		struct drm_i915_gem_object *state;
+		struct intel_ringbuffer *ringbuf;
+		int pin_count;
+	} engine[I915_NUM_RINGS];
+
+	struct list_head link;
+};
+
+enum fb_op_origin {
+	ORIGIN_GTT,
+	ORIGIN_CPU,
+	ORIGIN_CS,
+	ORIGIN_FLIP,
+	ORIGIN_DIRTYFB,
+};
+
+struct i915_fbc {
+	/* This is always the inner lock when overlapping with struct_mutex and
+	 * it's the outer lock when overlapping with stolen_lock. */
+	struct mutex lock;
+	unsigned long uncompressed_size;
+	unsigned threshold;
+	unsigned int fb_id;
+	unsigned int possible_framebuffer_bits;
+	unsigned int busy_bits;
+	struct intel_crtc *crtc;
+	int y;
+
+	struct drm_mm_node compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
+	bool false_color;
+
+	/* Tracks whether the HW is actually enabled, not whether the feature is
+	 * possible. */
+	bool enabled;
+
+	struct intel_fbc_work {
+		struct delayed_work work;
+		struct intel_crtc *crtc;
+		struct drm_framebuffer *fb;
+	} *fbc_work;
+
+	enum no_fbc_reason {
+		FBC_OK, /* FBC is enabled */
+		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+		FBC_NO_OUTPUT, /* no outputs enabled to compress */
+		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+		FBC_MODE_TOO_LARGE, /* mode too large for compression */
+		FBC_BAD_PLANE, /* fbc not supported on plane */
+		FBC_NOT_TILED, /* buffer not tiled */
+		FBC_MULTIPLE_PIPES, /* more than one pipe active */
+		FBC_MODULE_PARAM,
+		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+		FBC_ROTATION, /* rotation is not supported */
+		FBC_IN_DBG_MASTER, /* kernel debugger is active */
+		FBC_BAD_STRIDE, /* stride is not supported */
+		FBC_PIXEL_RATE, /* pixel rate is too big */
+		FBC_PIXEL_FORMAT /* pixel format is invalid */
+	} no_fbc_reason;
+
+	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+	void (*enable_fbc)(struct intel_crtc *crtc);
+	void (*disable_fbc)(struct drm_i915_private *dev_priv);
+};
+
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for the same resolution.
+ */
+enum drrs_refresh_rate_type {
+	DRRS_HIGH_RR,
+	DRRS_LOW_RR,
+	DRRS_MAX_RR, /* RR count */
+};
+
+enum drrs_support_type {
+	DRRS_NOT_SUPPORTED = 0,
+	STATIC_DRRS_SUPPORT = 1,
+	SEAMLESS_DRRS_SUPPORT = 2
+};
+
+struct intel_dp;
+struct i915_drrs {
+	struct mutex mutex;
+	struct delayed_work work;
+	struct intel_dp *dp;
+	unsigned busy_frontbuffer_bits;
+	enum drrs_refresh_rate_type refresh_rate_type;
+	enum drrs_support_type type;
+};
+
+struct i915_psr {
+	struct mutex lock;
+	bool sink_support;
+	bool source_ok;
+	struct intel_dp *enabled;
+	bool active;
+	struct delayed_work work;
+	unsigned busy_frontbuffer_bits;
+	bool psr2_support;
+	bool aux_frame_sync;
+};
+
+enum intel_pch {
+	PCH_NONE = 0,	/* No PCH present */
+	PCH_IBX,	/* Ibexpeak PCH */
+	PCH_CPT,	/* Cougarpoint PCH */
+	PCH_LPT,	/* Lynxpoint PCH */
+	PCH_SPT,        /* Sunrisepoint PCH */
+	PCH_NOP,
+};
+
+enum intel_sbi_destination {
+	SBI_ICLK,
+	SBI_MPHY,
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
+#define QUIRK_PIPEB_FORCE (1<<4)
+#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
+struct intel_gmbus {
+	struct i2c_adapter adapter;
+	u32 force_bit;
+	u32 reg0;
+	u32 gpio_reg;
+	struct i2c_algo_bit_data bit_algo;
+	struct drm_i915_private *dev_priv;
+};
+
+struct i915_suspend_saved_registers {
+	u32 saveDSPARB;
+	u32 saveLVDS;
+	u32 savePP_ON_DELAYS;
+	u32 savePP_OFF_DELAYS;
+	u32 savePP_ON;
+	u32 savePP_OFF;
+	u32 savePP_CONTROL;
+	u32 savePP_DIVISOR;
+	u32 saveFBC_CONTROL;
+	u32 saveCACHE_MODE_0;
+	u32 saveMI_ARB_STATE;
+	u32 saveSWF0[16];
+	u32 saveSWF1[16];
+	u32 saveSWF3[3];
+	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+	u32 savePCH_PORT_HOTPLUG;
+	u16 saveGCDGMBUS;
+};
+
+struct vlv_s0ix_state {
+	/* GAM */
+	u32 wr_watermark;
+	u32 gfx_prio_ctrl;
+	u32 arb_mode;
+	u32 gfx_pend_tlb0;
+	u32 gfx_pend_tlb1;
+	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+	u32 media_max_req_count;
+	u32 gfx_max_req_count;
+	u32 render_hwsp;
+	u32 ecochk;
+	u32 bsd_hwsp;
+	u32 blt_hwsp;
+	u32 tlb_rd_addr;
+
+	/* MBC */
+	u32 g3dctl;
+	u32 gsckgctl;
+	u32 mbctl;
+
+	/* GCP */
+	u32 ucgctl1;
+	u32 ucgctl3;
+	u32 rcgctl1;
+	u32 rcgctl2;
+	u32 rstctl;
+	u32 misccpctl;
+
+	/* GPM */
+	u32 gfxpause;
+	u32 rpdeuhwtc;
+	u32 rpdeuc;
+	u32 ecobus;
+	u32 pwrdwnupctl;
+	u32 rp_down_timeout;
+	u32 rp_deucsw;
+	u32 rcubmabdtmr;
+	u32 rcedata;
+	u32 spare2gh;
+
+	/* Display 1 CZ domain */
+	u32 gt_imr;
+	u32 gt_ier;
+	u32 pm_imr;
+	u32 pm_ier;
+	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+	/* GT SA CZ domain */
+	u32 tilectl;
+	u32 gt_fifoctl;
+	u32 gtlc_wake_ctrl;
+	u32 gtlc_survive;
+	u32 pmwgicz;
+
+	/* Display 2 CZ domain */
+	u32 gu_ctl0;
+	u32 gu_ctl1;
+	u32 pcbr;
+	u32 clock_gate_dis2;
+};
+
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
+
+struct intel_gen6_power_mgmt {
+	/*
+	 * work, interrupts_enabled and pm_iir are protected by
+	 * dev_priv->irq_lock
+	 */
+	struct work_struct work;
+	bool interrupts_enabled;
+	u32 pm_iir;
+
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
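+	/*
+	 * Worked example (illustrative, not from the original source): on
+	 * most gen6+ platforms the multiple is GT_FREQUENCY_MULTIPLIER (50),
+	 * so e.g. cur_freq == 18 corresponds to 18 * 50 = 900 MHz.
+	 */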
+	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
+	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
+	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 idle_freq;		/* Frequency to request when we are idle */
+	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq;		/* "less than" RP0 power/frequency */
+	u8 rp0_freq;		/* Non-overclocked max frequency. */
+
+	u8 up_threshold; /* Current %busy required to upclock */
+	u8 down_threshold; /* Current %busy required to downclock */
+
+	int last_adj;
+	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+	spinlock_t client_lock;
+	struct list_head clients;
+	bool client_boost;
+
+	bool enabled;
+	struct delayed_work delayed_resume_work;
+	unsigned boosts;
+
+	struct intel_rps_client semaphores, mmioflips;
+
+	/* manual wa residency calculations */
+	struct intel_rps_ei ei;
+
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested. Note that
+	 * this lock may be held for long periods of time when
+	 * talking to hw - so only take it when talking to hw!
+	 */
+	struct mutex hw_lock;
+};
+
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+struct intel_ilk_power_mgmt {
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	u64 last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c_m;
+	int r_t;
+};
+
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+	/*
+	 * Synchronize the well's hw state to match the current sw state, for
+	 * example enable/disable it based on the current refcount. Called
+	 * during driver init and resume time, possibly after first calling
+	 * the enable/disable handlers.
+	 */
+	void (*sync_hw)(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well);
+	/*
+	 * Enable the well and resources that depend on it (for example
+	 * interrupts located on the well). Called after the 0->1 refcount
+	 * transition.
+	 */
+	void (*enable)(struct drm_i915_private *dev_priv,
+		       struct i915_power_well *power_well);
+	/*
+	 * Disable the well and resources that depend on it. Called after
+	 * the 1->0 refcount transition.
+	 */
+	void (*disable)(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well);
+	/* Returns the hw enabled state. */
+	bool (*is_enabled)(struct drm_i915_private *dev_priv,
+			   struct i915_power_well *power_well);
+};
+
+/* Power well structure for haswell */
+struct i915_power_well {
+	const char *name;
+	bool always_on;
+	/* power well enable/disable usage count */
+	int count;
+	/* cached hw enabled state */
+	bool hw_enabled;
+	unsigned long domains;
+	unsigned long data;
+	const struct i915_power_well_ops *ops;
+};
+
+struct i915_power_domains {
+	/*
+	 * Power wells needed for initialization at driver init and suspend
+	 * time are on. They are kept on until after the first modeset.
+	 */
+	bool init_power_on;
+	bool initializing;
+	int power_well_count;
+
+	struct mutex lock;
+	int domain_use_count[POWER_DOMAIN_NUM];
+	struct i915_power_well *power_wells;
+};
+
+#define MAX_L3_SLICES 2
+struct intel_l3_parity {
+	u32 *remap_info[MAX_L3_SLICES];
+	struct work_struct error_work;
+	int which_slice;
+};
+
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Protects the usage of the GTT stolen memory allocator. This is
+	 * always the inner lock when overlapping with struct_mutex. */
+	struct mutex stolen_lock;
+
+	/** List of all objects in gtt_space. Used to restore gtt
+	 * mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	/** PPGTT used for aliasing the global GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	struct notifier_block oom_notifier;
+	struct shrinker shrinker;
+	bool shrinker_no_lock_stealing;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * When we detect an idle GPU, we want to turn on
+	 * powersaving features. So once we see that there
+	 * are no more requests outstanding and no more
+	 * arrive within a small period of time, we fire
+	 * off the idle_work.
+	 */
+	struct delayed_work idle_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Is the GPU currently considered idle, or busy executing userspace
+	 * requests?  Whilst idle, we attempt to power down the hardware and
+	 * display clocks. In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	bool busy;
+
+	/* the indicator for dispatching video commands on two BSD rings */
+	int bsd_ring_dispatch_index;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* accounting, useful for userland debugging */
+	spinlock_t object_stat_lock;
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct drm_i915_error_state_buf {
+	struct drm_i915_private *i915;
+	unsigned bytes;
+	unsigned size;
+	int err;
+	u8 *buf;
+	loff_t start;
+	loff_t pos;
+};
+
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	/* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
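+	/*
+	 * Worked example (illustrative): with the default 1500 ms hangcheck
+	 * period the ban window is DIV_ROUND_UP(8 * 1500, 1000) = 12 seconds.
+	 */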
+
+	struct workqueue_struct *hangcheck_wq;
+	struct delayed_work hangcheck_work;
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+
+	unsigned long missed_irq_rings;
+
+	/**
+	 * State variable controlling the reset flow and count
+	 *
+	 * This is a counter which gets incremented when a reset is triggered,
+	 * and again when the reset has been handled. So odd values (lowest bit
+	 * set) mean that a reset is in progress, and even values mean that the
+	 * (reset_counter >> 1)'th reset was successfully completed.
+	 *
+	 * If a reset does not complete successfully, the I915_WEDGED bit is
+	 * set, meaning that the hardware is terminally sour and there is no
+	 * recovery. All waiters on the reset_queue will be woken when
+	 * that happens.
+	 *
+	 * This counter is used by the wait_seqno code to notice that a reset
+	 * event has happened and that it needs to restart the entire ioctl
+	 * (since most likely the seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 */
+	atomic_t reset_counter;
+
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			(1 << 31)
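+	/*
+	 * Worked example (illustrative): reset_counter == 3 is odd, so a
+	 * reset is in progress and (3 >> 1) == 1 reset has already completed;
+	 * reset_counter == 4 means two resets completed and none in flight.
+	 */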
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for a pending reset to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* Userspace knobs for gpu hang simulation;
+	 * combines both a ring mask and extra flags
+	 */
+	u32 stop_rings;
+#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
+#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
+
+	/* For missed irq/seqno simulation. */
+	unsigned int test_irq_rings;
+
+	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+	bool reload_in_reset;
+};
+
+enum modeset_restore {
+	MODESET_ON_LID_OPEN,
+	MODESET_DONE,
+	MODESET_SUSPENDED,
+};
+
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
+struct ddi_vbt_port_info {
+	/*
+	 * This is an index in the HDMI/DVI DDI buffer translation table.
+	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
+	 * populate this field.
+	 */
+#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
+	uint8_t hdmi_level_shift;
+
+	uint8_t supports_dvi:1;
+	uint8_t supports_hdmi:1;
+	uint8_t supports_dp:1;
+
+	uint8_t alternate_aux_channel;
+	uint8_t alternate_ddc_pin;
+
+	uint8_t dp_boost_level;
+	uint8_t hdmi_boost_level;
+};
+
+enum psr_lines_to_wait {
+	PSR_0_LINES_TO_WAIT = 0,
+	PSR_1_LINE_TO_WAIT,
+	PSR_4_LINES_TO_WAIT,
+	PSR_8_LINES_TO_WAIT
+};
+
+struct intel_vbt_data {
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+	/* Feature bits */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
+	unsigned int fdi_rx_polarity_inverted:1;
+	unsigned int has_mipi:1;
+	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+	enum drrs_support_type drrs_type;
+
+	/* eDP */
+	int edp_rate;
+	int edp_lanes;
+	int edp_preemphasis;
+	int edp_vswing;
+	bool edp_initialized;
+	bool edp_support;
+	int edp_bpp;
+	struct edp_power_seq edp_pps;
+
+	struct {
+		bool full_link;
+		bool require_aux_wakeup;
+		int idle_frames;
+		enum psr_lines_to_wait lines_to_wait;
+		int tp1_wakeup_time;
+		int tp2_tp3_wakeup_time;
+	} psr;
+
+	struct {
+		u16 pwm_freq_hz;
+		bool present;
+		bool active_low_pwm;
+		u8 min_brightness;	/* min_brightness/255 of max */
+	} backlight;
+
+	/* MIPI DSI */
+	struct {
+		u16 port;
+		u16 panel_id;
+		struct mipi_config *config;
+		struct mipi_pps_data *pps;
+		u8 seq_version;
+		u32 size;
+		u8 *data;
+		u8 *sequence[MIPI_SEQ_MAX];
+	} dsi;
+
+	int crt_ddc_pin;
+
+	int child_dev_num;
+	union child_device_config *child_dev;
+
+	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+};
+
+enum intel_ddb_partitioning {
+	INTEL_DDB_PART_1_2,
+	INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct intel_wm_level {
+	bool enable;
+	uint32_t pri_val;
+	uint32_t spr_val;
+	uint32_t cur_val;
+	uint32_t fbc_val;
+};
+
+struct ilk_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+	enum intel_ddb_partitioning partitioning;
+};
+
+struct vlv_pipe_wm {
+	uint16_t primary;
+	uint16_t sprite[2];
+	uint8_t cursor;
+};
+
+struct vlv_sr_wm {
+	uint16_t plane;
+	uint8_t cursor;
+};
+
+struct vlv_wm_values {
+	struct vlv_pipe_wm pipe[3];
+	struct vlv_sr_wm sr;
+	struct {
+		uint8_t cursor;
+		uint8_t sprite[2];
+		uint8_t primary;
+	} ddl[3];
+	uint8_t level;
+	bool cxsr;
+};
+
+struct skl_ddb_entry {
+	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
+};
+
+static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+	return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+				       const struct skl_ddb_entry *e2)
+{
+	if (e1->start == e2->start && e1->end == e2->end)
+		return true;
+
+	return false;
+}
+
+struct skl_ddb_allocation {
+	struct skl_ddb_entry pipe[I915_MAX_PIPES];
+	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
+	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+};
+
+struct skl_wm_values {
+	bool dirty[I915_MAX_PIPES];
+	struct skl_ddb_allocation ddb;
+	uint32_t wm_linetime[I915_MAX_PIPES];
+	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
+	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
+};
+
+struct skl_wm_level {
+	bool plane_en[I915_MAX_PLANES];
+	uint16_t plane_res_b[I915_MAX_PLANES];
+	uint8_t plane_res_l[I915_MAX_PLANES];
+};
+
+/*
+ * This struct helps tracking the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, not even register access, so we don't get
+ * interrupts or anything else.
+ *
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
+ *
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay at zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_runtime_pm_enable),
+ * but it can be changed with the standard runtime PM files in sysfs.
+ *
+ * The irqs_enabled variable becomes false exactly after we disable the IRQs
+ * and goes back to true exactly before we reenable them. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen, and we print error messages in
+ * case it does.
+ *
+ * For more, see Documentation/power/runtime_pm.txt.
+ */
+struct i915_runtime_pm {
+	bool suspended;
+	bool irqs_enabled;
+};
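+
+/*
+ * Minimal usage sketch (illustrative, not part of the original source):
+ * code that touches the hardware takes a runtime PM reference around the
+ * access (SOME_REG is a hypothetical register name):
+ *
+ *	intel_runtime_pm_get(dev_priv);
+ *	val = I915_READ(SOME_REG);	(device cannot enter D3 here)
+ *	intel_runtime_pm_put(dev_priv);
+ */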
+
+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+	uint32_t frame;
+	uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	bool opened;		/* exclusive access to the result file */
+	struct intel_pipe_crc_entry *entries;
+	enum intel_pipe_crc_source source;
+	int head, tail;
+	wait_queue_head_t wq;
+};
+
+struct i915_frontbuffer_tracking {
+	struct mutex lock;
+
+	/*
+	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+	 * scheduled flips.
+	 */
+	unsigned busy_bits;
+	unsigned flip_bits;
+};
+
+struct i915_wa_reg {
+	u32 addr;
+	u32 value;
+	/* bitmask representing WA bits */
+	u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+	struct i915_wa_reg reg[I915_MAX_WA_REGS];
+	u32 count;
+};
+
+struct i915_virtual_gpu {
+	bool active;
+};
+
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	uint32_t                        dispatch_flags;
+	uint32_t                        args_batch_start_offset;
+	uint64_t                        batch_obj_vm_offset;
+	struct intel_engine_cs          *ring;
+	struct drm_i915_gem_object      *batch_obj;
+	struct intel_context            *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
+struct drm_i915_private {
+	struct drm_device *dev;
+	struct kmem_cache *objects;
+	struct kmem_cache *vmas;
+	struct kmem_cache *requests;
+
+	const struct intel_device_info info;
+
+	int relative_constants_mode;
+
+	void __iomem *regs;
+
+	struct intel_uncore uncore;
+
+	struct i915_virtual_gpu vgpu;
+
+	struct intel_guc guc;
+
+	struct intel_csr csr;
+
+	/* Display CSR-related protection */
+	struct mutex csr_lock;
+
+	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
+
+	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
+	 * controller on different i2c buses. */
+	struct mutex gmbus_mutex;
+
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
+	/* MMIO base address for MIPI regs */
+	uint32_t mipi_mmio_base;
+
+	wait_queue_head_t gmbus_wait_queue;
+
+	struct pci_dev *bridge_dev;
+	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct drm_i915_gem_object *semaphore_obj;
+	uint32_t last_seqno, next_seqno;
+
+	struct drm_dma_handle *status_page_dmah;
+	struct resource mch_res;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
+
+	/* protects the mmio flip data */
+	spinlock_t mmio_flip_lock;
+
+	bool display_irqs_enabled;
+
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
+	/* Sideband mailbox protection */
+	struct mutex sb_lock;
+
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	union {
+		u32 irq_mask;
+		u32 de_irq_mask[I915_MAX_PIPES];
+	};
+	u32 gt_irq_mask;
+	u32 pm_irq_mask;
+	u32 pm_rps_events;
+	u32 pipestat_irq_mask[I915_MAX_PIPES];
+
+	struct i915_hotplug hotplug;
+	struct i915_fbc fbc;
+	struct i915_drrs drrs;
+	struct intel_opregion opregion;
+	struct intel_vbt_data vbt;
+
+	bool preserve_bios_swizzle;
+
+	/* overlay */
+	struct intel_overlay *overlay;
+
+	/* backlight registers and fields in struct intel_panel */
+	struct mutex backlight_lock;
+
+	/* LVDS info */
+	bool no_aux_handshake;
+
+	/* protects panel power sequencer state */
+	struct mutex pps_mutex;
+
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+	unsigned int fsb_freq, mem_freq, is_ddr3;
+	unsigned int skl_boot_cdclk;
+	unsigned int cdclk_freq, max_cdclk_freq;
+	unsigned int max_dotclk_freq;
+	unsigned int hpll_freq;
+	unsigned int czclk_freq;
+
+	/**
+	 * wq - Driver workqueue for GEM.
+	 *
+	 * NOTE: Work items scheduled here are not allowed to grab any modeset
+	 * locks, for otherwise the flushing done in the pageflip code will
+	 * result in deadlocks.
+	 */
+	struct workqueue_struct *wq;
+
+	/* Display functions */
+	struct drm_i915_display_funcs display;
+
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+	unsigned short pch_id;
+
+	unsigned long quirks;
+
+	enum modeset_restore modeset_restore;
+	struct mutex modeset_restore_lock;
+
+	struct list_head vm_list; /* Global list of all address spaces */
+	struct i915_gtt gtt; /* VM representing the global address space */
+
+	struct i915_gem_mm mm;
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
+
+	/* Kernel Modesetting */
+
+	struct sdvo_device_mapping sdvo_mappings[2];
+
+	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+	wait_queue_head_t pending_flip_queue;
+
+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
+	int num_shared_dpll;
+	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
+
+	struct i915_workarounds workarounds;
+
+	/* Reclocking support */
+	bool render_reclock_avail;
+
+	struct i915_frontbuffer_tracking fb_tracking;
+
+	u16 orig_clock;
+
+	bool mchbar_need_disable;
+
+	struct intel_l3_parity l3_parity;
+
+	/* Cannot be determined by PCIID. You must always read a register. */
+	size_t ellc_size;
+
+	/* gen6+ rps state */
+	struct intel_gen6_power_mgmt rps;
+
+	/* ilk-only ips/rps state. Everything in here is protected by the global
+	 * mchdev_lock in intel_pm.c */
+	struct intel_ilk_power_mgmt ips;
+
+	struct i915_power_domains power_domains;
+
+	struct i915_psr psr;
+
+	struct i915_gpu_error gpu_error;
+
+	struct drm_i915_gem_object *vlv_pctx;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	/* fbdev registered on this device */
+	struct intel_fbdev *fbdev;
+	struct work_struct fbdev_suspend_work;
+#endif
+
+	struct drm_property *broadcast_rgb_property;
+	struct drm_property *force_audio_property;
+
+	/* hda/i915 audio component */
+	struct i915_audio_component *audio_component;
+	bool audio_component_registered;
+	/**
+	 * av_mutex - mutex for audio/video sync
+	 */
+	struct mutex av_mutex;
+
+	uint32_t hw_context_size;
+	struct list_head context_list;
+
+	u32 fdi_rx_config;
+
+	u32 chv_phy_control;
+
+	u32 suspend_count;
+	struct i915_suspend_saved_registers regfile;
+	struct vlv_s0ix_state vlv_s0ix_state;
+
+	struct {
+		/*
+		 * Raw watermark latency values:
+		 * in 0.1us units for WM0,
+		 * in 0.5us units for WM1+.
+		 */
+		/* primary */
+		uint16_t pri_latency[5];
+		/* sprite */
+		uint16_t spr_latency[5];
+		/* cursor */
+		uint16_t cur_latency[5];
+		/*
+		 * Raw watermark memory latency values
+		 * for SKL for all 8 levels
+		 * in 1us units.
+		 */
+		uint16_t skl_latency[8];
+
+		/*
+		 * The skl_wm_values structure is a bit too big for stack
+		 * allocation, so we keep the staging struct where we store
+		 * intermediate results here instead.
+		 */
+		struct skl_wm_values skl_results;
+
+		/* current hardware state */
+		union {
+			struct ilk_wm_values hw;
+			struct skl_wm_values skl_hw;
+			struct vlv_wm_values vlv;
+		};
+
+		uint8_t max_level;
+	} wm;
+
+	struct i915_runtime_pm pm;
+
+	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
+	struct {
+		int (*execbuf_submit)(struct i915_execbuffer_params *params,
+				      struct drm_i915_gem_execbuffer2 *args,
+				      struct list_head *vmas);
+		int (*init_rings)(struct drm_device *dev);
+		void (*cleanup_ring)(struct intel_engine_cs *ring);
+		void (*stop_ring)(struct intel_engine_cs *ring);
+	} gt;
+
+	bool edp_low_vswing;
+
+	/* perform PHY state sanity checks? */
+	bool chv_phy_assert[2];
+
+	/*
+	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
+	 * will be rejected. Instead look for a better place.
+	 */
+};
+
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
+static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+{
+	return to_i915(dev_get_drvdata(dev));
+}
+
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+	return container_of(guc, struct drm_i915_private, guc);
+}
+
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
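+
+/*
+ * Usage sketch (illustrative, not part of the original source):
+ *
+ *	struct intel_engine_cs *ring;
+ *	int i;
+ *
+ *	for_each_ring(ring, dev_priv, i)
+ *		DRM_DEBUG("%s initialised\n", ring->name);
+ */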
+
+enum hdmi_force_audio {
+	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
+	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
+	HDMI_AUDIO_AUTO,		/* trust EDID */
+	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
+};
+
+#define I915_GTT_OFFSET_NONE ((u32)-1)
+
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to the use of the associated set
+	 * of pages before binding them into the GTT, and put_pages() is
+	 * called after we no longer need them. As we expect there to be
+	 * associated cost with migrating pages between the backing storage
+	 * and making them available for the GPU (e.g. clflush), we may hold
+	 * onto the pages after they are no longer referenced by the GPU
+	 * in case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
+	int (*dmabuf_export)(struct drm_i915_gem_object *);
+	void (*release)(struct drm_i915_gem_object *);
+};
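+
+/*
+ * Illustrative sketch (hypothetical names, not from the original source):
+ * a backing-storage implementation fills in only the hooks it needs, e.g.
+ *
+ *	static const struct drm_i915_gem_object_ops my_backing_ops = {
+ *		.get_pages = my_get_pages,
+ *		.put_pages = my_put_pages,
+ *	};
+ *
+ * The real shmem, stolen, dma-buf and userptr implementations live in the
+ * i915_gem*.c files.
+ */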
+
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
+#define INTEL_FRONTBUFFER_BITS \
+	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
+	(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+	(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
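+
+/*
+ * Worked example (illustrative): with 8 bits per pipe, pipe B (pipe == 1)
+ * uses bits 8..15: INTEL_FRONTBUFFER_PRIMARY(1) is bit 8, _CURSOR(1) is
+ * bit 9, _SPRITE(1, 0) is bit 10, _OVERLAY(1) is bit 15, and
+ * INTEL_FRONTBUFFER_ALL_MASK(1) is 0xff00.
+ */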
+
+struct drm_i915_gem_object {
+	struct drm_gem_object base;
+
+	const struct drm_i915_gem_object_ops *ops;
+
+	/** List of VMAs backed by this object */
+	struct list_head vma_list;
+
+	/** Stolen memory for this object, instead of being backed by shmem. */
+	struct drm_mm_node *stolen;
+	struct list_head global_list;
+
+	struct list_head ring_list[I915_NUM_RINGS];
+	/** Used in execbuf to temporarily hold a ref */
+	struct list_head obj_exec_link;
+
+	struct list_head batch_pool_link;
+
+	/**
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
+	 */
+	unsigned int active:I915_NUM_RINGS;
+
+	/**
+	 * This is set if the object has been written to since last bound
+	 * to the GTT
+	 */
+	unsigned int dirty:1;
+
+	/**
+	 * Fence register bits (if any) for this object.  Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 */
+	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv:2;
+
+	/**
+	 * Current tiling mode for the object.
+	 */
+	unsigned int tiling_mode:2;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
+
+	/**
+	 * Is the object at the current location in the gtt mappable and
+	 * fenceable? Used to avoid costly recalculations.
+	 */
+	unsigned int map_and_fenceable:1;
+
+	/**
+	 * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+	 * accurate mappable working set.
+	 */
+	unsigned int fault_mappable:1;
+
+	/*
+	 * Is the object to be mapped as read-only to the GPU?
+	 * Only honoured if the hardware has the relevant pte bit.
+	 */
+	unsigned long gt_ro:1;
+	unsigned int cache_level:3;
+	unsigned int cache_dirty:1;
+
+	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
+	unsigned int pin_display;
+
+	struct sg_table *pages;
+	int pages_pin_count;
+	struct get_page {
+		struct scatterlist *sg;
+		int last;
+	} get_page;
+
+	/* prime dma-buf support */
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
+	/** Breadcrumb of last rendering to the buffer.
+	 * There can only be one writer, but we allow for multiple readers.
+	 * If there is a writer, that necessarily implies that all other
+	 * read requests are complete - but we may only be lazily clearing
+	 * the read requests. A read request is naturally the most recent
+	 * request on a ring, so we may have two different write and read
+	 * requests on one ring where the write request is older than the
+	 * read request. This allows for the CPU to read from an active
+	 * buffer by only waiting for the write to complete.
+	 */
+	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *last_write_req;
+	/** Breadcrumb of last fenced GPU access to the buffer. */
+	struct drm_i915_gem_request *last_fenced_req;
+
+	/** Current tiling stride for the object, if it's tiled. */
+	uint32_t stride;
+
+	/** References from framebuffers, locks out tiling changes. */
+	unsigned long framebuffer_references;
+
+	/** Record of address bit 17 of each page at last unbind. */
+	unsigned long *bit_17;
+
+	struct i915_gem_userptr {
+		uintptr_t ptr;
+		unsigned read_only :1;
+		unsigned workers :4;
+#define I915_GEM_USERPTR_MAX_WORKERS 15
+
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
+		struct work_struct *work;
+	} userptr;
+
+	/** for phys allocated objects */
+	struct drm_dma_handle *phys_handle;
+};
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits);
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
+ */
+struct drm_i915_gem_request {
+	struct kref ref;
+
+	/** On which ring this request was generated */
+	struct drm_i915_private *i915;
+	struct intel_engine_cs *ring;
+
+	 /** GEM sequence number associated with the previous request,
+	  * when the HWS breadcrumb is equal to this the GPU is processing
+	  * this request.
+	  */
+	u32 previous_seqno;
+
+	 /** GEM sequence number associated with this request,
+	  * when the HWS breadcrumb is equal or greater than this the GPU
+	  * has finished processing this request.
+	  */
+	u32 seqno;
+
+	/** Position in the ringbuffer of the start of the request */
+	u32 head;
+
+	/**
+	 * Position in the ringbuffer of the start of the postfix.
+	 * This is required to calculate the maximum available ringbuffer
+	 * space without overwriting the postfix.
+	 */
+	u32 postfix;
+
+	/** Position in the ringbuffer of the end of the whole request */
+	u32 tail;
+
+	/**
+	 * Context and ring buffer related to this request
+	 * Contexts are refcounted, so when this request is associated with a
+	 * context, we must increment the context's refcount, to guarantee that
+	 * it persists while any request is linked to it. Requests themselves
+	 * are also refcounted, so the request will only be freed when the last
+	 * reference to it is dismissed, and the code in
+	 * i915_gem_request_free() will then decrement the refcount on the
+	 * context.
+	 */
+	struct intel_context *ctx;
+	struct intel_ringbuffer *ringbuf;
+
+	/** Batch buffer related to this request if any (used for
+	    error state dump only) */
+	struct drm_i915_gem_object *batch_obj;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** global list entry for this request */
+	struct list_head list;
+
+	struct drm_i915_file_private *file_priv;
+	/** file_priv list entry for this request */
+	struct list_head client_list;
+
+	/** process identifier submitting this request */
+	struct pid *pid;
+
+	/**
+	 * The ELSP only accepts two elements at a time, so we queue
+	 * context/tail pairs on a given queue (ring->execlist_queue) until the
+	 * hardware is available. The queue serves a double purpose: we also use
+	 * it to keep track of the up to 2 contexts currently in the hardware
+	 * (usually one in execution and the other queued up by the GPU): We
+	 * only remove elements from the head of the queue when the hardware
+	 * informs us that an element has been completed.
+	 *
+	 * All accesses to the queue are mediated by a spinlock
+	 * (ring->execlist_lock).
+	 */
+
+	/** Execlist link in the submission queue.*/
+	struct list_head execlist_link;
+
+	/** Number of times this request has been sent to the ELSP (execlists only) */
+	int elsp_submitted;
+
+};
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
+void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+	return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+	return req ? req->ring : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+	if (req)
+		kref_get(&req->ref);
+	return req;
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void
+i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+{
+	struct drm_device *dev;
+
+	if (!req)
+		return;
+
+	dev = req->ring->dev;
+	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
+		mutex_unlock(&dev->struct_mutex);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+					   struct drm_i915_gem_request *src)
+{
+	if (src)
+		i915_gem_request_reference(src);
+
+	if (*pdst)
+		i915_gem_request_unreference(*pdst);
+
+	*pdst = src;
+}
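+
+/*
+ * Usage sketch (illustrative, not part of the original source): assigning
+ * balances the refcounts of both the old and the new request, e.g. when
+ * tracking the last write to an object (caller holds struct_mutex, since
+ * the unreference path asserts it):
+ *
+ *	i915_gem_request_assign(&obj->last_write_req, req);
+ *	...
+ *	i915_gem_request_assign(&obj->last_write_req, NULL);
+ */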
+
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+	/*
+	 * Flags describing how the command parser processes the command.
+	 *
+	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
+	 *                 a length mask if not set
+	 * CMD_DESC_SKIP: The command is allowed but does not follow the
+	 *                standard length encoding for the opcode range in
+	 *                which it falls
+	 * CMD_DESC_REJECT: The command is never allowed
+	 * CMD_DESC_REGISTER: The command should be checked against the
+	 *                    register whitelist for the appropriate ring
+	 * CMD_DESC_BITMASK: The command is checked against the masked-dword
+	 *                   rules described in the bits[] array below
+	 * CMD_DESC_MASTER: The command is allowed if the submitting process
+	 *                  is the DRM master
+	 */
+	u32 flags;
+#define CMD_DESC_FIXED    (1<<0)
+#define CMD_DESC_SKIP     (1<<1)
+#define CMD_DESC_REJECT   (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK  (1<<4)
+#define CMD_DESC_MASTER   (1<<5)
+
+	/*
+	 * The command's unique identification bits and the bitmask to get them.
+	 * This isn't strictly the opcode field as defined in the spec and may
+	 * also include type, subtype, and/or subop fields.
+	 */
+	struct {
+		u32 value;
+		u32 mask;
+	} cmd;
+
+	/*
+	 * The command's length. The command is either fixed length (i.e. does
+	 * not include a length field) or has a length field mask. The flag
+	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+	 * a length mask. All command entries in a command table must include
+	 * length information.
+	 */
+	union {
+		u32 fixed;
+		u32 mask;
+	} length;
+
+	/*
+	 * Describes where to find a register address in the command to check
+	 * against the ring's register whitelist. Only valid if flags has the
+	 * CMD_DESC_REGISTER bit set.
+	 *
+	 * A non-zero step value implies that the command may access multiple
+	 * registers in sequence (e.g. LRI), in that case step gives the
+	 * distance in dwords between individual offset fields.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 step;
+	} reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+	/*
+	 * Describes command checks where a particular dword is masked and
+	 * compared against an expected value. If the command does not match
+	 * the expected value, the parser rejects it. Only valid if flags has
+	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+	 * are valid.
+	 *
+	 * If the check specifies a non-zero condition_mask then the parser
+	 * only performs the check when the bits specified by condition_mask
+	 * are non-zero.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 expected;
+		u32 condition_offset;
+		u32 condition_mask;
+	} bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+	const struct drm_i915_cmd_descriptor *table;
+	int count;
+};
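+
+/*
+ * Illustrative sketch (hypothetical values, not from the original source):
+ * a fixed-length, always-rejected command matched on its top byte could be
+ * described as
+ *
+ *	static const struct drm_i915_cmd_descriptor my_desc = {
+ *		.flags = CMD_DESC_FIXED | CMD_DESC_REJECT,
+ *		.cmd = { .value = 0x01 << 24, .mask = 0xff << 24 },
+ *		.length = { .fixed = 1 },
+ *	};
+ */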
+
+/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
+#define __I915__(p) ({ \
+	struct drm_i915_private *__p; \
+	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
+		__p = (struct drm_i915_private *)p; \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
+		__p = to_i915((struct drm_device *)p); \
+	else \
+		BUILD_BUG(); \
+	__p; \
+})
+#define INTEL_INFO(p) 	(&__I915__(p)->info)
+#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
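+
+/*
+ * Illustrative: thanks to the type dispatch in __I915__() the predicates
+ * below accept either pointer type, e.g.
+ *
+ *	if (IS_HASWELL(dev))		with a struct drm_device *
+ *	if (IS_HASWELL(dev_priv))	with a struct drm_i915_private *
+ */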
+
+#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
+#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
+#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
+#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
+#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
+#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
+#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
+#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
+#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
+#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
+				 INTEL_DEVID(dev) == 0x0152 || \
+				 INTEL_DEVID(dev) == 0x015a)
+#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
+#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
+#define IS_BROXTON(dev)	(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
+#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
+				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
+				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
+				 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
+				 INTEL_DEVID(dev) == 0x1913 || \
+				 INTEL_DEVID(dev) == 0x1916 || \
+				 INTEL_DEVID(dev) == 0x1921 || \
+				 INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
+				 INTEL_DEVID(dev) == 0x1915 || \
+				 INTEL_DEVID(dev) == 0x191E)
+#define IS_SKL_GT3(dev)		(IS_SKYLAKE(dev) && \
+				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev)		(IS_SKYLAKE(dev) && \
+				 (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
+
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+
+#define SKL_REVID_A0		(0x0)
+#define SKL_REVID_B0		(0x1)
+#define SKL_REVID_C0		(0x2)
+#define SKL_REVID_D0		(0x3)
+#define SKL_REVID_E0		(0x4)
+#define SKL_REVID_F0		(0x5)
+
+#define BXT_REVID_A0		(0x0)
+#define BXT_REVID_B0		(0x3)
+#define BXT_REVID_C0		(0x9)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
+#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)
+
+#define RENDER_RING		(1<<RCS)
+#define BSD_RING		(1<<VCS)
+#define BLT_RING		(1<<BCS)
+#define VEBOX_RING		(1<<VECS)
+#define BSD2_RING		(1<<VCS2)
+#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
+#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
+				 __I915__(dev)->ellc_size)
+#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
+#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
+#define USES_PPGTT(dev)		(i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
+
+#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+						      IS_I915GM(dev)))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+
+#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+				 INTEL_INFO(dev)->gen >= 9)
+
+#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
+				 IS_SKYLAKE(dev))
+#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
+				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
+				 IS_SKYLAKE(dev))
+#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+
+#define HAS_CSR(dev)	(IS_GEN9(dev))
+
+#define HAS_GUC_UCODE(dev)	(IS_GEN9(dev))
+#define HAS_GUC_SCHED(dev)	(IS_GEN9(dev))
+
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+				    INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
+#define INTEL_PCH_DEVICE_ID_MASK		0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
+
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
+
+#include "i915_trace.h"
+
+extern const struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+
+extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_switcheroo(struct drm_device *dev);
+
+/* i915_params.c */
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	int semaphores;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_fbc;
+	int enable_ppgtt;
+	int enable_execlists;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	int invert_brightness;
+	int enable_cmd_parser;
+	/* leave bools at the end to not create holes */
+	bool enable_hangcheck;
+	bool fastboot;
+	bool prefault_disable;
+	bool load_detect_test;
+	bool reset;
+	bool disable_display;
+	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	int guc_log_level;
+	int use_mmio_flip;
+	int mmio_debug;
+	bool verbose_state_checks;
+	bool nuclear_pageflip;
+	int edp_vswing;
+};
+extern struct i915_params i915 __read_mostly;
+
+/* i915_dma.c */
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
+extern void i915_driver_lastclose(struct drm_device *dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file);
+extern void i915_driver_postclose(struct drm_device *dev,
+				  struct drm_file *file);
+#ifdef CONFIG_COMPAT
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+#endif
+extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void i915_firmware_load_error_print(const char *fw_path, int err);
+
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
+/* i915_irq.c */
+void i915_queue_hangcheck(struct drm_device *dev);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+		       const char *fmt, ...);
+
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
+
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+					bool restore_forcewake);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+				enum forcewake_domains domains);
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+				enum forcewake_domains domains);
+/* Like above but the caller must manage the uncore.lock itself.
+ * Must be used with I915_READ_FW and friends.
+ */
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains domains);
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains domains);
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+	return to_i915(dev)->vgpu.active;
+}
+
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		     u32 status_mask);
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		      u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+				   uint32_t mask,
+				   uint32_t bits);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+				  uint32_t interrupt_mask,
+				  uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+/* i915_gem.c */
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+					struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
+				   struct drm_i915_gem_execbuffer2 *args,
+				   struct list_head *vmas);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			 const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+		struct drm_device *dev, const void *data, size_t size);
+void i915_gem_free_object(struct drm_gem_object *obj);
+void i915_gem_vma_destroy(struct i915_vma *vma);
+
+/* Flags used by pin/bind & friends. */
+#define PIN_MAPPABLE	(1<<0)
+#define PIN_NONBLOCK	(1<<1)
+#define PIN_GLOBAL	(1<<2)
+#define PIN_OFFSET_BIAS	(1<<3)
+#define PIN_USER	(1<<4)
+#define PIN_UPDATE	(1<<5)
+#define PIN_ZONE_4G	(1<<6)
+#define PIN_HIGH	(1<<7)
+#define PIN_OFFSET_MASK (~4095)
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags);
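+
+/*
+ * Usage sketch (illustrative, not part of the original source): pin an
+ * object into the mappable part of the global GTT at 4096-byte alignment:
+ *
+ *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
+ *				       4096, PIN_MAPPABLE | PIN_GLOBAL);
+ */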
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+		  u32 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+/*
+ * BEWARE: Do not use the function below unless you can _absolutely_
+ * _guarantee_ the VMA in question is _not in use_ anywhere.
+ */
+int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __sg_page_count(struct scatterlist *sg)
+{
+	return sg->length >> PAGE_SHIFT;
+}
+
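+/* Look up the backing struct page for page index @n. The scatterlist
+ * position of the previous lookup is cached in obj->get_page so that
+ * sequential accesses stay O(1) instead of rewalking the table.
+ */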
+static inline struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
+		return NULL;
+
+	if (n < obj->get_page.last) {
+		obj->get_page.sg = obj->pages->sgl;
+		obj->get_page.last = 0;
+	}
+
+	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+		if (unlikely(sg_is_chain(obj->get_page.sg)))
+			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+	}
+
+	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+}
+
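+/* Pinning the backing pages keeps them from being released behind our
+ * back (e.g. by the shrinker); pairs with i915_gem_object_unpin_pages().
+ */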
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages == NULL);
+	obj->pages_pin_count++;
+}
+
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages_pin_count == 0);
+	obj->pages_pin_count--;
+}
+
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+			 struct intel_engine_cs *to,
+			 struct drm_i915_gem_request **to_req);
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+			 struct drm_device *dev,
+			 struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+		      uint32_t handle, uint64_t *offset);
+/**
+ * Returns true if seq1 is later than seq2.
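+ * The comparison is done with wrap-safe signed arithmetic, so seq1 == 1
+ * still counts as later than seq2 == 0xffffffff after the seqno wraps.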
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
+
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
+					   bool lazy_coherency)
+{
+	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	return i915_seqno_passed(seqno, req->previous_seqno);
+}
+
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+					      bool lazy_coherency)
+{
+	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	return i915_seqno_passed(seqno, req->seqno);
+}
+
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring);
+
+bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+				      bool interruptible);
+
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+	return unlikely(atomic_read(&error->reset_counter)
+			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
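+/* The reset counter advances once when a reset begins and once when it
+ * completes; masking the wedged bit and halving (rounding up) therefore
+ * counts resets, including one that is still in progress.
+ */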
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+}
+
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
+}
+
+void i915_gem_reset(struct drm_device *dev);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+int __must_check i915_gem_init(struct drm_device *dev);
+int i915_gem_init_rings(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
+int __i915_wait_request(struct drm_i915_gem_request *req,
+			unsigned reset_counter,
+			bool interruptible,
+			s64 *timeout,
+			struct intel_rps_client *rps);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+				  bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
+				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
+				     const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+					      const struct i915_ggtt_view *view);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+				int align);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			    int tiling_mode, bool fenced);
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *gem_obj, int flags);
+
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view);
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm);
+static inline u64
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm);
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+				struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+			  const struct i915_ggtt_view *view);
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+				       const struct i915_ggtt_view *view);
+
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
+}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
+
+/* Some GGTT VM helpers */
+#define i915_obj_to_ggtt(obj) \
+	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool i915_is_ggtt(struct i915_address_space *vm)
+{
+	struct i915_address_space *ggtt =
+		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+	return vm == ggtt;
+}
+
+static inline struct i915_hw_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+	WARN_ON(i915_is_ggtt(vm));
+
+	return container_of(vm, struct i915_hw_ppgtt, base);
+}
+
+static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
+		      uint32_t alignment,
+		      unsigned flags)
+{
+	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+				   alignment, flags | PIN_GLOBAL);
+}
+
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+}
+
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+				     const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
+
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
+/* i915_gem_context.c */
+int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct drm_i915_gem_request *req);
+struct intel_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+void i915_gem_context_free(struct kref *ctx_ref);
+struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
+static inline void i915_gem_context_reference(struct intel_context *ctx)
+{
+	kref_get(&ctx->ref);
+}
+
+static inline void i915_gem_context_unreference(struct intel_context *ctx)
+{
+	kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct intel_context *c)
+{
+	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file);
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+					  struct i915_address_space *vm,
+					  int min_size,
+					  unsigned alignment,
+					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
+					  unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+
+/* belongs in i915_gem_gtt.h */
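+/* Only pre-gen6 chipsets (managed via the intel-gtt layer) need an
+ * explicit chipset write-buffer flush so the GPU observes CPU writes.
+ */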
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
+/* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+					 struct drm_mm_node *node, u64 size,
+					 unsigned alignment, u64 start,
+					 u64 end);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node);
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size);
+
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      unsigned long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+#define I915_SHRINK_ACTIVE 0x8
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
+/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj->tiling_mode != I915_TILING_NONE;
+}
+
+/* i915_gem_debug.c */
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
+#else
+#define i915_verify_lists(dev) 0
+#endif
+
+/* i915_debugfs.c */
+int i915_debugfs_init(struct drm_minor *minor);
+void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_connector_add(struct drm_connector *connector);
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
+
+/* i915_gpu_error.c */
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+			    const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+			      struct drm_i915_private *i915,
+			      size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+	struct drm_i915_error_state_buf *eb)
+{
+	kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+			      const char *error_msg);
+void i915_error_state_get(struct drm_device *dev,
+			  struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+
+/* i915_cmd_parser.c */
+int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
+		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
+		    u32 batch_start_offset,
+		    u32 batch_len,
+		    bool is_master);
+
+/* i915_suspend.c */
+extern int i915_save_state(struct drm_device *dev);
+extern int i915_restore_state(struct drm_device *dev);
+
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+				     unsigned int pin);
+
+extern struct i2c_adapter *
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_bios.c */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+
+/* intel_opregion.c */
+#ifdef CONFIG_ACPI
+extern int intel_opregion_setup(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
+#else
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
+#endif
+
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
+/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern void intel_connector_unregister(struct intel_connector *);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_display_resume(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				  bool enable);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+					    struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+					    struct drm_device *dev,
+					    struct intel_display_error_state *error);
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+
+/* intel_sideband.c */
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+		   enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		     enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. You have been warned.
+ */
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+
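+/* Tear-free read of a 64-bit value exposed as two 32-bit registers:
+ * sample the upper half, read the lower half, then re-read the upper
+ * half and retry (at most twice) if it changed in between.
+ */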
+#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
+	u32 upper, lower, old_upper, loop = 0;				\
+	upper = I915_READ(upper_reg);					\
+	do {								\
+		old_upper = upper;					\
+		lower = I915_READ(lower_reg);				\
+		upper = I915_READ(upper_reg);				\
+	} while (upper != old_upper && loop++ < 2);			\
+	(u64)upper << 32 | lower; })
+
+#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
+
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * critical sections inside IRQ handlers where forcewake is explicitly
+ * controlled.
+ * Think twice, and think again, before using these.
+ * Note: Should only be used between intel_uncore_forcewake_irqlock() and
+ * intel_uncore_forcewake_irqunlock().
+ */
+#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
+#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
+#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+	if (IS_VALLEYVIEW(dev))
+		return VLV_VGACNTRL;
+	else if (INTEL_INFO(dev)->gen >= 5)
+		return CPU_VGACNTRL;
+	else
+		return VGACNTRL;
+}
+
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
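+/* msecs_to_jiffies() rounds up, but the current jiffy may be about to
+ * expire; add one more jiffy so we are guaranteed to wait at least the
+ * requested duration.
+ */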
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+	unsigned long j = msecs_to_jiffies(m);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+	unsigned long j = timespec_to_jiffies(value);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+	/*
+	 * Don't re-read the value of "jiffies" every time since it may change
+	 * behind our back and break the math.
+	 */
+	tmp_jiffies = jiffies;
+	target_jiffies = timestamp_jiffies +
+			 msecs_to_jiffies_timeout(to_wait_ms);
+
+	if (time_after(target_jiffies, tmp_jiffies)) {
+		remaining_jiffies = target_jiffies - tmp_jiffies;
+		while (remaining_jiffies)
+			remaining_jiffies =
+			    schedule_timeout_uninterruptible(remaining_jiffies);
+	}
+}
+
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+				      struct drm_i915_gem_request *req)
+{
+	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+		i915_gem_request_assign(&ring->trace_irq_req, req);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
new file mode 100644
index 0000000..f56af0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -0,0 +1,5281 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_vgpu.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
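+/* Request-state assertions; expanded to nothing so they compile away. */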
+#define RQ_BUG_ON(expr)
+
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static void
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
+static void
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
+
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+				  enum i915_cache_level level)
+{
+	return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+		return true;
+
+	return obj->pin_display;
+}
+
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	spin_lock(&dev_priv->mm.object_stat_lock);
+	dev_priv->mm.object_count++;
+	dev_priv->mm.object_memory += size;
+	spin_unlock(&dev_priv->mm.object_stat_lock);
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	spin_lock(&dev_priv->mm.object_stat_lock);
+	dev_priv->mm.object_count--;
+	dev_priv->mm.object_memory -= size;
+	spin_unlock(&dev_priv->mm.object_stat_lock);
+}
+
+static int
+i915_gem_wait_for_error(struct i915_gpu_error *error)
+{
+	int ret;
+
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+		   i915_terminally_wedged(error))
+	if (EXIT_COND)
+		return 0;
+
+	/*
+	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+	 * userspace. If it takes that long something really bad is going on and
+	 * we should simply try to bail out and fail as gracefully as possible.
+	 */
+	ret = wait_event_interruptible_timeout(error->reset_queue,
+					       EXIT_COND,
+					       10*HZ);
+	if (ret == 0) {
+		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+		return -EIO;
+	} else if (ret < 0) {
+		return ret;
+	}
+#undef EXIT_COND
+
+	return 0;
+}
+
+int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	WARN_ON(i915_verify_lists(dev));
+	return 0;
+}
+
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_get_aperture *args = data;
+	struct i915_gtt *ggtt = &dev_priv->gtt;
+	struct i915_vma *vma;
+	size_t pinned;
+
+	pinned = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
+	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
+	mutex_unlock(&dev->struct_mutex);
+
+	args->aper_size = dev_priv->gtt.base.total;
+	args->aper_available_size = args->aper_size - pinned;
+
+	return 0;
+}
+
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	char *vaddr = obj->phys_handle->vaddr;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int i;
+
+	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+		return -EINVAL;
+
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		page_cache_release(page);
+		vaddr += PAGE_SIZE;
+	}
+
+	i915_gem_chipset_flush(obj->base.dev);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = obj->base.size;
+
+	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_len(sg) = obj->base.size;
+
+	obj->pages = st;
+	return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	if (obj->dirty) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = obj->phys_handle->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page;
+			char *dst;
+
+			page = shmem_read_mapping_page(mapping, i);
+			if (IS_ERR(page))
+				continue;
+
+			dst = kmap_atomic(page);
+			drm_clflush_virt_range(vaddr, PAGE_SIZE);
+			memcpy(dst, vaddr, PAGE_SIZE);
+			kunmap_atomic(dst);
+
+			set_page_dirty(page);
+			if (obj->madv == I915_MADV_WILLNEED)
+				mark_page_accessed(page);
+			page_cache_release(page);
+			vaddr += PAGE_SIZE;
+		}
+		obj->dirty = 0;
+	}
+
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+	drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+	.get_pages = i915_gem_object_get_pages_phys,
+	.put_pages = i915_gem_object_put_pages_phys,
+	.release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	drm_gem_object_reference(&obj->base);
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+		if (i915_vma_unbind(vma))
+			break;
+
+	ret = i915_gem_object_put_pages(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	return ret;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	int ret;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	ret = drop_pages(obj);
+	if (ret)
+		return ret;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	obj->phys_handle = phys;
+	obj->ops = &i915_gem_phys_ops;
+
+	return i915_gem_object_get_pages(obj);
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret = 0;
+
+	/* We manually control the domain here and pretend that it
+	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+	 */
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
+
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	drm_clflush_virt_range(vaddr, args->size);
+	i915_gem_chipset_flush(dev);
+
+out:
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+	return ret;
+}
+
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	kmem_cache_free(dev_priv->objects, obj);
+}
+
+static int
+i915_gem_create(struct drm_file *file,
+		struct drm_device *dev,
+		uint64_t size,
+		uint32_t *handle_p)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	/* Allocate the new object */
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		return ret;
+
+	*handle_p = handle;
+	return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+		     struct drm_device *dev,
+		     struct drm_mode_create_dumb *args)
+{
+	/* have to work out size/pitch and return them */
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
+	args->size = args->pitch * args->height;
+	return i915_gem_create(file, dev,
+			       args->size, &args->handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_gem_create *args = data;
+
+	return i915_gem_create(file, dev,
+			       args->size, &args->handle);
+}
+
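+/* Copy helpers for bit-17-swizzled objects: within each 128-byte span
+ * the two 64-byte halves are swapped (gpu_offset ^ 64), so the copy is
+ * split into chunks that never cross a 64-byte cacheline boundary.
+ */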
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+			const char *gpu_vaddr, int gpu_offset,
+			int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_to_user(cpu_vaddr + cpu_offset,
+				     gpu_vaddr + swizzled_gpu_offset,
+				     this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+			  const char __user *cpu_vaddr,
+			  int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+				       cpu_vaddr + cpu_offset,
+				       this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, put ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
+static int
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	vaddr = kmap_atomic(page);
+	if (needs_clflush)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_to_user_inatomic(user_data,
+				      vaddr + shmem_page_offset,
+				      page_length);
+	kunmap_atomic(vaddr);
+
+	return ret ? -EFAULT : 0;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (unlikely(swizzled)) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = round_down(start, 128);
+		end = round_up(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
+	}
+}
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	int ret;
+
+	vaddr = kmap(page);
+	if (needs_clflush)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+
+	if (page_do_bit17_swizzling)
+		ret = __copy_to_user_swizzled(user_data,
+					      vaddr, shmem_page_offset,
+					      page_length);
+	else
+		ret = __copy_to_user(user_data,
+				     vaddr + shmem_page_offset,
+				     page_length);
+	kunmap(page);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pread *args,
+		     struct drm_file *file)
+{
+	char __user *user_data;
+	ssize_t remain;
+	loff_t offset;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int prefaulted = 0;
+	int needs_clflush = 0;
+	struct sg_page_iter sg_iter;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+	if (ret)
+		return ret;
+
+	offset = args->offset;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
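+		/* Bit 17 of the page's physical address determines whether
+		 * the hardware swizzles this particular page. */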
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+		if (ret == 0)
+			goto next_page;
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (likely(!i915.prefault_disable) && !prefaulted) {
+			ret = fault_in_multipages_writeable(user_data, remain);
+			/* Userspace is tricking us, but we've already clobbered
+			 * its pages with the prefault and promised to write the
+			 * data up to the first fault. Hence ignore any errors
+			 * and just continue. */
+			(void)ret;
+			prefaulted = 1;
+		}
+
+		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+
+		mutex_lock(&dev->struct_mutex);
+
+		if (ret)
+			goto out;
+
+next_page:
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+
+	return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE,
+		       to_user_ptr(args->data_ptr),
+		       args->size))
+		return -EFAULT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Bounds check source.  */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+	ret = i915_gem_shmem_pread(dev, obj, args, file);
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/* This is the fast write path which cannot handle
+ * page faults in the source data
+ */
+
+static inline int
+fast_user_write(struct io_mapping *mapping,
+		loff_t page_base, int page_offset,
+		char __user *user_data,
+		int length)
+{
+	void __iomem *vaddr_atomic;
+	void *vaddr;
+	unsigned long unwritten;
+
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force *)vaddr_atomic + page_offset;
+	unwritten = __copy_from_user_inatomic_nocache(vaddr,
+						      user_data, length);
+	io_mapping_unmap_atomic(vaddr_atomic);
+	return unwritten;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
+static int
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length, ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		goto out_unpin;
+
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		goto out_unpin;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = offset & PAGE_MASK;
+		page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (fast_user_write(dev_priv->gtt.mappable, page_base,
+				    page_offset, user_data, page_length)) {
+			ret = -EFAULT;
+			goto out_flush;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out_flush:
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+out_unpin:
+	i915_gem_object_ggtt_unpin(obj);
+out:
+	return ret;
+}
+
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
+static int
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	vaddr = kmap_atomic(page);
+	if (needs_clflush_before)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
+					user_data, page_length);
+	if (needs_clflush_after)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	kunmap_atomic(vaddr);
+
+	return ret ? -EFAULT : 0;
+}
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	int ret;
+
+	vaddr = kmap(page);
+	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	if (page_do_bit17_swizzling)
+		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+						user_data,
+						page_length);
+	else
+		ret = __copy_from_user(vaddr + shmem_page_offset,
+				       user_data,
+				       page_length);
+	if (needs_clflush_after)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	kunmap(page);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev,
+		      struct drm_i915_gem_object *obj,
+		      struct drm_i915_gem_pwrite *args,
+		      struct drm_file *file)
+{
+	ssize_t remain;
+	loff_t offset;
+	char __user *user_data;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int hit_slowpath = 0;
+	int needs_clflush_after = 0;
+	int needs_clflush_before = 0;
+	struct sg_page_iter sg_iter;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the cpu write domain, put ourselves into the gtt
+		 * write domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will use the data
+		 * right away and we therefore have to clflush anyway. */
+		needs_clflush_after = cpu_write_needs_clflush(obj);
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
+	}
+	/* The same trick applies for invalidating partially written cachelines
+	 * that are read before being written. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		needs_clflush_before =
+			!cpu_cache_is_coherent(dev, obj->cache_level);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+
+	i915_gem_object_pin_pages(obj);
+
+	offset = args->offset;
+	obj->dirty = 1;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		int partial_cacheline_write;
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
+		/* If we don't overwrite a cacheline completely we need to be
+		 * careful to have up-to-date data by first clflushing. Don't
+		 * overcomplicate things and flush the entire range. */
+		partial_cacheline_write = needs_clflush_before &&
+			((shmem_page_offset | page_length)
+				& (boot_cpu_data.x86_clflush_size - 1));
+
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+		if (ret == 0)
+			goto next_page;
+
+		hit_slowpath = 1;
+		mutex_unlock(&dev->struct_mutex);
+		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+
+		mutex_lock(&dev->struct_mutex);
+
+		if (ret)
+			goto out;
+
+next_page:
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+
+	if (hit_slowpath) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+			if (i915_gem_clflush_object(obj, obj->pin_display))
+				needs_clflush_after = true;
+		}
+	}
+
+	if (needs_clflush_after)
+		i915_gem_chipset_flush(dev);
+	else
+		obj->cache_dirty = true;
+
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+	return ret;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_READ,
+		       to_user_ptr(args->data_ptr),
+		       args->size))
+		return -EFAULT;
+
+	if (likely(!i915.prefault_disable)) {
+		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+						   args->size);
+		if (ret)
+			return -EFAULT;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto put_rpm;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Bounds check destination. */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
+	ret = -EFAULT;
+	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+	 * it would end up going through the fenced access, and we'll get
+	 * different detiling behavior between reading and writing.
+	 * pread/pwrite currently are reading and writing from the CPU
+	 * perspective, requiring manual detiling by the client.
+	 */
+	if (obj->tiling_mode == I915_TILING_NONE &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+	    cpu_write_needs_clflush(obj)) {
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+		/* Note that the gtt paths might fail with non-page-backed user
+		 * pointers (e.g. gtt mappings when moving data between
+		 * textures). Fallback to the shmem path in that case. */
+	}
+
+	if (ret == -EFAULT || ret == -ENOSPC) {
+		if (obj->phys_handle)
+			ret = i915_gem_phys_pwrite(obj, args, file);
+		else
+			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+	}
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+put_rpm:
+	intel_runtime_pm_put(dev_priv);
+
+	return ret;
+}
+
+int
+i915_gem_check_wedge(struct i915_gpu_error *error,
+		     bool interruptible)
+{
+	if (i915_reset_in_progress(error)) {
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but the reset failed ... */
+		if (i915_terminally_wedged(error))
+			return -EIO;
+
+		/*
+		 * Check if GPU Reset is in progress - we need intel_ring_begin
+		 * to work properly to reinit the hw state while the gpu is
+		 * still marked as reset-in-progress. Handle this with a flag.
+		 */
+		if (!error->reload_in_reset)
+			return -EAGAIN;
+	}
+
+	return 0;
+}
+
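+/* Timer callback used to wake the waiting task when we suspect the
+ * completion interrupt was missed.
+ */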
+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_engine_cs *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static unsigned long local_clock_us(unsigned *cpu)
+{
+	unsigned long t;
+
+	/* Cheaply and approximately convert from nanoseconds to microseconds.
+	 * The result and subsequent calculations are also defined in the same
+	 * approximate microseconds units. The principal source of timing
+	 * error here is from the simple truncation.
+	 *
+	 * Note that local_clock() is only defined wrt the current CPU;
+	 * the comparisons are no longer valid if we switch CPUs. Instead of
+	 * blocking preemption for the entire busywait, we can detect the CPU
+	 * switch and use that as indicator of system load and a reason to
+	 * stop busywaiting, see busywait_stop().
+	 */
+	*cpu = get_cpu();
+	t = local_clock() >> 10;
+	put_cpu();
+
+	return t;
+}
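+
+/*
+ * A worked example of the approximation above: a local_clock() reading of
+ * 5,000,000ns yields 5,000,000 >> 10 = 4882 "microseconds" against a true
+ * 5000us, i.e. roughly a 2.3% under-estimate plus truncation, which is
+ * acceptable for bounding a short busywait.
+ */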
+
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+	unsigned this_cpu;
+
+	if (time_after(local_clock_us(&this_cpu), timeout))
+		return true;
+
+	return this_cpu != cpu;
+}
+
+static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+{
+	unsigned long timeout;
+	unsigned cpu;
+
+	/* When waiting for high frequency requests, e.g. during synchronous
+	 * rendering split between the CPU and GPU, the finite amount of time
+	 * required to set up the irq and wait upon it limits the response
+	 * rate. By busywaiting on the request completion for a short while we
+	 * can service the high frequency waits as quickly as possible. However,
+	 * if it is a slow request, we want to sleep as quickly as possible.
+	 * The tradeoff between waiting and sleeping is roughly the time it
+	 * takes to sleep on a request, on the order of a microsecond.
+	 */
+
+	if (req->ring->irq_refcount)
+		return -EBUSY;
+
+	/* Only spin if we know the GPU is processing this request */
+	if (!i915_gem_request_started(req, true))
+		return -EAGAIN;
+
+	timeout = local_clock_us(&cpu) + 5;
+	while (!need_resched()) {
+		if (i915_gem_request_completed(req, true))
+			return 0;
+
+		if (signal_pending_state(state, current))
+			break;
+
+		if (busywait_stop(timeout, cpu))
+			break;
+
+		cpu_relax_lowlatency();
+	}
+
+	if (i915_gem_request_completed(req, false))
+		return 0;
+
+	return -EAGAIN;
+}
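+
+/*
+ * Note on the spin budget above: timeout = local_clock_us(&cpu) + 5 caps
+ * the busywait at roughly 5us, on the same order as the cost of sleeping
+ * on a request, before the caller falls back to an interrupt-driven wait.
+ */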
+
+/**
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @reset_counter: reset sequence associated with the given request
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client used to boost GPU frequency while waiting (may be NULL)
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request was found within the allotted time. Otherwise
+ * returns the errno, with the remaining time filled into the timeout argument.
+ */
+int __i915_wait_request(struct drm_i915_gem_request *req,
+			unsigned reset_counter,
+			bool interruptible,
+			s64 *timeout,
+			struct intel_rps_client *rps)
+{
+	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const bool irq_test_in_progress =
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	DEFINE_WAIT(wait);
+	unsigned long timeout_expire;
+	s64 before, now;
+	int ret;
+
+	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+
+	if (list_empty(&req->list))
+		return 0;
+
+	if (i915_gem_request_completed(req, true))
+		return 0;
+
+	timeout_expire = 0;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+	}
+
+	if (INTEL_INFO(dev_priv)->gen >= 6)
+		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
+
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(req);
+	before = ktime_get_raw_ns();
+
+	/* Optimistic spin for the next jiffie before touching IRQs */
+	ret = __i915_spin_request(req, state);
+	if (ret == 0)
+		goto out;
+
+	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	for (;;) {
+		struct timer_list timer;
+
+		prepare_to_wait(&ring->irq_queue, &wait, state);
+
+		/* We need to check whether any gpu reset happened in between
+		 * the caller grabbing the seqno and now ... */
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truly gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}
+
+		if (i915_gem_request_completed(req, false)) {
+			ret = 0;
+			break;
+		}
+
+		if (signal_pending_state(state, current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout && time_after_eq(jiffies, timeout_expire)) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			unsigned long expire;
+
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
+	if (!irq_test_in_progress)
+		ring->irq_put(ring);
+
+	finish_wait(&ring->irq_queue, &wait);
+
+out:
+	now = ktime_get_raw_ns();
+	trace_i915_gem_request_wait_end(req);
+
+	if (timeout) {
+		s64 tres = *timeout - (now - before);
+
+		*timeout = tres < 0 ? 0 : tres;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+			*timeout = 0;
+	}
+
+	return ret;
+}
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_private *dev_private;
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	dev_private = req->ring->dev->dev_private;
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	req->pid = get_pid(task_pid(current));
+
+	return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_file_private *file_priv = request->file_priv;
+
+	if (!file_priv)
+		return;
+
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+
+	put_pid(request->pid);
+	request->pid = NULL;
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+	trace_i915_gem_request_retire(request);
+
+	/* We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	request->ringbuf->last_retired_head = request->postfix;
+
+	list_del_init(&request->list);
+	i915_gem_request_remove_from_client(request);
+
+	i915_gem_request_unreference(request);
+}
+
+static void
+__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_i915_gem_request *tmp;
+
+	lockdep_assert_held(&engine->dev->struct_mutex);
+
+	if (list_empty(&req->list))
+		return;
+
+	do {
+		tmp = list_first_entry(&engine->request_list,
+				       typeof(*tmp), list);
+
+		i915_gem_request_retire(tmp);
+	} while (tmp != req);
+
+	WARN_ON(i915_verify_lists(engine->dev));
+}
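+
+/*
+ * For example, with requests A, B and C outstanding on an engine in
+ * submission order, __i915_gem_request_retire__upto(B) retires A and then
+ * B, honouring the completion-order requirement noted above, while C
+ * remains on the request list.
+ */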
+
+/**
+ * Waits for a request to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_i915_gem_request *req)
+{
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	bool interruptible;
+	int ret;
+
+	BUG_ON(req == NULL);
+
+	dev = req->ring->dev;
+	dev_priv = dev->dev_private;
+	interruptible = dev_priv->mm.interruptible;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+	if (ret)
+		return ret;
+
+	ret = __i915_wait_request(req,
+				  atomic_read(&dev_priv->gpu_error.reset_counter),
+				  interruptible, NULL, NULL);
+	if (ret)
+		return ret;
+
+	__i915_gem_request_retire__upto(req);
+	return 0;
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	int ret, i;
+
+	if (!obj->active)
+		return 0;
+
+	if (readonly) {
+		if (obj->last_write_req != NULL) {
+			ret = i915_wait_request(obj->last_write_req);
+			if (ret)
+				return ret;
+
+			i = obj->last_write_req->ring->id;
+			if (obj->last_read_req[i] == obj->last_write_req)
+				i915_gem_object_retire__read(obj, i);
+			else
+				i915_gem_object_retire__write(obj);
+		}
+	} else {
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			if (obj->last_read_req[i] == NULL)
+				continue;
+
+			ret = i915_wait_request(obj->last_read_req[i]);
+			if (ret)
+				return ret;
+
+			i915_gem_object_retire__read(obj, i);
+		}
+		RQ_BUG_ON(obj->active);
+	}
+
+	return 0;
+}
+
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+			       struct drm_i915_gem_request *req)
+{
+	int ring = req->ring->id;
+
+	if (obj->last_read_req[ring] == req)
+		i915_gem_object_retire__read(obj, ring);
+	else if (obj->last_write_req == req)
+		i915_gem_object_retire__write(obj);
+
+	__i915_gem_request_retire__upto(req);
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct intel_rps_client *rps,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *requests[I915_NUM_RINGS];
+	unsigned reset_counter;
+	int ret, i, n = 0;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	if (!obj->active)
+		return 0;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
+	if (ret)
+		return ret;
+
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	if (readonly) {
+		struct drm_i915_gem_request *req;
+
+		req = obj->last_write_req;
+		if (req == NULL)
+			return 0;
+
+		requests[n++] = i915_gem_request_reference(req);
+	} else {
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = obj->last_read_req[i];
+			if (req == NULL)
+				continue;
+
+			requests[n++] = i915_gem_request_reference(req);
+		}
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	for (i = 0; ret == 0 && i < n; i++)
+		ret = __i915_wait_request(requests[i], reset_counter, true,
+					  NULL, rps);
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++) {
+		if (ret == 0)
+			i915_gem_object_retire_request(obj, requests[i]);
+		i915_gem_request_unreference(requests[i]);
+	}
+
+	return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+	return &fpriv->rps;
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_i915_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
+	int ret;
+
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	if (read_domains & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj,
+							  to_rps_client(file),
+							  !write_domain);
+	if (ret)
+		goto unref;
+
+	if (read_domains & I915_GEM_DOMAIN_GTT)
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+	else
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+	if (write_domain != 0)
+		intel_fb_obj_invalidate(obj,
+					write_domain == I915_GEM_DOMAIN_GTT ?
+					ORIGIN_GTT : ORIGIN_CPU);
+
+unref:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
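+
+/*
+ * For illustration, userspace would prepare an object for CPU-visible GTT
+ * access via the ioctl above roughly as follows (a minimal sketch, assuming
+ * "fd" and "handle" are valid):
+ *
+ *	struct drm_i915_gem_set_domain set_domain = {
+ *		.handle = handle,
+ *		.read_domains = I915_GEM_DOMAIN_GTT,
+ *		.write_domain = I915_GEM_DOMAIN_GTT,
+ *	};
+ *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ */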
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file)
+{
+	struct drm_i915_gem_sw_finish *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Pinned buffers may be scanout, so flush the cache */
+	if (obj->pin_display)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example of how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on, hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned long addr;
+
+	if (args->flags & ~(I915_MMAP_WC))
+		return -EINVAL;
+
+	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (obj == NULL)
+		return -ENOENT;
+
+	/* prime objects have no backing filp to GEM mmap
+	 * pages from.
+	 */
+	if (!obj->filp) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
+	addr = vm_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	if (args->flags & I915_MMAP_WC) {
+		struct mm_struct *mm = current->mm;
+		struct vm_area_struct *vma;
+
+		down_write(&mm->mmap_sem);
+		vma = find_vma(mm, addr);
+		if (vma)
+			vma->vm_page_prot =
+				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		else
+			addr = -ENOMEM;
+		up_write(&mm->mmap_sem);
+	}
+	drm_gem_object_unreference_unlocked(obj);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	args->addr_ptr = (uint64_t) addr;
+
+	return 0;
+}
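+
+/*
+ * For illustration, the legacy cpu mmap path above is driven from userspace
+ * roughly as follows (a minimal sketch, assuming "fd", "handle" and
+ * "obj_size" are valid; flags may instead be I915_MMAP_WC on systems with
+ * PAT support):
+ *
+ *	struct drm_i915_gem_mmap mmap_arg = {
+ *		.handle = handle,
+ *		.size = obj_size,
+ *		.flags = 0,
+ *	};
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
+ *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ */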
+
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace.  The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_ggtt_view view = i915_ggtt_view_normal;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+
+	intel_runtime_pm_get(dev_priv);
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
+
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+	/* Try to flush the object off the GPU first without holding the lock.
+	 * Upon reacquiring the lock, we will perform our sanity checks and then
+	 * repeat the flush holding the lock in the normal manner to catch cases
+	 * where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+	if (ret)
+		goto unlock;
+
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+		ret = -EFAULT;
+		goto unlock;
+	}
+
+	/* Use a partial view if the object is bigger than the aperture. */
+	if (obj->base.size >= dev_priv->gtt.mappable_end &&
+	    obj->tiling_mode == I915_TILING_NONE) {
+		static const unsigned int chunk_size = 256; /* 1 MiB */
+
+		memset(&view, 0, sizeof(view));
+		view.type = I915_GGTT_VIEW_PARTIAL;
+		view.params.partial.offset = rounddown(page_offset, chunk_size);
+		view.params.partial.size =
+			min_t(unsigned int,
+			      chunk_size,
+			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+			      view.params.partial.offset);
+	}
+
+	/* Now pin it into the GTT if needed */
+	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
+	if (ret)
+		goto unlock;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
+
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto unpin;
+
+	/* Finally, remap it using the new GTT offset */
+	pfn = dev_priv->gtt.mappable_base +
+		i915_gem_obj_ggtt_offset_view(obj, &view);
+	pfn >>= PAGE_SHIFT;
+
+	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
+		/* Overriding existing pages in partial view does not cause
+		 * us any trouble as TLBs are still valid because the fault
+		 * is due to userspace losing part of the mapping or never
+		 * having accessed it before (in this partial view's range).
+		 */
+		unsigned long base = vma->vm_start +
+				     (view.params.partial.offset << PAGE_SHIFT);
+		unsigned int i;
+
+		for (i = 0; i < view.params.partial.size; i++) {
+			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
+			if (ret)
+				break;
+		}
+
+		obj->fault_mappable = true;
+	} else {
+		if (!obj->fault_mappable) {
+			unsigned long size = min_t(unsigned long,
+						   vma->vm_end - vma->vm_start,
+						   obj->base.size);
+			int i;
+
+			for (i = 0; i < size >> PAGE_SHIFT; i++) {
+				ret = vm_insert_pfn(vma,
+						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
+						    pfn + i);
+				if (ret)
+					break;
+			}
+
+			obj->fault_mappable = true;
+		} else
+			ret = vm_insert_pfn(vma,
+					    (unsigned long)vmf->virtual_address,
+					    pfn + page_offset);
+	}
+unpin:
+	i915_gem_object_ggtt_unpin_view(obj, &view);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	switch (ret) {
+	case -EIO:
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+			ret = VM_FAULT_SIGBUS;
+			break;
+		}
+		/* fall through */
+	case -EAGAIN:
+		/*
+		 * EAGAIN means the gpu is hung and we'll wait for the error
+		 * handler to reset everything when re-faulting in
+		 * i915_mutex_lock_interruptible.
+		 */
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		ret = VM_FAULT_NOPAGE;
+		break;
+	case -ENOMEM:
+		ret = VM_FAULT_OOM;
+		break;
+	case -ENOSPC:
+	case -EFAULT:
+		ret = VM_FAULT_SIGBUS;
+		break;
+	default:
+		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
+		ret = VM_FAULT_SIGBUS;
+		break;
+	}
+
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+{
+	if (!obj->fault_mappable)
+		return;
+
+	drm_vma_node_unmap(&obj->base.vma_node,
+			   obj->base.dev->anon_inode->i_mapping);
+	obj->fault_mappable = false;
+}
+
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+		i915_gem_release_mmap(obj);
+}
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+	uint32_t gtt_size;
+
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    tiling_mode == I915_TILING_NONE)
+		return size;
+
+	/* Previous chips need a power-of-two fence region when tiling */
+	if (INTEL_INFO(dev)->gen == 3)
+		gtt_size = 1024*1024;
+	else
+		gtt_size = 512*1024;
+
+	while (gtt_size < size)
+		gtt_size <<= 1;
+
+	return gtt_size;
+}
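+
+/*
+ * A worked example of the rounding above: a 700KiB tiled object on gen3
+ * starts from a 1MiB fence region, which already covers it, so 1MiB is
+ * returned; on gen2 the search starts at 512KiB and doubles once to 1MiB.
+ */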
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @dev: DRM device
+ * @size: object size
+ * @tiling_mode: tiling mode of the object
+ * @fenced: whether a fence register mapping is required
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping.
+ */
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced)
+{
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	    tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (drm_vma_node_has_offset(&obj->base.vma_node))
+		return 0;
+
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		goto out;
+
+	/* Badly fragmented mmap space? The only way we can recover
+	 * space is by destroying unwanted objects. We can't randomly release
+	 * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can get is to release the
+	 * offsets on purgeable objects by truncating them and marking them purged,
+	 * which prevents userspace from ever using that object again.
+	 */
+	i915_gem_shrink(dev_priv,
+			obj->base.size >> PAGE_SHIFT,
+			I915_SHRINK_BOUND |
+			I915_SHRINK_UNBOUND |
+			I915_SHRINK_PURGEABLE);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		goto out;
+
+	i915_gem_shrink_all(dev_priv);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	drm_gem_free_mmap_offset(&obj->base);
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+		  struct drm_device *dev,
+		  uint32_t handle,
+		  uint64_t *offset)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = i915_gem_object_create_mmap_offset(obj);
+	if (ret)
+		goto out;
+
+	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+
+	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
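+
+/*
+ * For illustration, the full GTT mmap flow from userspace is roughly (a
+ * minimal sketch, assuming "fd", "handle" and "obj_size" are valid): fetch
+ * the fake offset, then mmap the DRM fd itself at that offset so that
+ * faults land in i915_gem_fault() above.
+ *
+ *	struct drm_i915_gem_mmap_gtt mmap_gtt = { .handle = handle };
+ *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt);
+ *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, mmap_gtt.offset);
+ */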
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_free_mmap_offset(obj);
+
+	if (obj->base.filp == NULL)
+		return;
+
+	/* Our goal here is to return as much of the memory as
+	 * possible back to the system, as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
+	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+	obj->madv = __I915_MADV_PURGED;
+}
+
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+{
+	struct address_space *mapping;
+
+	switch (obj->madv) {
+	case I915_MADV_DONTNEED:
+		i915_gem_object_truncate(obj);
+		/* fall through */
+	case __I915_MADV_PURGED:
+		return;
+	}
+
+	if (obj->base.filp == NULL)
+		return;
+
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int ret;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj, true);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	i915_gem_gtt_finish_object(obj);
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj);
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+
+		if (obj->dirty)
+			set_page_dirty(page);
+
+		if (obj->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(page);
+
+		page_cache_release(page);
+	}
+	obj->dirty = 0;
+
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+	if (obj->pages == NULL)
+		return 0;
+
+	if (obj->pages_pin_count)
+		return -EBUSY;
+
+	BUG_ON(i915_gem_obj_bound_any(obj));
+
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->global_list);
+
+	ops->put_pages(obj);
+	obj->pages = NULL;
+
+	i915_gem_object_invalidate(obj);
+
+	return 0;
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
+	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	int ret;
+	gfp_t gfp;
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
+	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_shrink(dev_priv,
+					page_count,
+					I915_SHRINK_BOUND |
+					I915_SHRINK_UNBOUND |
+					I915_SHRINK_PURGEABLE);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffer, now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page(mapping, i);
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
+				goto err_pages;
+			}
+		}
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+	}
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
+		sg_mark_end(sg);
+	obj->pages = st;
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret)
+		goto err_pages;
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	if (obj->tiling_mode != I915_TILING_NONE &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		i915_gem_object_pin_pages(obj);
+
+	return 0;
+
+err_pages:
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_page_iter_page(&sg_iter));
+	sg_free_table(st);
+	kfree(st);
+
+	/* shmemfs first checks if there is enough memory to allocate the page
+	 * and reports ENOSPC should there be insufficient memory, along with
+	 * the usual ENOMEM for a genuine allocation failure.
+	 *
+	 * We use ENOSPC in our driver to mean that we have run out of aperture
+	 * space and so want to translate the error from shmemfs back to our
+	 * usual understanding of ENOMEM.
+	 */
+	if (ret == -ENOSPC)
+		ret = -ENOMEM;
+
+	return ret;
+}
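+
+/*
+ * The allocation loop above coalesces physically contiguous pages into a
+ * single scatterlist entry (except under SWIOTLB). For example, pages at
+ * pfns 100, 101, 102 and 200 produce two sg entries: one covering three
+ * pages and one covering a single page.
+ */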
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	int ret;
+
+	if (obj->pages)
+		return 0;
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_DEBUG("Attempting to obtain a purgeable object\n");
+		return -EFAULT;
+	}
+
+	BUG_ON(obj->pages_pin_count);
+
+	ret = ops->get_pages(obj);
+	if (ret)
+		return ret;
+
+	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+	obj->get_page.sg = obj->pages->sgl;
+	obj->get_page.last = 0;
+
+	return 0;
+}
+
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
+
+	/* Add a reference if we're newly entering the active list. */
+	if (obj->active == 0)
+		drm_gem_object_reference(&obj->base);
+	obj->active |= intel_ring_flag(ring);
+
+	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+}
+
+static void
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
+{
+	RQ_BUG_ON(obj->last_write_req == NULL);
+	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+
+	i915_gem_request_assign(&obj->last_write_req, NULL);
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
+
+static void
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
+{
+	struct i915_vma *vma;
+
+	RQ_BUG_ON(obj->last_read_req[ring] == NULL);
+	RQ_BUG_ON(!(obj->active & (1 << ring)));
+
+	list_del_init(&obj->ring_list[ring]);
+	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+
+	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+		i915_gem_object_retire__write(obj);
+
+	obj->active &= ~(1 << ring);
+	if (obj->active)
+		return;
+
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to ofc!)
+	 */
+	list_move_tail(&obj->global_list,
+		       &to_i915(obj->base.dev)->mm.bound_list);
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (!list_empty(&vma->mm_list))
+			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	}
+
+	i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	drm_gem_object_unreference(&obj->base);
+}
+
+static int
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i, j;
+
+	/* Carefully retire all requests without writing to the rings */
+	for_each_ring(ring, dev_priv, i) {
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+	i915_gem_retire_requests(dev);
+
+	/* Finally reset hw state */
+	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
+		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+			ring->semaphore.sync_seqno[j] = 0;
+	}
+
+	return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* The HWS page seqno needs to be set to less than the seqno we
+	 * will inject into the ring
+	 */
+	ret = i915_gem_init_seqno(dev, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
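+
+/*
+ * A worked example of the wrap handling above: i915_gem_set_seqno(dev, 1)
+ * sets next_seqno to 1 and last_seqno to 0, which the final decrement wraps
+ * to 0xffffffff so that seqno wrap detection keeps working.
+ */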
+
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_init_seqno(dev, 0);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	return 0;
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
+{
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
+	struct intel_ringbuffer *ringbuf;
+	u32 request_start;
+	int ret;
+
+	if (WARN_ON(request == NULL))
+		return;
+
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	intel_ring_reserved_space_use(ringbuf);
+
+	request_start = intel_ring_get_tail(ringbuf);
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(request);
+		else
+			ret = intel_ring_flush_all_caches(request);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	}
+
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	request->postfix = intel_ring_get_tail(ringbuf);
+
+	if (i915.enable_execlists)
+		ret = ring->emit_request(request);
+	else {
+		ret = ring->add_request(request);
+
+		request->tail = intel_ring_get_tail(ringbuf);
+	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
+	request->head = request_start;
+
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */
+	request->batch_obj = obj;
+
+	request->emitted_jiffies = jiffies;
+	request->previous_seqno = ring->last_submitted_seqno;
+	ring->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &ring->request_list);
+
+	trace_i915_gem_request_add(request);
+
+	i915_queue_hangcheck(ring->dev);
+
+	queue_delayed_work(dev_priv->wq,
+			   &dev_priv->mm.retire_work,
+			   round_jiffies_up_relative(HZ));
+	intel_mark_busy(dev_priv->dev);
+
+	/* Sanity check that the reserved size was large enough. */
+	intel_ring_reserved_space_end(ringbuf);
+}
+
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+				   const struct intel_context *ctx)
+{
+	unsigned long elapsed;
+
+	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
+
+	if (ctx->hang_stats.banned)
+		return true;
+
+	if (ctx->hang_stats.ban_period_seconds &&
+	    elapsed <= ctx->hang_stats.ban_period_seconds) {
+		if (!i915_gem_context_is_default(ctx)) {
+			DRM_DEBUG("context hanging too fast, banning!\n");
+			return true;
+		} else if (i915_stop_ring_allow_ban(dev_priv)) {
+			if (i915_stop_ring_allow_warn(dev_priv))
+				DRM_ERROR("gpu hanging too fast, banning!\n");
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+				  struct intel_context *ctx,
+				  const bool guilty)
+{
+	struct i915_ctx_hang_stats *hs;
+
+	if (WARN_ON(!ctx))
+		return;
+
+	hs = &ctx->hang_stats;
+
+	if (guilty) {
+		hs->banned = i915_context_is_banned(dev_priv, ctx);
+		hs->batch_active++;
+		hs->guilty_ts = get_seconds();
+	} else {
+		hs->batch_pending++;
+	}
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+	struct drm_i915_gem_request *req = container_of(req_ref,
+						 typeof(*req), ref);
+	struct intel_context *ctx = req->ctx;
+
+	if (req->file_priv)
+		i915_gem_request_remove_from_client(req);
+
+	if (ctx) {
+		if (i915.enable_execlists) {
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
+		}
+
+		i915_gem_context_unreference(ctx);
+	}
+
+	kmem_cache_free(req->i915->requests, req);
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_gem_request *req;
+	int ret;
+
+	if (!req_out)
+		return -EINVAL;
+
+	*req_out = NULL;
+
+	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+	if (req == NULL)
+		return -ENOMEM;
+
+	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	if (ret)
+		goto err;
+
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
+	req->ring = ring;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
+
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_alloc_request_extras(req);
+	else
+		ret = intel_ring_alloc_request_extras(req);
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
+		goto err;
+	}
+
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_reserve_space(req);
+	else
+		ret = intel_ring_reserve_space(req);
+	if (ret) {
+		/*
+		 * At this point, the request is fully allocated even if not
+		 * fully prepared. Thus it can be cleaned up using the proper
+		 * free code.
+		 */
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	*req_out = req;
+	return 0;
+
+err:
+	kmem_cache_free(dev_priv->requests, req);
+	return ret;
+}
+
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+	intel_ring_reserved_space_cancel(req->ringbuf);
+
+	i915_gem_request_unreference(req);
+}
+
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_request *request;
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_gem_request_completed(request, false))
+			continue;
+
+		return request;
+	}
+
+	return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+				       struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_request *request;
+	bool ring_hung;
+
+	request = i915_gem_find_active_request(ring);
+
+	if (request == NULL)
+		return;
+
+	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+	list_for_each_entry_continue(request, &ring->request_list, list)
+		i915_set_reset_status(dev_priv, request->ctx, false);
+}
+
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *ring)
+{
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list[ring->id]);
+
+		i915_gem_object_retire__read(obj, ring->id);
+	}
+
+	/*
+	 * Clear the execlists queue before freeing the requests, as those
+	 * are the ones that keep the context and ringbuffer backing objects
+	 * pinned in place.
+	 */
+	while (!list_empty(&ring->execlist_queue)) {
+		struct drm_i915_gem_request *submit_req;
+
+		submit_req = list_first_entry(&ring->execlist_queue,
+				struct drm_i915_gem_request,
+				execlist_link);
+		list_del(&submit_req->execlist_link);
+
+		if (submit_req->ctx != ring->default_context)
+			intel_lr_context_unpin(submit_req);
+
+		i915_gem_request_unreference(submit_req);
+	}
+
+	/*
+	 * We must free the requests after all the corresponding objects have
+	 * been moved off active lists, which is the same order the normal
+	 * retire_requests function uses. This is important if objects hold
+	 * implicit references on things like e.g. ppgtt address spaces through
+	 * the request.
+	 */
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		i915_gem_request_retire(request);
+	}
+}
+
+void i915_gem_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
+
+	i915_gem_context_reset(dev);
+
+	i915_gem_restore_fences(dev);
+
+	WARN_ON(i915_verify_lists(dev));
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+{
+	WARN_ON(i915_verify_lists(ring->dev));
+
+	/* Retire requests first as we use it above for the early return.
+	 * If we retire requests last, we may use a later seqno and so clear
+	 * the requests lists without clearing the active list, leading to
+	 * confusion.
+	 */
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		if (!i915_gem_request_completed(request, true))
+			break;
+
+		i915_gem_request_retire(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate,
+	 * before we free the context associated with the requests.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				      struct drm_i915_gem_object,
+				      ring_list[ring->id]);
+
+		if (!list_empty(&obj->last_read_req[ring->id]->list))
+			break;
+
+		i915_gem_object_retire__read(obj, ring->id);
+	}
+
+	if (unlikely(ring->trace_irq_req &&
+		     i915_gem_request_completed(ring->trace_irq_req, true))) {
+		ring->irq_put(ring);
+		i915_gem_request_assign(&ring->trace_irq_req, NULL);
+	}
+
+	WARN_ON(i915_verify_lists(ring->dev));
+}
+
+bool
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	bool idle = true;
+	int i;
+
+	for_each_ring(ring, dev_priv, i) {
+		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+		if (i915.enable_execlists) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&ring->execlist_lock, flags);
+			idle &= list_empty(&ring->execlist_queue);
+			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+			intel_execlists_retire_requests(ring);
+		}
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.idle_work,
+				   msecs_to_jiffies(100));
+
+	return idle;
+}
+
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	bool idle;
+
+	/* Come back later if the device is busy... */
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+	if (!idle)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
+}
+
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		if (!list_empty(&ring->request_list))
+			return;
+
+	intel_mark_idle(dev);
+
+	if (mutex_trylock(&dev->struct_mutex)) {
+		struct intel_engine_cs *ring;
+		int i;
+
+		for_each_ring(ring, dev_priv, i)
+			i915_gem_batch_pool_fini(&ring->batch_pool);
+
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+	int i;
+
+	if (!obj->active)
+		return 0;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct drm_i915_gem_request *req;
+
+		req = obj->last_read_req[i];
+		if (req == NULL)
+			continue;
+
+		if (list_empty(&req->list))
+			goto retire;
+
+		if (i915_gem_request_completed(req, true)) {
+			__i915_gem_request_retire__upto(req);
+retire:
+			i915_gem_object_retire__read(obj, i);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ *  -ETIME: object is still busy after timeout
+ *  -ERESTARTSYS: signal interrupted the wait
+ *  -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ *  -EAGAIN: GPU wedged
+ *  -ENOMEM: damn
+ *  -ENODEV: Internal IRQ fail
+ *  -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the busy
+ * ioctl.
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_wait *args = data;
+	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *req[I915_NUM_RINGS];
+	unsigned reset_counter;
+	int i, n = 0;
+	int ret;
+
+	if (args->flags != 0)
+		return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+	if (&obj->base == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOENT;
+	}
+
+	/* Need to make sure the object gets inactive eventually. */
+	ret = i915_gem_object_flush_active(obj);
+	if (ret)
+		goto out;
+
+	if (!obj->active)
+		goto out;
+
+	/* Do this after OLR check to make sure we make forward progress polling
+	 * on this IOCTL with a timeout == 0 (like busy ioctl)
+	 */
+	if (args->timeout_ns == 0) {
+		ret = -ETIME;
+		goto out;
+	}
+
+	drm_gem_object_unreference(&obj->base);
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (obj->last_read_req[i] == NULL)
+			continue;
+
+		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++) {
+		if (ret == 0)
+			ret = __i915_wait_request(req[i], reset_counter, true,
+						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+						  file->driver_priv);
+		i915_gem_request_unreference__unlocked(req[i]);
+	}
+	return ret;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
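+
+/*
+ * For illustration, userspace drives the wait ioctl above roughly as
+ * follows (a minimal sketch, assuming "fd" and "handle" are valid): wait up
+ * to one millisecond for the object to go idle; timeout_ns is updated with
+ * the time remaining.
+ *
+ *	struct drm_i915_gem_wait wait = {
+ *		.bo_handle = handle,
+ *		.timeout_ns = 1000000,
+ *	};
+ *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+ */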
+
+static int
+__i915_gem_object_sync(struct drm_i915_gem_object *obj,
+		       struct intel_engine_cs *to,
+		       struct drm_i915_gem_request *from_req,
+		       struct drm_i915_gem_request **to_req)
+{
+	struct intel_engine_cs *from;
+	int ret;
+
+	from = i915_gem_request_get_ring(from_req);
+	if (to == from)
+		return 0;
+
+	if (i915_gem_request_completed(from_req, true))
+		return 0;
+
+	if (!i915_semaphore_is_enabled(obj->base.dev)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		ret = __i915_wait_request(from_req,
+					  atomic_read(&i915->gpu_error.reset_counter),
+					  i915->mm.interruptible,
+					  NULL,
+					  &i915->rps.semaphores);
+		if (ret)
+			return ret;
+
+		i915_gem_object_retire_request(obj, from_req);
+	} else {
+		int idx = intel_ring_sync_index(from, to);
+		u32 seqno = i915_gem_request_get_seqno(from_req);
+
+		WARN_ON(!to_req);
+
+		if (seqno <= from->semaphore.sync_seqno[idx])
+			return 0;
+
+		if (*to_req == NULL) {
+			ret = i915_gem_request_alloc(to, to->default_context, to_req);
+			if (ret)
+				return ret;
+		}
+
+		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+		ret = to->semaphore.sync_to(*to_req, from, seqno);
+		if (ret)
+			return ret;
+
+		/* We use last_read_req because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->semaphore.sync_seqno[idx] =
+			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring. Conceptually we serialise writes
+ * between engines inside the GPU. We only allow one engine to write
+ * into a buffer at any time, but multiple readers. To ensure each has
+ * a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings, to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+		     struct intel_engine_cs *to,
+		     struct drm_i915_gem_request **to_req)
+{
+	const bool readonly = obj->base.pending_write_domain == 0;
+	struct drm_i915_gem_request *req[I915_NUM_RINGS];
+	int ret, i, n;
+
+	if (!obj->active)
+		return 0;
+
+	if (to == NULL)
+		return i915_gem_object_wait_rendering(obj, readonly);
+
+	n = 0;
+	if (readonly) {
+		if (obj->last_write_req)
+			req[n++] = obj->last_write_req;
+	} else {
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			if (obj->last_read_req[i])
+				req[n++] = obj->last_read_req[i];
+	}
+	for (i = 0; i < n; i++) {
+		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
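+
+/*
+ * Illustrative caller sketch for the *to_req contract documented above
+ * (hypothetical caller, not part of this file): the request pointer
+ * starts out NULL, may be populated by the sync call, and any returned
+ * request must then be submitted by the caller.
+ *
+ *	struct drm_i915_gem_request *req = NULL;
+ *
+ *	ret = i915_gem_object_sync(obj, ring, &req);
+ *	if (ret)
+ *		return ret;
+ *	... optionally add more work against req ...
+ *	if (req)
+ *		i915_add_request(req);
+ */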
+
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Force a pagefault for domain tracking on next user access */
+	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	/* Wait for any direct GTT access to complete */
+	mb();
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+}
+
+static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (list_empty(&vma->vma_link))
+		return 0;
+
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+		return 0;
+	}
+
+	if (vma->pin_count)
+		return -EBUSY;
+
+	BUG_ON(obj->pages == NULL);
+
+	if (wait) {
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
+	}
+
+	if (i915_is_ggtt(vma->vm) &&
+	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+		i915_gem_object_finish_gtt(obj);
+
+		/* release the fence reg _after_ flushing */
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	trace_i915_vma_unbind(vma);
+
+	vma->vm->unbind_vma(vma);
+	vma->bound = 0;
+
+	list_del_init(&vma->mm_list);
+	if (i915_is_ggtt(vma->vm)) {
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+			obj->map_and_fenceable = false;
+		} else if (vma->ggtt_view.pages) {
+			sg_free_table(vma->ggtt_view.pages);
+			kfree(vma->ggtt_view.pages);
+		}
+		vma->ggtt_view.pages = NULL;
+	}
+
+	drm_mm_remove_node(&vma->node);
+	i915_gem_vma_destroy(vma);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist. */
+	if (list_empty(&obj->vma_list))
+		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+
+	return 0;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+	return __i915_vma_unbind(vma, true);
+}
+
+int __i915_vma_unbind_no_wait(struct i915_vma *vma)
+{
+	return __i915_vma_unbind(vma, false);
+}
+
+int i915_gpu_idle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i;
+
+	/* Flush everything onto the inactive list. */
+	for_each_ring(ring, dev_priv, i) {
+		if (!i915.enable_execlists) {
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+			if (ret)
+				return ret;
+
+			ret = i915_switch_context(req);
+			if (ret) {
+				i915_gem_request_cancel(req);
+				return ret;
+			}
+
+			i915_add_request_no_flush(req);
+		}
+
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+
+	WARN_ON(i915_verify_lists(dev));
+	return 0;
+}
+
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+				     unsigned long cache_level)
+{
+	struct drm_mm_node *gtt_space = &vma->node;
+	struct drm_mm_node *other;
+
+	/*
+	 * On some machines we have to be careful when putting differing types
+	 * of snoopable memory together to avoid the prefetcher crossing memory
+	 * domains and dying. During vm initialisation, we decide whether or not
+	 * these constraints apply and set the drm_mm.color_adjust
+	 * appropriately.
+	 */
+	if (vma->vm->mm.color_adjust == NULL)
+		return true;
+
+	if (!drm_mm_node_allocated(gtt_space))
+		return true;
+
+	if (list_empty(&gtt_space->node_list))
+		return true;
+
+	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+	if (other->allocated && !other->hole_follows && other->color != cache_level)
+		return false;
+
+	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+		return false;
+
+	return true;
+}
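+
+/*
+ * Layout sketch for the neighbour checks above (illustration only):
+ * with color_adjust active, a node must be separated from any
+ * differently-coloured neighbour by a hole, e.g.
+ *
+ *	| snooped obj | hole | uncached obj |	-> valid placement
+ *	| snooped obj | uncached obj |		-> invalid, the prefetcher
+ *						   may cross domains
+ */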
+
+/**
+ * Finds free space in the GTT aperture and binds the object or a view of it
+ * there.
+ */
+static struct i915_vma *
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+			   struct i915_address_space *vm,
+			   const struct i915_ggtt_view *ggtt_view,
+			   unsigned alignment,
+			   uint64_t flags)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 fence_alignment, unfenced_alignment;
+	u32 search_flag, alloc_flag;
+	u64 start, end;
+	u64 size, fence_size;
+	struct i915_vma *vma;
+	int ret;
+
+	if (i915_is_ggtt(vm)) {
+		u32 view_size;
+
+		if (WARN_ON(!ggtt_view))
+			return ERR_PTR(-EINVAL);
+
+		view_size = i915_ggtt_view_size(obj, ggtt_view);
+
+		fence_size = i915_gem_get_gtt_size(dev,
+						   view_size,
+						   obj->tiling_mode);
+		fence_alignment = i915_gem_get_gtt_alignment(dev,
+							     view_size,
+							     obj->tiling_mode,
+							     true);
+		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
+								view_size,
+								obj->tiling_mode,
+								false);
+		size = flags & PIN_MAPPABLE ? fence_size : view_size;
+	} else {
+		fence_size = i915_gem_get_gtt_size(dev,
+						   obj->base.size,
+						   obj->tiling_mode);
+		fence_alignment = i915_gem_get_gtt_alignment(dev,
+							     obj->base.size,
+							     obj->tiling_mode,
+							     true);
+		unfenced_alignment =
+			i915_gem_get_gtt_alignment(dev,
+						   obj->base.size,
+						   obj->tiling_mode,
+						   false);
+		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+	}
+
+	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	end = vm->total;
+	if (flags & PIN_MAPPABLE)
+		end = min_t(u64, end, dev_priv->gtt.mappable_end);
+	if (flags & PIN_ZONE_4G)
+		end = min_t(u64, end, (1ULL << 32));
+
+	if (alignment == 0)
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
+						unfenced_alignment;
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+			  ggtt_view ? ggtt_view->type : 0,
+			  alignment);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* If binding the object/GGTT view requires more space than the entire
+	 * aperture has, reject it early before evicting everything in a vain
+	 * attempt to find space.
+	 */
+	if (size > end) {
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
+			  ggtt_view ? ggtt_view->type : 0,
+			  size,
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
+			  end);
+		return ERR_PTR(-E2BIG);
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ERR_PTR(ret);
+
+	i915_gem_object_pin_pages(obj);
+
+	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+			  i915_gem_obj_lookup_or_create_vma(obj, vm);
+
+	if (IS_ERR(vma))
+		goto err_unpin;
+
+	if (flags & PIN_HIGH) {
+		search_flag = DRM_MM_SEARCH_BELOW;
+		alloc_flag = DRM_MM_CREATE_TOP;
+	} else {
+		search_flag = DRM_MM_SEARCH_DEFAULT;
+		alloc_flag = DRM_MM_CREATE_DEFAULT;
+	}
+
+search_free:
+	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+						  size, alignment,
+						  obj->cache_level,
+						  start, end,
+						  search_flag,
+						  alloc_flag);
+	if (ret) {
+		ret = i915_gem_evict_something(dev, vm, size, alignment,
+					       obj->cache_level,
+					       start, end,
+					       flags);
+		if (ret == 0)
+			goto search_free;
+
+		goto err_free_vma;
+	}
+	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
+		ret = -EINVAL;
+		goto err_remove_node;
+	}
+
+	trace_i915_vma_bind(vma, flags);
+	ret = i915_vma_bind(vma, obj->cache_level, flags);
+	if (ret)
+		goto err_remove_node;
+
+	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+	list_add_tail(&vma->mm_list, &vm->inactive_list);
+
+	return vma;
+
+err_remove_node:
+	drm_mm_remove_node(&vma->node);
+err_free_vma:
+	i915_gem_vma_destroy(vma);
+	vma = ERR_PTR(ret);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return vma;
+}
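+
+/*
+ * The search_free retry above is the usual GEM allocate-or-evict loop;
+ * schematically (sketch only, unwinding elided):
+ *
+ *	while (drm_mm_insert_node_in_range_generic() finds no hole)
+ *		if (i915_gem_evict_something() cannot free any space)
+ *			bail out with the eviction error;
+ */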
+
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+			bool force)
+{
+	/* If we don't have a page list set up, then we're not pinned
+	 * to GPU, and we can ignore the cache flush because it'll happen
+	 * again at bind time.
+	 */
+	if (obj->pages == NULL)
+		return false;
+
+	/*
+	 * Stolen memory is always coherent with the GPU as it is explicitly
+	 * marked as wc by the system, or the system is cache-coherent.
+	 */
+	if (obj->stolen || obj->phys_handle)
+		return false;
+
+	/* If the GPU is snooping the contents of the CPU cache,
+	 * we do not need to manually clear the CPU cache lines.  However,
+	 * the caches are only snooped when the render cache is
+	 * flushed/invalidated.  As we always have to emit invalidations
+	 * and flushes when moving into and out of the RENDER domain, correct
+	 * snooping behaviour occurs naturally as the result of our domain
+	 * tracking.
+	 */
+	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
+		obj->cache_dirty = true;
+		return false;
+	}
+
+	trace_i915_gem_object_clflush(obj);
+	drm_clflush_sg(obj->pages);
+	obj->cache_dirty = false;
+
+	return true;
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+	uint32_t old_write_domain;
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
+	 */
+	wmb();
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
+
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->base.read_domains,
+					    old_write_domain);
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+{
+	uint32_t old_write_domain;
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	if (i915_gem_clflush_object(obj, obj->pin_display))
+		i915_gem_chipset_flush(obj->base.dev);
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
+
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->base.read_domains,
+					    old_write_domain);
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+	uint32_t old_write_domain, old_read_domains;
+	struct i915_vma *vma;
+	int ret;
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+		return 0;
+
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;
+
+	/* Flush and acquire obj->pages so that we are coherent through
+	 * direct access in memory with previous cached writes through
+	 * shmemfs and that our cache domain tracking remains valid.
+	 * For example, if the obj->filp was moved to swap without us
+	 * being notified and releasing the pages, we would mistakenly
+	 * continue to assume that the obj remained out of the CPU cached
+	 * domain.
+	 */
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* Serialise direct access to this object with the barriers for
+	 * coherent writes from the GPU, by effectively invalidating the
+	 * GTT domain upon first access.
+	 */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		mb();
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->dirty = 1;
+	}
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	/* And bump the LRU for this access */
+	vma = i915_gem_obj_to_ggtt(obj);
+	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
+		list_move_tail(&vma->mm_list,
+			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
+
+	return 0;
+}
+
+/**
+ * Changes the cache-level of an object across all VMA.
+ *
+ * After this function returns, the object will be in the new cache-level
+ * across all GTT and the contents of the backing storage will be coherent,
+ * with respect to the new cache-level. In order to keep the backing storage
+ * coherent for all users, we only allow a single cache level to be set
+ * globally on the object and prevent it from being changed whilst the
+ * hardware is reading from the object. That is, if the object is currently
+ * on the scanout it will be set to uncached (or equivalent display
+ * cache coherency) and all non-MOCS GPU access will also be uncached so
+ * that all direct access to the scanout remains coherent.
+ */
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct i915_vma *vma, *next;
+	bool bound = false;
+	int ret = 0;
+
+	if (obj->cache_level == cache_level)
+		goto out;
+
+	/* Inspect the list of currently bound VMA and unbind any that would
+	 * be invalid given the new cache-level. This is principally to
+	 * catch the issue of the CS prefetch crossing page boundaries and
+	 * reading an invalid PTE on older architectures.
+	 */
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
+		if (vma->pin_count) {
+			DRM_DEBUG("cannot change the cache level of pinned objects\n");
+			return -EBUSY;
+		}
+
+		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
+			ret = i915_vma_unbind(vma);
+			if (ret)
+				return ret;
+		} else
+			bound = true;
+	}
+
+	/* We can reuse the existing drm_mm nodes but need to change the
+	 * cache-level on the PTE. We could simply unbind them all and
+	 * rebind with the correct cache-level on next use. However since
+	 * we already have a valid slot, dma mapping, pages etc, we may as
+	 * rewrite the PTE in the belief that doing so tramples upon less
+	 * state and so involves less work.
+	 */
+	if (bound) {
+		/* Before we change the PTE, the GPU must not be accessing it.
+		 * If we wait upon the object, we know that all the bound
+		 * VMA are no longer active.
+		 */
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
+
+		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+			/* Access to snoopable pages through the GTT is
+			 * incoherent and on some machines causes a hard
+			 * lockup. Relinquish the CPU mmapping to force
+			 * userspace to refault in the pages and we can
+			 * then double check if the GTT mapping is still
+			 * valid for that pointer access.
+			 */
+			i915_gem_release_mmap(obj);
+
+			/* As we no longer need a fence for GTT access,
+			 * we can relinquish it now (and so prevent having
+			 * to steal a fence from someone else on the next
+			 * fence request). Note GPU activity would have
+			 * dropped the fence as all snoopable access is
+			 * supposed to be linear.
+			 */
+			ret = i915_gem_object_put_fence(obj);
+			if (ret)
+				return ret;
+		} else {
+			/* We either have incoherent backing store and
+			 * so no GTT access or the architecture is fully
+			 * coherent. In such cases, existing GTT mmaps
+			 * ignore the cache bit in the PTE and we can
+			 * rewrite it without confusing the GPU or having
+			 * to force userspace to fault back in its mmaps.
+			 */
+		}
+
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
+
+			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+			if (ret)
+				return ret;
+		}
+	}
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		vma->node.color = cache_level;
+	obj->cache_level = cache_level;
+
+out:
+	/* Flush the dirty CPU caches to the backing storage so that the
+	 * object is now coherent at its new cache level (with respect
+	 * to the access domain).
+	 */
+	if (obj->cache_dirty &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+	    cpu_write_needs_clflush(obj)) {
+		if (i915_gem_clflush_object(obj, true))
+			i915_gem_chipset_flush(obj->base.dev);
+	}
+
+	return 0;
+}
+
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	switch (obj->cache_level) {
+	case I915_CACHE_LLC:
+	case I915_CACHE_L3_LLC:
+		args->caching = I915_CACHING_CACHED;
+		break;
+
+	case I915_CACHE_WT:
+		args->caching = I915_CACHING_DISPLAY;
+		break;
+
+	default:
+		args->caching = I915_CACHING_NONE;
+		break;
+	}
+
+	drm_gem_object_unreference_unlocked(&obj->base);
+	return 0;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->caching) {
+	case I915_CACHING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHING_CACHED:
+		/*
+		 * Due to a HW issue on BXT A stepping, GPU stores via a
+		 * snooped mapping may leave stale data in a corresponding CPU
+		 * cacheline, whereas normally such cachelines would get
+		 * invalidated.
+		 */
+		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+			return -ENODEV;
+
+		level = I915_CACHE_LLC;
+		break;
+	case I915_CACHING_DISPLAY:
+		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto rpm_put;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+rpm_put:
+	intel_runtime_pm_put(dev_priv);
+
+	return ret;
+}
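+
+/*
+ * Userspace view of this ioctl, for reference (sketch using libdrm's
+ * drmIoctl(); bo_handle is a placeholder GEM handle):
+ *
+ *	struct drm_i915_gem_caching arg = {
+ *		.handle = bo_handle,
+ *		.caching = I915_CACHING_NONE,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
+ */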
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ */
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
+				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
+				     const struct i915_ggtt_view *view)
+{
+	u32 old_read_domains, old_write_domain;
+	int ret;
+
+	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
+	if (ret)
+		return ret;
+
+	/* Mark the pin_display early so that we account for the
+	 * display coherency whilst setting up the cache domains.
+	 */
+	obj->pin_display++;
+
+	/* The display engine is not coherent with the LLC cache on gen6.  As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj,
+					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+	if (ret)
+		goto err_unpin_display;
+
+	/* As the user may map the buffer once pinned in the display plane
+	 * (e.g. libkms for the bootup splash), we have to ensure that we
+	 * always use map_and_fenceable for all scanout buffers.
+	 */
+	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+				       view->type == I915_GGTT_VIEW_NORMAL ?
+				       PIN_MAPPABLE : 0);
+	if (ret)
+		goto err_unpin_display;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	obj->base.write_domain = 0;
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+
+err_unpin_display:
+	obj->pin_display--;
+	return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+					 const struct i915_ggtt_view *view)
+{
+	if (WARN_ON(obj->pin_display == 0))
+		return;
+
+	i915_gem_object_ggtt_unpin_view(obj, view);
+
+	obj->pin_display--;
+}
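+
+/*
+ * Scanout lifetime sketch (hypothetical modeset caller); the pin and
+ * the unpin must be paired on the same view:
+ *
+ *	ret = i915_gem_object_pin_to_display_plane(obj, 0, ring,
+ *						   &req, &view);
+ *	... scan out from the object ...
+ *	i915_gem_object_unpin_from_display_plane(obj, &view);
+ */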
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return 0;
+
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj, false);
+
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
+	struct drm_i915_gem_request *request, *target = NULL;
+	unsigned reset_counter;
+	int ret;
+
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+	if (ret)
+		return ret;
+
+	spin_lock(&file_priv->mm.lock);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+		if (time_after_eq(request->emitted_jiffies, recent_enough))
+			break;
+
+		/*
+		 * Note that the request might not have been submitted yet.
+		 * In which case emitted_jiffies will be zero.
+		 */
+		if (!request->emitted_jiffies)
+			continue;
+
+		target = request;
+	}
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	if (target)
+		i915_gem_request_reference(target);
+	spin_unlock(&file_priv->mm.lock);
+
+	if (target == NULL)
+		return 0;
+
+	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+
+	i915_gem_request_unreference__unlocked(target);
+
+	return ret;
+}
+
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool mappable, fenceable;
+	u32 fence_size, fence_alignment;
+
+	fence_size = i915_gem_get_gtt_size(obj->base.dev,
+					   obj->base.size,
+					   obj->tiling_mode);
+	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+						     obj->base.size,
+						     obj->tiling_mode,
+						     true);
+
+	fenceable = (vma->node.size == fence_size &&
+		     (vma->node.start & (fence_alignment - 1)) == 0);
+
+	mappable = (vma->node.start + fence_size <=
+		    to_i915(obj->base.dev)->gtt.mappable_end);
+
+	obj->map_and_fenceable = mappable && fenceable;
+}
+
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+		       struct i915_address_space *vm,
+		       const struct i915_ggtt_view *ggtt_view,
+		       uint32_t alignment,
+		       uint64_t flags)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma;
+	unsigned bound;
+	int ret;
+
+	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
+		return -ENODEV;
+
+	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
+		return -EINVAL;
+
+	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+		return -EINVAL;
+
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return -EINVAL;
+
+	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+			  i915_gem_obj_to_vma(obj, vm);
+
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	if (vma) {
+		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+			return -EBUSY;
+
+		if (i915_vma_misplaced(vma, alignment, flags)) {
+			WARN(vma->pin_count,
+			     "bo is already pinned in %s with incorrect alignment:"
+			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " obj->map_and_fenceable=%d\n",
+			     ggtt_view ? "ggtt" : "ppgtt",
+			     upper_32_bits(vma->node.start),
+			     lower_32_bits(vma->node.start),
+			     alignment,
+			     !!(flags & PIN_MAPPABLE),
+			     obj->map_and_fenceable);
+			ret = i915_vma_unbind(vma);
+			if (ret)
+				return ret;
+
+			vma = NULL;
+		}
+	}
+
+	bound = vma ? vma->bound : 0;
+	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+						 flags);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+	} else {
+		ret = i915_vma_bind(vma, obj->cache_level, flags);
+		if (ret)
+			return ret;
+	}
+
+	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
+	    (bound ^ vma->bound) & GLOBAL_BIND) {
+		__i915_vma_set_map_and_fenceable(vma);
+		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+	}
+
+	vma->pin_count++;
+	return 0;
+}
+
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags)
+{
+	return i915_gem_object_do_pin(obj, vm,
+				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+				      alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags)
+{
+	if (WARN_ONCE(!view, "no view specified"))
+		return -EINVAL;
+
+	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+				      alignment, flags | PIN_GLOBAL);
+}
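+
+/*
+ * Pin/unpin pairing sketch for the normal GGTT view (hypothetical
+ * caller of the helpers above and below):
+ *
+ *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
+ *				       0, PIN_MAPPABLE);
+ *	if (ret)
+ *		return ret;
+ *	... access the object through the mappable aperture ...
+ *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+ */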
+
+void
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+				const struct i915_ggtt_view *view)
+{
+	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+	BUG_ON(!vma);
+	WARN_ON(vma->pin_count == 0);
+	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
+
+	--vma->pin_count;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_busy *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Count all active objects as busy, even if they are currently not used
+	 * by the gpu. Users of this interface expect objects to eventually
+	 * become non-busy without any further actions, therefore emit any
+	 * necessary flushes here.
+	 */
+	ret = i915_gem_object_flush_active(obj);
+	if (ret)
+		goto unref;
+
+	BUILD_BUG_ON(I915_NUM_RINGS > 16);
+	args->busy = obj->active << 16;
+	if (obj->last_write_req)
+		args->busy |= obj->last_write_req->ring->id;
+
+unref:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
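+
+/*
+ * Userspace decode sketch for args->busy (bo_handle is a placeholder):
+ * the low 16 bits carry the id of the last write ring, while the upper
+ * bits are non-zero for as long as the object is active on any ring.
+ *
+ *	struct drm_i915_gem_busy arg = { .handle = bo_handle };
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &arg);
+ *	if (arg.busy)
+ *		... the GPU is still using the object ...
+ */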
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_madvise *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	switch (args->madv) {
+	case I915_MADV_DONTNEED:
+	case I915_MADV_WILLNEED:
+	    break;
+	default:
+	    return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (i915_gem_obj_is_pinned(obj)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (obj->pages &&
+	    obj->tiling_mode != I915_TILING_NONE &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		if (obj->madv == I915_MADV_WILLNEED)
+			i915_gem_object_unpin_pages(obj);
+		if (args->madv == I915_MADV_WILLNEED)
+			i915_gem_object_pin_pages(obj);
+	}
+
+	if (obj->madv != __I915_MADV_PURGED)
+		obj->madv = args->madv;
+
+	/* if the object is no longer attached, discard its backing storage */
+	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+		i915_gem_object_truncate(obj);
+
+	args->retained = obj->madv != __I915_MADV_PURGED;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
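+
+/*
+ * Userspace sketch (illustrative only): mark a cached buffer purgeable
+ * while idle and later check whether its contents survived.
+ *
+ *	struct drm_i915_gem_madvise arg = {
+ *		.handle = bo_handle,
+ *		.madv = I915_MADV_DONTNEED,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
+ *	...
+ *	arg.madv = I915_MADV_WILLNEED;
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
+ *	if (!arg.retained)
+ *		... the pages were purged, reinitialise the contents ...
+ */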
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
+{
+	int i;
+
+	INIT_LIST_HEAD(&obj->global_list);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		INIT_LIST_HEAD(&obj->ring_list[i]);
+	INIT_LIST_HEAD(&obj->obj_exec_link);
+	INIT_LIST_HEAD(&obj->vma_list);
+	INIT_LIST_HEAD(&obj->batch_pool_link);
+
+	obj->ops = ops;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->madv = I915_MADV_WILLNEED;
+
+	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
+	gfp_t mask;
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+		i915_gem_object_free(obj);
+		return NULL;
+	}
+
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping_set_gfp_mask(mapping, mask);
+
+	i915_gem_object_init(obj, &i915_gem_object_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+	if (HAS_LLC(dev)) {
+		/* On some devices, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached.  Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache.  This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
+		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
+	trace_i915_gem_object_create(obj);
+
+	return obj;
+}
+
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
+{
+	/* If we are the last user of the backing storage (be it shmemfs
+	 * pages or stolen etc), we know that the pages are going to be
+	 * immediately released. In this case, we can then skip copying
+	 * back the contents from the GPU.
+	 */
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return false;
+
+	if (obj->base.filp == NULL)
+		return true;
+
+	/* At first glance, this looks racy, but then again so would
+	 * userspace racing mmap against close. However, the first external
+	 * reference to the filp can only be obtained through the
+	 * i915_gem_mmap_ioctl() which safeguards us against the user
+	 * acquiring such a reference whilst we are in the middle of
+	 * freeing the object.
+	 */
+	return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_vma *vma, *next;
+
+	intel_runtime_pm_get(dev_priv);
+
+	trace_i915_gem_object_destroy(obj);
+
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+		int ret;
+
+		vma->pin_count = 0;
+		ret = i915_vma_unbind(vma);
+		if (WARN_ON(ret == -ERESTARTSYS)) {
+			bool was_interruptible;
+
+			was_interruptible = dev_priv->mm.interruptible;
+			dev_priv->mm.interruptible = false;
+
+			WARN_ON(i915_vma_unbind(vma));
+
+			dev_priv->mm.interruptible = was_interruptible;
+		}
+	}
+
+	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+	 * before progressing. */
+	if (obj->stolen)
+		i915_gem_object_unpin_pages(obj);
+
+	WARN_ON(obj->frontbuffer_bits);
+
+	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+	    obj->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(obj->pages_pin_count))
+		obj->pages_pin_count = 0;
+	if (discard_backing_storage(obj))
+		obj->madv = I915_MADV_DONTNEED;
+	i915_gem_object_put_pages(obj);
+	i915_gem_object_free_mmap_offset(obj);
+
+	BUG_ON(obj->pages);
+
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);
+
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	drm_gem_object_release(&obj->base);
+	i915_gem_info_remove_obj(dev_priv, obj->base.size);
+
+	kfree(obj->bit_17);
+	i915_gem_object_free(obj);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
+			return vma;
+	}
+	return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+					   const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct i915_vma *vma;
+
+	if (WARN_ONCE(!view, "no view specified"))
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+			return vma;
+	return NULL;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+	struct i915_address_space *vm = NULL;
+	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
+	vm = vma->vm;
+
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
+	list_del(&vma->vma_link);
+
+	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		dev_priv->gt.stop_ring(ring);
+}
+
+int
+i915_gem_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		goto err;
+
+	i915_gem_retire_requests(dev);
+
+	i915_gem_stop_ringbuffers(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	flush_delayed_work(&dev_priv->mm.idle_work);
+
+	/* Assert that we successfully flushed all the work and
+	 * reset the GPU back to its idle, low power state.
+	 */
+	WARN_ON(dev_priv->mm.busy);
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
+
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
+
+	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
+
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
+	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
+	}
+
+	intel_ring_advance(ring);
+
+	return ret;
+}
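+
+/*
+ * Each iteration of the loop above emits one three-dword LRI packet,
+ *
+ *	MI_LOAD_REGISTER_IMM(1), <reg_base + i>, <remap_info[i / 4]>
+ *
+ * which is why intel_ring_begin() reserves GEN7_L3LOG_SIZE / 4 * 3
+ * dwords of ring space up front.
+ */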
+
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 5 ||
+	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+		return;
+
+	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+				 DISP_TILE_SURFACE_SWIZZLING);
+
+	if (IS_GEN5(dev))
+		return;
+
+	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+	if (IS_GEN6(dev))
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+	else if (IS_GEN7(dev))
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else if (IS_GEN8(dev))
+		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+	else
+		BUG();
+}
+
+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RING_CTL(base), 0);
+	I915_WRITE(RING_HEAD(base), 0);
+	I915_WRITE(RING_TAIL(base), 0);
+	I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+	if (IS_I830(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+		init_unused_ring(dev, SRB2_BASE);
+		init_unused_ring(dev, SRB3_BASE);
+	} else if (IS_GEN2(dev)) {
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+	} else if (IS_GEN3(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, PRB2_BASE);
+	}
+}
+
+int i915_gem_init_rings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_init_render_ring_buffer(dev);
+	if (ret)
+		return ret;
+
+	if (HAS_BSD(dev)) {
+		ret = intel_init_bsd_ring_buffer(dev);
+		if (ret)
+			goto cleanup_render_ring;
+	}
+
+	if (HAS_BLT(dev)) {
+		ret = intel_init_blt_ring_buffer(dev);
+		if (ret)
+			goto cleanup_bsd_ring;
+	}
+
+	if (HAS_VEBOX(dev)) {
+		ret = intel_init_vebox_ring_buffer(dev);
+		if (ret)
+			goto cleanup_blt_ring;
+	}
+
+	if (HAS_BSD2(dev)) {
+		ret = intel_init_bsd2_ring_buffer(dev);
+		if (ret)
+			goto cleanup_vebox_ring;
+	}
+
+	return 0;
+
+cleanup_vebox_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
+	return ret;
+}
+
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i, j;
+
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+		return -EIO;
+
+	/* Double layer security blanket, see i915_gem_init() */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	if (dev_priv->ellc_size)
+		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+	if (HAS_PCH_NOP(dev)) {
+		if (IS_IVYBRIDGE(dev)) {
+			u32 temp = I915_READ(GEN7_MSG_CTL);
+			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+			I915_WRITE(GEN7_MSG_CTL, temp);
+		} else if (INTEL_INFO(dev)->gen >= 7) {
+			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+		}
+	}
+
+	i915_gem_init_swizzling(dev);
+
+	/*
+	 * At least 830 can leave some of the unused rings
+	 * "active" (ie. head != tail) after resume which
+	 * will prevent c3 entry. Make sure all unused rings
+	 * are totally idle.
+	 */
+	init_unused_rings(dev);
+
+	BUG_ON(!dev_priv->ring[RCS].default_context);
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret) {
+		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+		goto out;
+	}
+
+	/* Need to do basic initialisation of all rings first: */
+	for_each_ring(ring, dev_priv, i) {
+		ret = ring->init_hw(ring);
+		if (ret)
+			goto out;
+	}
+
+	/* We can't enable contexts until all firmware is loaded */
+	if (HAS_GUC_UCODE(dev)) {
+		ret = intel_guc_ucode_load(dev);
+		if (ret) {
+			/*
+			 * If we got an error and GuC submission is enabled, map
+			 * the error to -EIO so the GPU will be declared wedged.
+			 * OTOH, if we didn't intend to use the GuC anyway, just
+			 * discard the error and carry on.
+			 */
+			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
+				  i915.enable_guc_submission ? "" :
+				  " (ignored)");
+			ret = i915.enable_guc_submission ? -EIO : 0;
+			if (ret)
+				goto out;
+		}
+	}
+
+	/*
+	 * Increment the next seqno by 0x100 so we have a visible break
+	 * on re-initialisation
+	 */
+	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+	if (ret)
+		goto out;
+
+	/* Now it is safe to go back round and do everything else: */
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_request *req;
+
+		WARN_ON(!ring->default_context);
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+		if (ret) {
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		if (ring->id == RCS) {
+			for (j = 0; j < NUM_L3_SLICES(dev); j++)
+				i915_gem_l3_remap(req, j);
+		}
+
+		ret = i915_ppgtt_init_ring(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		ret = i915_gem_context_enable(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		i915_add_request_no_flush(req);
+	}
+
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	return ret;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+			i915.enable_execlists);
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (IS_VALLEYVIEW(dev)) {
+		/* VLVA0 (potential hack), BIOS isn't actually waking us */
+		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
+		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
+			      VLV_GTLC_ALLOWWAKEACK), 10))
+			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+	}
+
+	if (!i915.enable_execlists) {
+		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
+		dev_priv->gt.init_rings = i915_gem_init_rings;
+		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+	} else {
+		dev_priv->gt.execbuf_submit = intel_execlists_submission;
+		dev_priv->gt.init_rings = intel_logical_rings_init;
+		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+	}
+
+	/* This is just a security blanket to placate dragons.
+	 * On some systems, we very sporadically observe that the first TLBs
+	 * used by the CS may be stale, despite us poking the TLB reset. If
+	 * we hold the forcewake during initialisation these problems
+	 * just magically go away.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	ret = i915_gem_init_userptr(dev);
+	if (ret)
+		goto out_unlock;
+
+	i915_gem_init_global_gtt(dev);
+
+	ret = i915_gem_context_init(dev);
+	if (ret)
+		goto out_unlock;
+
+	ret = dev_priv->gt.init_rings(dev);
+	if (ret)
+		goto out_unlock;
+
+	ret = i915_gem_init_hw(dev);
+	if (ret == -EIO) {
+		/* Allow ring initialisation to fail by marking the GPU as
+		 * wedged. But we only want to do this where the GPU is angry,
+		 * for all other failures, such as an allocation failure, bail.
+		 */
+		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		ret = 0;
+	}
+
+out_unlock:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		dev_priv->gt.cleanup_ring(ring);
+
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves nor any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
+}
+
+static void
+init_ring_lists(struct intel_engine_cs *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->objects =
+		kmem_cache_create("i915_gem_object",
+				  sizeof(struct drm_i915_gem_object), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+	dev_priv->vmas =
+		kmem_cache_create("i915_gem_vma",
+				  sizeof(struct i915_vma), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+	dev_priv->requests =
+		kmem_cache_create("i915_gem_request",
+				  sizeof(struct drm_i915_gem_request), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+
+	INIT_LIST_HEAD(&dev_priv->vm_list);
+	INIT_LIST_HEAD(&dev_priv->context_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
+	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
+	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+
+	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+		dev_priv->num_fence_regs = 32;
+	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
+	if (intel_vgpu_active(dev))
+		dev_priv->num_fence_regs =
+				I915_READ(vgtif_reg(avail_rs.fence_num));
+
+	/*
+	 * Set initial sequence number for requests.
+	 * Using this number allows the wraparound to happen early,
+	 * catching any obvious problems.
+	 */
+	dev_priv->next_seqno = ((u32)~0 - 0x1100);
+	dev_priv->last_seqno = ((u32)~0 - 0x1101);
+
+	/* Initialize fence registers to zero */
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	i915_gem_restore_fences(dev);
+
+	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+	dev_priv->mm.interruptible = true;
+
+	i915_gem_shrinker_init(dev_priv);
+
+	mutex_init(&dev_priv->fb_tracking.lock);
+}
+
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	/* Clean up our request list when the client is going away, so that
+	 * later retire_requests won't dereference our soon-to-be-gone
+	 * file_priv.
+	 */
+	spin_lock(&file_priv->mm.lock);
+	while (!list_empty(&file_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&file_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   client_list);
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	spin_unlock(&file_priv->mm.lock);
+
+	if (!list_empty(&file_priv->rps.link)) {
+		spin_lock(&to_i915(dev)->rps.client_lock);
+		list_del(&file_priv->rps.link);
+		spin_unlock(&to_i915(dev)->rps.client_lock);
+	}
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+	int ret;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	file_priv->dev_priv = dev->dev_private;
+	file_priv->file = file;
+	INIT_LIST_HEAD(&file_priv->rps.link);
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+
+	ret = i915_gem_context_open(dev, file);
+	if (ret)
+		kfree(file_priv);
+
+	return ret;
+}
+
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits)
+{
+	if (old) {
+		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+		old->frontbuffer_bits &= ~frontbuffer_bits;
+	}
+
+	if (new) {
+		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+		new->frontbuffer_bits |= frontbuffer_bits;
+	}
+}
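+
+/*
+ * Pageflip sketch (hypothetical caller): once a flip commits, the
+ * tracking bits migrate from the old framebuffer object to the new:
+ *
+ *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
+ *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
+ */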
+
+/* All the new VM stuff */
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm)
+{
+	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+	struct i915_vma *vma;
+
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
+
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
+			return vma->node.start;
+	}
+
+	WARN(1, "%s vma for this object not found.\n",
+	     i915_is_ggtt(vm) ? "global" : "ppgtt");
+	return -1;
+}
+
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+			return vma->node.start;
+
+	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
+	return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+			return true;
+	}
+
+	return false;
+}
+
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
+		    drm_mm_node_allocated(&vma->node))
+			return true;
+
+	return false;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
+			return true;
+
+	return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+				struct i915_address_space *vm)
+{
+	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+	struct i915_vma *vma;
+
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
+
+	BUG_ON(list_empty(&o->vma_list));
+
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
+			return vma->node.size;
+	}
+	return 0;
+}
+
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			return true;
+
+	return false;
+}
+
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+			         const void *data, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct sg_table *sg;
+	size_t bytes;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR_OR_NULL(obj))
+		return obj;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret)
+		goto fail;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto fail;
+
+	i915_gem_object_pin_pages(obj);
+	sg = obj->pages;
+	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(bytes != size)) {
+		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	return obj;
+
+fail:
+	drm_gem_object_unreference(&obj->base);
+	return ERR_PTR(ret);
+}
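+
+/*
+ * Typical use of the helper above (sketch): copy a firmware blob into
+ * a fresh GEM object, much as a microcode loader would (fw_data and
+ * fw_size are placeholders):
+ *
+ *	obj = i915_gem_object_create_from_data(dev, fw_data, fw_size);
+ *	if (IS_ERR_OR_NULL(obj))
+ *		... handle the allocation/copy failure ...
+ */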
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
index 0000000..7bf2f3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_batch_pool.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user provided batch buffer contents to a kernel owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
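+
+/*
+ * Illustrative sketch (hypothetical helper, compiled out): the intended
+ * calling pattern for the pool, assuming the caller already holds
+ * struct_mutex as the functions below require.
+ */
+#if 0
+static int example_copy_to_shadow(struct i915_gem_batch_pool *pool,
+				  const void *cmds, size_t len)
+{
+	struct drm_i915_gem_object *shadow;
+
+	/* Reuses an inactive scratch object, or allocates a fresh one */
+	shadow = i915_gem_batch_pool_get(pool, len);
+	if (IS_ERR(shadow))
+		return PTR_ERR(shadow);
+
+	/* ... copy and parse the user batch into the shadow object ... */
+
+	/* The pool hands the object back with its pages pinned */
+	i915_gem_object_unpin_pages(shadow);
+	return 0;
+}
+#endif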
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @dev: the drm device
+ * @pool: the batch buffer pool
+ */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+			      struct i915_gem_batch_pool *pool)
+{
+	int n;
+
+	pool->dev = dev;
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+	int n;
+
+	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		while (!list_empty(&pool->cache_list[n])) {
+			struct drm_i915_gem_object *obj =
+				list_first_entry(&pool->cache_list[n],
+						 struct drm_i915_gem_object,
+						 batch_pool_link);
+
+			list_del(&obj->batch_pool_link);
+			drm_gem_object_unreference(&obj->base);
+		}
+	}
+}
+
+/**
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
+ *
+ * Note: Callers must hold the struct_mutex
+ *
+ * Return: the buffer object or an error pointer
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+			size_t size)
+{
+	struct drm_i915_gem_object *obj = NULL;
+	struct drm_i915_gem_object *tmp, *next;
+	struct list_head *list;
+	int n;
+
+	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+	/* Compute a power-of-two bucket, but throw everything greater than
+	 * 16KiB into the same bucket: i.e. the buckets hold objects of
+	 * (1 page, 2 pages, 4 pages, 8+ pages).
+	 */
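+	/* Worked example: a 12KiB (3 page) request gives fls(3) - 1 == 1 and
+	 * so scans the 2-page bucket; the size check below still rejects any
+	 * object in that bucket that is smaller than the request.
+	 */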
+	n = fls(size >> PAGE_SHIFT) - 1;
+	if (n >= ARRAY_SIZE(pool->cache_list))
+		n = ARRAY_SIZE(pool->cache_list) - 1;
+	list = &pool->cache_list[n];
+
+	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+		/* The batches are strictly LRU ordered */
+		if (tmp->active)
+			break;
+
+		/* While we're looping, do some clean up */
+		if (tmp->madv == __I915_MADV_PURGED) {
+			list_del(&tmp->batch_pool_link);
+			drm_gem_object_unreference(&tmp->base);
+			continue;
+		}
+
+		if (tmp->base.size >= size) {
+			obj = tmp;
+			break;
+		}
+	}
+
+	if (obj == NULL) {
+		int ret;
+
+		obj = i915_gem_alloc_object(pool->dev, size);
+		if (obj == NULL)
+			return ERR_PTR(-ENOMEM);
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ERR_PTR(ret);
+
+		obj->madv = I915_MADV_DONTNEED;
+	}
+
+	list_move_tail(&obj->batch_pool_link, list);
+	i915_gem_object_pin_pages(obj);
+	return obj;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
new file mode 100644
index 0000000..848e907
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_BATCH_POOL_H
+#define I915_GEM_BATCH_POOL_H
+
+#include "i915_drv.h"
+
+struct i915_gem_batch_pool {
+	struct drm_device *dev;
+	struct list_head cache_list[4];
+};
+
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+			      struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object*
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
+
+#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 0000000..0433d25
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,982 @@
+/*
+ * Copyright © 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current context, invoking a save of the context we actually care about. In
+ * fact, the code could likely be constructed, albeit in a more complicated
+ * fashion, to never use the default context, though that limits the driver's
+ * ability to swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware, and object
+ * tracking works. Below is a very crude representation of the state machine
+ * describing the context life.
+ *                                         refcount     pincount     active
+ * S0: initial state                          0            0           0
+ * S1: context created                        1            0           0
+ * S2: context is currently running           2            1           X
+ * S3: GPU referenced, but not current        2            0           1
+ * S4: context is current, but destroyed      1            1           0
+ * S5: like S3, but destroyed                 1            0           1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: another client submits execbuf with context
+ * S3->S1: context object was retired
+ * S3->S2: client submits another execbuf
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ *  The "current context" means the context which is currently running on the
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
+ *  offset of the BO. The GPU is not actively referencing the data at this
+ *  offset, but it will on the next context switch. The only way to avoid this
+ *  is to do a GPU reset.
+ *
+ *  An "active context' is one which was previously the "current context" and is
+ *  on the active list waiting for the next context switch to occur. Until this
+ *  happens, the object must remain at the same gtt offset. It is therefore
+ *  possible to destroy a context, but it is still active.
+ *
+ */
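+
+/*
+ * Example walk-through of the common path above: a client creates a context
+ * (S0->S1, refcount 1), submits an execbuf with it (S1->S2, pinned and
+ * current), is displaced when another client's execbuf switches contexts
+ * (S2->S3, still active), and the object retires back to S1 once the next
+ * context switch completes.
+ */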
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future-proof as is.
+ */
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+	if (IS_GEN6(dev))
+		return GEN6_CONTEXT_ALIGN;
+
+	return GEN7_CONTEXT_ALIGN;
+}
+
+static int get_context_size(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+	u32 reg;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		reg = I915_READ(CXT_SIZE);
+		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+		break;
+	case 7:
+		reg = I915_READ(GEN7_CXT_SIZE);
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		break;
+	case 8:
+		ret = GEN8_CXT_TOTAL_SIZE;
+		break;
+	default:
+		BUG();
+	}
+
+	return ret;
+}
+
+static void i915_gem_context_clean(struct intel_context *ctx)
+{
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	struct i915_vma *vma, *next;
+
+	if (!ppgtt)
+		return;
+
+	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
+				 mm_list) {
+		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
+			break;
+	}
+}
+
+void i915_gem_context_free(struct kref *ctx_ref)
+{
+	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+
+	trace_i915_context_free(ctx);
+
+	if (i915.enable_execlists)
+		intel_lr_context_free(ctx);
+
+	/*
+	 * This context is going away and we need to remove all VMAs still
+	 * around. This is to handle imported shared objects for which
+	 * the destructor did not run when their handles were closed.
+	 */
+	i915_gem_context_clean(ctx);
+
+	i915_ppgtt_put(ctx->ppgtt);
+
+	if (ctx->legacy_hw_ctx.rcs_state)
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+	list_del(&ctx->link);
+	kfree(ctx);
+}
+
+struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
+static struct intel_context *
+__create_hw_context(struct drm_device *dev,
+		    struct drm_i915_file_private *file_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_context *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&ctx->ref);
+	list_add_tail(&ctx->link, &dev_priv->context_list);
+	ctx->i915 = dev_priv;
+
+	if (dev_priv->hw_context_size) {
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
+			goto err_out;
+		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
+	}
+
+	/* Default context will never have a file_priv */
+	if (file_priv != NULL) {
+		ret = idr_alloc(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+	} else
+		ret = DEFAULT_CONTEXT_HANDLE;
+
+	ctx->file_priv = file_priv;
+	ctx->user_handle = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+
+	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+
+	return ctx;
+
+err_out:
+	i915_gem_context_unreference(ctx);
+	return ERR_PTR(ret);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as an idle case.
+ */
+static struct intel_context *
+i915_gem_create_context(struct drm_device *dev,
+			struct drm_i915_file_private *file_priv)
+{
+	const bool is_global_default_ctx = file_priv == NULL;
+	struct intel_context *ctx;
+	int ret = 0;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ctx = __create_hw_context(dev, file_priv);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+					    get_context_alignment(dev), 0);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+			goto err_destroy;
+		}
+	}
+
+	if (USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+
+		if (IS_ERR_OR_NULL(ppgtt)) {
+			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+					 PTR_ERR(ppgtt));
+			ret = PTR_ERR(ppgtt);
+			goto err_unpin;
+		}
+
+		ctx->ppgtt = ppgtt;
+	}
+
+	trace_i915_context_create(ctx);
+
+	return ctx;
+
+err_unpin:
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+err_destroy:
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
+	i915_gem_context_unreference(ctx);
+	return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (i915.enable_execlists) {
+		struct intel_context *ctx;
+
+		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+			intel_lr_context_reset(dev, ctx);
+		}
+
+		return;
+	}
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_context *lctx = ring->last_context;
+
+		if (lctx) {
+			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+			i915_gem_context_unreference(lctx);
+			ring->last_context = NULL;
+		}
+
+		/* Force the GPU state to be reinitialised on enabling */
+		if (ring->default_context)
+			ring->default_context->legacy_hw_ctx.initialized = false;
+	}
+}
+
+int i915_gem_context_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_context *ctx;
+	int i;
+
+	/* Init should only be called once per module load. Eventually the
+	 * restriction on the context_disabled check can be loosened. */
+	if (WARN_ON(dev_priv->ring[RCS].default_context))
+		return 0;
+
+	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+		if (!i915.enable_execlists) {
+			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+			return -EINVAL;
+		}
+	}
+
+	if (i915.enable_execlists) {
+		/* NB: intentionally left blank. We will allocate our own
+		 * backing objects as we need them, thank you very much */
+		dev_priv->hw_context_size = 0;
+	} else if (HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+		if (dev_priv->hw_context_size > (1<<20)) {
+			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+					 dev_priv->hw_context_size);
+			dev_priv->hw_context_size = 0;
+		}
+	}
+
+	ctx = i915_gem_create_context(dev, NULL);
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default global context (error %ld)\n",
+			  PTR_ERR(ctx));
+		return PTR_ERR(ctx);
+	}
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+		/* NB: RCS will hold a ref for all rings */
+		ring->default_context = ctx;
+	}
+
+	DRM_DEBUG_DRIVER("%s context support initialized\n",
+			i915.enable_execlists ? "LR" :
+			dev_priv->hw_context_size ? "HW" : "fake");
+	return 0;
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	int i;
+
+	if (dctx->legacy_hw_ctx.rcs_state) {
+		/* The only known way to stop the gpu from accessing the hw context is
+		 * to reset it. Do this as the very last operation to avoid confusing
+		 * other code, leading to spurious errors. */
+		intel_gpu_reset(dev);
+
+		/* When default context is created and switched to, base object refcount
+		 * will be 2 (+1 from object creation and +1 from do_switch()).
+		 * i915_gem_context_fini() will be called after gpu_idle() has switched
+		 * to default context. So we need to unreference the base object once
+		 * to offset the do_switch part, so that i915_gem_context_unreference()
+		 * can then free the base object correctly. */
+		WARN_ON(!dev_priv->ring[RCS].last_context);
+		if (dev_priv->ring[RCS].last_context == dctx) {
+			/* Fake switch to NULL context */
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+			i915_gem_context_unreference(dctx);
+			dev_priv->ring[RCS].last_context = NULL;
+		}
+
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+	}
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+		if (ring->last_context)
+			i915_gem_context_unreference(ring->last_context);
+
+		ring->default_context = NULL;
+		ring->last_context = NULL;
+	}
+
+	i915_gem_context_unreference(dctx);
+}
+
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	if (i915.enable_execlists) {
+		if (ring->init_context == NULL)
+			return 0;
+
+		ret = ring->init_context(req);
+	} else
+		ret = i915_switch_context(req);
+
+	if (ret) {
+		DRM_ERROR("ring init context: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+	struct intel_context *ctx = p;
+
+	i915_gem_context_unreference(ctx);
+	return 0;
+}
+
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct intel_context *ctx;
+
+	idr_init(&file_priv->context_idr);
+
+	mutex_lock(&dev->struct_mutex);
+	ctx = i915_gem_create_context(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (IS_ERR(ctx)) {
+		idr_destroy(&file_priv->context_idr);
+		return PTR_ERR(ctx);
+	}
+
+	return 0;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+	idr_destroy(&file_priv->context_idr);
+}
+
+struct intel_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+	struct intel_context *ctx;
+
+	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
+	if (!ctx)
+		return ERR_PTR(-ENOENT);
+
+	return ctx;
+}
+
+static inline int
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	const int num_rings =
+		/* Use an extended w/a on ivb+ if signalling from other rings */
+		i915_semaphore_is_enabled(ring->dev) ?
+		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		0;
+	int len, i, ret;
+
+	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+	 * explicitly, so we rely on the value at ring init, stored in
+	 * itlb_before_ctx_switch.
+	 */
+	if (IS_GEN6(ring->dev)) {
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		if (ret)
+			return ret;
+	}
+
+	/* These flags are for resource streamer on HSW+ */
+	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+	else if (INTEL_INFO(ring->dev)->gen < 8)
+		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
+
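+	/* Dword budget: MI_NOOP + MI_SET_CONTEXT + address/flags + the
+	 * trailing MI_NOOP workaround = 4. Gen7+ adds the two MI_ARB_ON_OFF
+	 * dwords and, when signalling other rings, an MI_LOAD_REGISTER_IMM
+	 * header plus a register/value pair per remote ring on each side.
+	 */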
+	len = 4;
+	if (INTEL_INFO(ring->dev)->gen >= 7)
+		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+	ret = intel_ring_begin(req, len);
+	if (ret)
+		return ret;
+
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+	if (INTEL_INFO(ring->dev)->gen >= 7) {
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(ring->dev), i) {
+				if (signaller == ring)
+					continue;
+
+				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+			}
+		}
+	}
+
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+			flags);
+	/*
+	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+	 * WaMiSetContext_Hang:snb,ivb,vlv
+	 */
+	intel_ring_emit(ring, MI_NOOP);
+
+	if (INTEL_INFO(ring->dev)->gen >= 7) {
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(ring->dev), i) {
+				if (signaller == ring)
+					continue;
+
+				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+			}
+		}
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+	}
+
+	intel_ring_advance(ring);
+
+	return ret;
+}
+
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+				      struct intel_context *from,
+				      struct intel_context *to)
+{
+	if (to->remap_slice)
+		return false;
+
+	if (to->ppgtt && from == to &&
+	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+		return true;
+
+	return false;
+}
+
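+/* Page-directory loads for legacy contexts: gen6/7 (and any non-render ring)
+ * must emit the PP_DIR_BASE load before MI_SET_CONTEXT, while gen8 render
+ * can defer it until afterwards when the restore is inhibited; see
+ * needs_pd_load_post() below.
+ */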
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!to->ppgtt)
+		return false;
+
+	if (INTEL_INFO(ring->dev)->gen < 8)
+		return true;
+
+	if (ring != &dev_priv->ring[RCS])
+		return true;
+
+	return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+		u32 hw_flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!to->ppgtt)
+		return false;
+
+	if (!IS_GEN8(ring->dev))
+		return false;
+
+	if (ring != &dev_priv->ring[RCS])
+		return false;
+
+	if (hw_flags & MI_RESTORE_INHIBIT)
+		return true;
+
+	return false;
+}
+
+static int do_switch(struct drm_i915_gem_request *req)
+{
+	struct intel_context *to = req->ctx;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_context *from = ring->last_context;
+	u32 hw_flags = 0;
+	bool uninitialized = false;
+	int ret, i;
+
+	if (from != NULL && ring == &dev_priv->ring[RCS]) {
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
+	}
+
+	if (should_skip_switch(ring, from, to))
+		return 0;
+
+	/* Trying to pin first makes error handling easier. */
+	if (ring == &dev_priv->ring[RCS]) {
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+					    get_context_alignment(ring->dev), 0);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	if (needs_pd_load_pre(ring, to)) {
+		/* Older GENs and non render rings still want the load first,
+		 * "PP_DCLV followed by PP_DIR_BASE register through Load
+		 * Register Immediate commands in Ring Buffer before submitting
+		 * a context."*/
+		trace_switch_mm(ring, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		if (ret)
+			goto unpin_out;
+
+		/* Doing a PD load always reloads the page dirs */
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+	}
+
+	if (ring != &dev_priv->ring[RCS]) {
+		if (from)
+			i915_gem_context_unreference(from);
+		goto done;
+	}
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
+	 * that thanks to write = false in this call and us not setting any gpu
+	 * write domains when putting a context object onto the active list
+	 * (when switching away from it), this won't block.
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+	if (ret)
+		goto unpin_out;
+
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
+		hw_flags |= MI_RESTORE_INHIBIT;
+		/* NB: If we inhibit the restore, the context is not allowed to
+		 * die because future work may end up depending on valid address
+		 * space. This means we must enforce that a page table load
+		 * occurs in this case. */
+	} else if (to->ppgtt &&
+		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+		hw_flags |= MI_FORCE_RESTORE;
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+	}
+
+	/* We should never emit switch_mm more than once */
+	WARN_ON(needs_pd_load_pre(ring, to) &&
+		needs_pd_load_post(ring, to, hw_flags));
+
+	ret = mi_set_context(req, hw_flags);
+	if (ret)
+		goto unpin_out;
+
+	/* GEN8 does *not* require an explicit reload if the PDPs have been
+	 * setup, and we do not wish to move them.
+	 */
+	if (needs_pd_load_post(ring, to, hw_flags)) {
+		trace_switch_mm(ring, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		/* The hardware context switch is emitted, but we haven't
+		 * actually changed the state - so it's probably safe to bail
+		 * here. Still, let the user know something dangerous has
+		 * happened.
+		 */
+		if (ret) {
+			DRM_ERROR("Failed to change address space on context switch\n");
+			goto unpin_out;
+		}
+	}
+
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(req, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
+	/* The backing object for the context is done after switching to the
+	 * *next* context. Therefore we cannot retire the previous context until
+	 * the next context has already started running. In fact, the below code
+	 * is a bit suboptimal because the retiring can occur simply after the
+	 * MI_SET_CONTEXT instead of when the next seqno has completed.
+	 */
+	if (from != NULL) {
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
+		 * whole damn pipeline, we don't need to explicitly mark the
+		 * object dirty. The only exception is that the context must be
+		 * correct in case the object gets swapped out. Ideally we'd be
+		 * able to defer doing this until we know the object would be
+		 * swapped, but there is no way to do that yet.
+		 */
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+
+		/* obj is kept alive until the next request by its active ref */
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_gem_context_unreference(from);
+	}
+
+	uninitialized = !to->legacy_hw_ctx.initialized;
+	to->legacy_hw_ctx.initialized = true;
+
+done:
+	i915_gem_context_reference(to);
+	ring->last_context = to;
+
+	if (uninitialized) {
+		if (ring->init_context) {
+			ret = ring->init_context(req);
+			if (ret)
+				DRM_ERROR("ring init context: %d\n", ret);
+		}
+	}
+
+	return 0;
+
+unpin_out:
+	if (ring->id == RCS)
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	return ret;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @req: request for which we'll execute the context switch
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ *
+ * This function should not be used in execlists mode.  Instead the context is
+ * switched by writing to the ELSP and requests keep a reference to their
+ * context.
+ */
+int i915_switch_context(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	WARN_ON(i915.enable_execlists);
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (req->ctx != ring->last_context) {
+			i915_gem_context_reference(req->ctx);
+			if (ring->last_context)
+				i915_gem_context_unreference(ring->last_context);
+			ring->last_context = req->ctx;
+		}
+		return 0;
+	}
+
+	return do_switch(req);
+}
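+
+/*
+ * Illustrative sketch (hypothetical caller, compiled out): in legacy
+ * submission the switch is driven from request construction, roughly as
+ * below, before any commands for the batch itself are emitted.
+ */
+#if 0
+static int example_emit_with_context(struct drm_i915_gem_request *req)
+{
+	/* Make sure req->ctx is loaded on req->ring first */
+	int ret = i915_switch_context(req);
+	if (ret)
+		return ret;
+
+	/* ... emit MI_BATCH_BUFFER_START etc. ... */
+	return 0;
+}
+#endif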
+
+static bool contexts_enabled(struct drm_device *dev)
+{
+	return i915.enable_execlists || to_i915(dev)->hw_context_size;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_i915_gem_context_create *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct intel_context *ctx;
+	int ret;
+
+	if (!contexts_enabled(dev))
+		return -ENODEV;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_create_context(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	args->ctx_id = ctx->user_handle;
+	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+	return 0;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file)
+{
+	struct drm_i915_gem_context_destroy *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct intel_context *ctx;
+	int ret;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
+		return -ENOENT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(ctx);
+	}
+
+	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+	i915_gem_context_unreference(ctx);
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+	return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct intel_context *ctx;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(ctx);
+	}
+
+	args->size = 0;
+	switch (args->param) {
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
+		args->value = ctx->hang_stats.ban_period_seconds;
+		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct intel_context *ctx;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(ctx);
+	}
+
+	switch (args->param) {
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
+		if (args->size)
+			ret = -EINVAL;
+		else if (args->value < ctx->hang_stats.ban_period_seconds &&
+			 !capable(CAP_SYS_ADMIN))
+			ret = -EPERM;
+		else
+			ctx->hang_stats.ban_period_seconds = args->value;
+		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		if (args->size) {
+			ret = -EINVAL;
+		} else {
+			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
new file mode 100644
index 0000000..17299d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
+{
+	static int warned;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *ring;
+	int err = 0;
+	int i;
+
+	if (warned)
+		return 0;
+
+	for_each_ring(ring, dev_priv, i) {
+		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+			if (obj->base.dev != dev ||
+			    !atomic_read(&obj->base.refcount.refcount)) {
+				DRM_ERROR("%s: freed active obj %p\n",
+					  ring->name, obj);
+				err++;
+				break;
+			} else if (!obj->active ||
+				   obj->last_read_req[ring->id] == NULL) {
+				DRM_ERROR("%s: invalid active obj %p\n",
+					  ring->name, obj);
+				err++;
+			} else if (obj->base.write_domain) {
+				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
+					  ring->name,
+					  obj, obj->base.write_domain);
+				err++;
+			}
+		}
+	}
+
+	return warned = err;
+}
+#endif /* WATCH_LISTS */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 0000000..e9c2bfd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+	return to_intel_bo(buf->priv);
+}
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction dir)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
+
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto err_unlock;
+
+	i915_gem_object_pin_pages(obj);
+
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret)
+		goto err_free;
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), src->length, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		ret = -ENOMEM;
+		goto err_free_sg;
+	}
+
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
+
+err_free_sg:
+	sg_free_table(st);
+err_free:
+	kfree(st);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+	return ERR_PTR(ret);
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+
+	i915_gem_object_unpin_pages(obj);
+
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct drm_device *dev = obj->base.dev;
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	int ret, i;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
+
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL)
+		goto err_unpin;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto err_unpin;
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct drm_device *dev = obj->base.dev;
+
+	mutex_lock(&dev->struct_mutex);
+	if (--obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops =  {
+	.map_dma_buf = i915_gem_map_dma_buf,
+	.unmap_dma_buf = i915_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.kmap = i915_gem_dmabuf_kmap,
+	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+	.kunmap = i915_gem_dmabuf_kunmap,
+	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
+	.begin_cpu_access = i915_gem_begin_cpu_access,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &i915_dmabuf_ops;
+	exp_info.size = gem_obj->size;
+	exp_info.flags = flags;
+	exp_info.priv = gem_obj;
+
+	if (obj->ops->dmabuf_export) {
+		int ret = obj->ops->dmabuf_export(obj);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	return dma_buf_export(&exp_info);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *sg;
+
+	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+
+	obj->pages = sg;
+	return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	dma_buf_unmap_attachment(obj->base.import_attach,
+				 obj->pages, DMA_BIDIRECTIONAL);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+	.get_pages = i915_gem_object_get_pages_dmabuf,
+	.put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	/* is this one of our own objects? */
+	if (dma_buf->ops == &i915_dmabuf_ops) {
+		obj = dma_buf_to_obj(dma_buf);
+		/* is it from our device? */
+		if (obj->base.dev == dev) {
+			/*
+			 * Importing dmabuf exported from our own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_reference(&obj->base);
+			return &obj->base;
+		}
+	}
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL) {
+		ret = -ENOMEM;
+		goto fail_detach;
+	}
+
+	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+	obj->base.import_attach = attach;
+
+	return &obj->base;
+
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 0000000..d71a133
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "i915_trace.h"
+
+static bool
+mark_free(struct i915_vma *vma, struct list_head *unwind)
+{
+	if (vma->pin_count)
+		return false;
+
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
+	list_add(&vma->exec_list, unwind);
+	return drm_mm_scan_add_block(&vma->node);
+}
+
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @min_size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @start: start (inclusive) of the range from which to evict objects
+ * @end: end (exclusive) of the range from which to evict objects
+ * @flags: additional flags to control the eviction algorithm
+ *
+ * This function will try to evict vmas until a free space satisfying the
+ * requirements is found. Callers must check first whether any such hole exists
+ * already before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * Since this function is only used to free up virtual address space it only
+ * ignores pinned vmas, and not objects where the backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
+			 unsigned flags)
+{
+	struct list_head eviction_list, unwind_list;
+	struct i915_vma *vma;
+	int ret = 0;
+	int pass = 0;
+
+	trace_i915_gem_evict(dev, min_size, alignment, flags);
+
+	/*
+	 * The goal is to evict objects and amalgamate space in LRU order.
+	 * The oldest idle objects reside on the inactive list, which is in
+	 * retirement order. The next objects to retire are those on the (per
+	 * ring) active list that do not have an outstanding flush. Once the
+	 * hardware reports completion (the seqno is updated after the
+	 * batchbuffer has been finished) the clean buffer objects would
+	 * be retired to the inactive list. Any dirty objects would be added
+	 * to the tail of the flushing list. So after processing the clean
+	 * active objects we need to emit a MI_FLUSH to retire the flushing
+	 * list, hence the retirement order of the flushing list is in
+	 * advance of the dirty objects on the active lists.
+	 *
+	 * The retirement sequence is thus:
+	 *   1. Inactive objects (already retired)
+	 *   2. Clean active objects
+	 *   3. Flushing list
+	 *   4. Dirty active objects.
+	 *
+	 * On each list, the oldest objects lie at the HEAD with the freshest
+	 * object on the TAIL.
+	 */
+
+	INIT_LIST_HEAD(&unwind_list);
+	if (start != 0 || end != vm->total) {
+		drm_mm_init_scan_with_range(&vm->mm, min_size,
+					    alignment, cache_level,
+					    start, end);
+	} else
+		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+
+search_again:
+	/* First see if there is a large enough contiguous idle region... */
+	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+		if (mark_free(vma, &unwind_list))
+			goto found;
+	}
+
+	if (flags & PIN_NONBLOCK)
+		goto none;
+
+	/* Now merge in the soon-to-be-expired objects... */
+	list_for_each_entry(vma, &vm->active_list, mm_list) {
+		if (mark_free(vma, &unwind_list))
+			goto found;
+	}
+
+none:
+	/* Nothing found, clean up and bail out! */
+	while (!list_empty(&unwind_list)) {
+		vma = list_first_entry(&unwind_list,
+				       struct i915_vma,
+				       exec_list);
+		ret = drm_mm_scan_remove_block(&vma->node);
+		BUG_ON(ret);
+
+		list_del_init(&vma->exec_list);
+	}
+
+	/* Can we unpin some objects such as idle hw contexts,
+	 * or pending flips?
+	 */
+	if (flags & PIN_NONBLOCK)
+		return -ENOSPC;
+
+	/* Only idle the GPU and repeat the search once */
+	if (pass++ == 0) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(dev);
+		goto search_again;
+	}
+
+	/* If we still have pending pageflip completions, drop
+	 * back to userspace to give our workqueues time to
+	 * acquire our locks and unpin the old scanouts.
+	 */
+	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+
+found:
+	/* drm_mm doesn't allow any other operations while
+	 * scanning, therefore store the objects to be evicted on a
+	 * temporary list. */
+	INIT_LIST_HEAD(&eviction_list);
+	while (!list_empty(&unwind_list)) {
+		vma = list_first_entry(&unwind_list,
+				       struct i915_vma,
+				       exec_list);
+		if (drm_mm_scan_remove_block(&vma->node)) {
+			list_move(&vma->exec_list, &eviction_list);
+			drm_gem_object_reference(&vma->obj->base);
+			continue;
+		}
+		list_del_init(&vma->exec_list);
+	}
+
+	/* Unbinding will emit any required flushes */
+	while (!list_empty(&eviction_list)) {
+		struct drm_gem_object *obj;
+		vma = list_first_entry(&eviction_list,
+				       struct i915_vma,
+				       exec_list);
+
+		obj = &vma->obj->base;
+		list_del_init(&vma->exec_list);
+		if (ret == 0)
+			ret = i915_vma_unbind(vma);
+
+		drm_gem_object_unreference(obj);
+	}
+
+	return ret;
+}
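+
+/*
+ * Illustrative sketch (hypothetical caller, compiled out): binding code is
+ * expected to probe for a free hole first and only fall back to eviction,
+ * e.g. with a non-blocking pass before allowing the GPU to be idled.
+ */
+#if 0
+static int example_make_room(struct drm_device *dev,
+			     struct i915_address_space *vm,
+			     int min_size, unsigned alignment,
+			     unsigned cache_level)
+{
+	int ret;
+
+	ret = i915_gem_evict_something(dev, vm, min_size, alignment,
+				       cache_level, 0, vm->total,
+				       PIN_NONBLOCK);
+	if (ret == -ENOSPC) /* retry, this time idling the GPU */
+		ret = i915_gem_evict_something(dev, vm, min_size, alignment,
+					       cache_level, 0, vm->total, 0);
+	return ret;
+}
+#endif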
+
+/**
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
+ * @vm: Address space to cleanse
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * This function evicts all idle vmas from a vm. If all unpinned vmas should be
+ * evicted, @do_idle needs to be set to true.
+ *
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+	trace_i915_gem_evict_vm(vm);
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+
+		WARN_ON(!list_empty(&vm->active_list));
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..6ed7d63
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1758 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/dma_remapping.h>
+#include <linux/uaccess.h>
+
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
+
+struct eb_vmas {
+	struct list_head vmas;
+	int and;
+	union {
+		struct i915_vma *lut[0];
+		struct hlist_head buckets[0];
+	};
+};
+
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args)
+{
+	struct eb_vmas *eb = NULL;
+
+	if (args->flags & I915_EXEC_HANDLE_LUT) {
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
+		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	}
+
+	if (eb == NULL) {
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
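+		/* Halve the power-of-two bucket count until it is at most
+		 * twice the number of buffers, so small batches do not
+		 * allocate a needlessly sparse table. */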
+		while (count > 2*size)
+			count >>= 1;
+		eb = kzalloc(count*sizeof(struct hlist_head) +
+			     sizeof(struct eb_vmas),
+			     GFP_TEMPORARY);
+		if (eb == NULL)
+			return eb;
+
+		eb->and = count - 1;
+	} else
+		eb->and = -args->buffer_count;
+
+	INIT_LIST_HEAD(&eb->vmas);
+	return eb;
+}
+
+static void
+eb_reset(struct eb_vmas *eb)
+{
+	if (eb->and >= 0)
+		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static int
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
+{
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret;
+
+	INIT_LIST_HEAD(&objects);
+	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can look
+	 * up or create the VMA without using GFP_ATOMIC. */
+	for (i = 0; i < args->buffer_count; i++) {
+		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+		if (obj == NULL) {
+			spin_unlock(&file->table_lock);
+			DRM_DEBUG("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			ret = -ENOENT;
+			goto err;
+		}
+
+		if (!list_empty(&obj->obj_exec_link)) {
+			spin_unlock(&file->table_lock);
+			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+				   obj, exec[i].handle, i);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		drm_gem_object_reference(&obj->base);
+		list_add_tail(&obj->obj_exec_link, &objects);
+	}
+	spin_unlock(&file->table_lock);
+
+	i = 0;
+	while (!list_empty(&objects)) {
+		struct i915_vma *vma;
+
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) pair we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto err;
+		}
+
+		/* Transfer ownership from the objects list to the vmas list. */
+		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
+
+		vma->exec_entry = &exec[i];
+		if (eb->and < 0) {
+			eb->lut[i] = vma;
+		} else {
+			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
+				       &eb->buckets[handle & eb->and]);
+		}
+		++i;
+	}
+
+	return 0;
+
+err:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		drm_gem_object_unreference(&obj->base);
+	}
+	/*
+	 * Objects already transferred to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
+	return ret;
+}
+
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
+{
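+	/* A negative "and" selects direct-LUT mode: -eb->and is the buffer
+	 * count and the handle indexes eb->lut[] directly. Otherwise the
+	 * handle is masked into the hash buckets. */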
+	if (eb->and < 0) {
+		if (handle >= -eb->and)
+			return NULL;
+		return eb->lut[handle];
+	} else {
+		struct hlist_head *head;
+		struct hlist_node *node;
+
+		head = &eb->buckets[handle & eb->and];
+		hlist_for_each(node, head) {
+			struct i915_vma *vma;
+
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
+		}
+		return NULL;
+	}
+}
+
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		vma->pin_count--;
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
+
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
+				       exec_list);
+		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
+		drm_gem_object_unreference(&vma->obj->base);
+	}
+	kfree(eb);
+}
+
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		obj->cache_level != I915_CACHE_NONE);
+}
+
+static int
+relocate_entry_cpu(struct drm_i915_gem_object *obj,
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
+{
+	struct drm_device *dev = obj->base.dev;
+	uint32_t page_offset = offset_in_page(reloc->offset);
+	uint64_t delta = reloc->delta + target_offset;
+	char *vaddr;
+	int ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret)
+		return ret;
+
+	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+				reloc->offset >> PAGE_SHIFT));
+	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
+
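+	/* Gen8+ relocations are 64 bits wide; the upper dword may fall on
+	 * the following page, which then has to be mapped first. */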
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
+	}
+
+	kunmap_atomic(vaddr);
+
+	return 0;
+}
+
+static int
+relocate_entry_gtt(struct drm_i915_gem_object *obj,
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint64_t delta = reloc->delta + target_offset;
+	uint64_t offset;
+	void __iomem *reloc_page;
+	int ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		return ret;
+
+	/* Map the page containing the relocation we're going to perform.  */
+	offset = i915_gem_obj_ggtt_offset(obj);
+	offset += reloc->offset;
+	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+					      offset & PAGE_MASK);
+	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
+
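+	/* As in relocate_entry_cpu(), gen8+ needs a second dword; remap the
+	 * atomic WC iomapping if it crosses into the next page. */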
+	if (INTEL_INFO(dev)->gen >= 8) {
+		offset += sizeof(uint32_t);
+
+		if (offset_in_page(offset) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page =
+				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+							 offset);
+		}
+
+		iowrite32(upper_32_bits(delta),
+			  reloc_page + offset_in_page(offset));
+	}
+
+	io_mapping_unmap_atomic(reloc_page);
+
+	return 0;
+}
+
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+	/* This is not a fast path, so KISS. */
+	drm_clflush_virt_range(addr, sizeof(uint32_t));
+	*(uint32_t *)addr = value;
+	drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+		       struct drm_i915_gem_relocation_entry *reloc,
+		       uint64_t target_offset)
+{
+	struct drm_device *dev = obj->base.dev;
+	uint32_t page_offset = offset_in_page(reloc->offset);
+	uint64_t delta = (int)reloc->delta + target_offset;
+	char *vaddr;
+	int ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		return ret;
+
+	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+				reloc->offset >> PAGE_SHIFT));
+	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+	}
+
+	kunmap_atomic(vaddr);
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct eb_vmas *eb,
+				   struct drm_i915_gem_relocation_entry *reloc)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_gem_object *target_obj;
+	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
+	uint64_t target_offset;
+	int ret;
+
+	/* we already hold a reference to all valid objects */
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
+		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
+
+	target_offset = target_vma->node.start;
+
+	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+	 * pipe_control writes because the gpu doesn't properly redirect them
+	 * through the ppgtt for non-secure batchbuffers. */
+	if (unlikely(IS_GEN6(dev) &&
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
+		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+				    PIN_GLOBAL);
+		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+			return ret;
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+		DRM_DEBUG("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return -EINVAL;
+	}
+	if (unlikely((reloc->write_domain | reloc->read_domains)
+		     & ~I915_GEM_GPU_DOMAINS)) {
+		DRM_DEBUG("reloc with read/write non-GPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return -EINVAL;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		return 0;
+
+	/* Check that the relocation address is valid... */
+	if (unlikely(reloc->offset >
+		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+		DRM_DEBUG("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		return -EINVAL;
+	}
+	if (unlikely(reloc->offset & 3)) {
+		DRM_DEBUG("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		return -EINVAL;
+	}
+
+	/* We can't wait for rendering with pagefaults disabled */
+	if (obj->active && pagefault_disabled())
+		return -EFAULT;
+
+	if (use_cpu_reloc(obj))
+		ret = relocate_entry_cpu(obj, reloc, target_offset);
+	else if (obj->map_and_fenceable)
+		ret = relocate_entry_gtt(obj, reloc, target_offset);
+	else if (cpu_has_clflush)
+		ret = relocate_entry_clflush(obj, reloc, target_offset);
+	else {
+		WARN_ONCE(1, "Impossible case in relocation handling\n");
+		ret = -ENODEV;
+	}
+
+	if (ret)
+		return ret;
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
+{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
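+	/* Relocations are copied from userspace in 512-byte chunks onto the
+	 * stack, bounding each __copy_from_user_inatomic() while pagefaults
+	 * are disabled. */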
+	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	int remain, ret;
+
+	user_relocs = to_user_ptr(entry->relocs_ptr);
+
+	remain = entry->relocation_count;
+	while (remain) {
+		struct drm_i915_gem_relocation_entry *r = stack_reloc;
+		int count = remain;
+		if (count > ARRAY_SIZE(stack_reloc))
+			count = ARRAY_SIZE(stack_reloc);
+		remain -= count;
+
+		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
+			return -EFAULT;
+
+		do {
+			u64 offset = r->presumed_offset;
+
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
+			if (ret)
+				return ret;
+
+			if (r->presumed_offset != offset &&
+			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
+						    &r->presumed_offset,
+						    sizeof(r->presumed_offset))) {
+				return -EFAULT;
+			}
+
+			user_relocs++;
+			r++;
+		} while (--count);
+	}
+
+	return 0;
+#undef N_RELOC
+}
+
+static int
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
+{
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
+{
+	struct i915_vma *vma;
+	int ret = 0;
+
+	/* This is the fast path and we cannot handle a pagefault whilst
+	 * holding the struct mutex lest the user pass in the relocations
+	 * contained within a mmaped bo. In such a case the page fault
+	 * handler would call i915_gem_fault() and we would try to
+	 * acquire the struct mutex again. Obviously this is bad and so
+	 * lockdep complains vehemently.
+	 */
+	pagefault_disable();
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
+		if (ret)
+			break;
+	}
+	pagefault_enable();
+
+	return ret;
+}
+
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+		__EXEC_OBJECT_NEEDS_MAP;
+}
+
+static int
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_engine_cs *ring,
+				bool *need_reloc)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	uint64_t flags;
+	int ret;
+
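+	/* Bind into the user's address space (the ppgtt where available);
+	 * EXEC_OBJECT_NEEDS_GTT additionally demands a global GTT binding. */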
+	flags = PIN_USER;
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_GLOBAL;
+
+	if (!drm_mm_node_allocated(&vma->node)) {
+		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+		 * limit address to the first 4GBs for unflagged objects.
+		 */
+		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
+			flags |= PIN_ZONE_4G;
+		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+			flags |= PIN_GLOBAL | PIN_MAPPABLE;
+		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+		if ((flags & PIN_MAPPABLE) == 0)
+			flags |= PIN_HIGH;
+	}
+
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+	if ((ret == -ENOSPC || ret == -E2BIG) &&
+	    only_mappable_for_reloc(entry->flags))
+		ret = i915_gem_object_pin(obj, vma->vm,
+					  entry->alignment,
+					  flags & ~PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
+	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+		ret = i915_gem_object_get_fence(obj);
+		if (ret)
+			return ret;
+
+		if (i915_gem_object_pin_fence(obj))
+			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+	}
+
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
+		*need_reloc = true;
+	}
+
+	if (entry->flags & EXEC_OBJECT_WRITE) {
+		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+	}
+
+	return 0;
+}
+
+static bool
+need_reloc_mappable(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+
+	if (entry->relocation_count == 0)
+		return false;
+
+	if (!i915_is_ggtt(vma->vm))
+		return false;
+
+	/* See also use_cpu_reloc() */
+	if (HAS_LLC(vma->obj->base.dev))
+		return false;
+
+	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
+	return true;
+}
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+	       !i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	/* avoid costly ping-pong once a batch bo ended up non-mappable */
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+		return !only_mappable_for_reloc(entry->flags);
+
+	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
+	    (vma->node.start + vma->node.size - 1) >> 32)
+		return true;
+
+	return false;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+			    struct list_head *vmas,
+			    struct intel_context *ctx,
+			    bool *need_relocs)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
+
+	i915_gem_retire_requests_ring(ring);
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
+
+		if (ctx->flags & CONTEXT_NO_ZEROMAP)
+			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+		if (!has_fenced_gpu_access)
+			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
+		need_fence =
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable = need_fence || need_reloc_mappable(vma);
+
+		if (need_mappable) {
+			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
+			list_move(&vma->exec_list, &ordered_vmas);
+		} else
+			list_move_tail(&vma->exec_list, &ordered_vmas);
+
+		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
+		obj->base.pending_write_domain = 0;
+	}
+	list_splice(&ordered_vmas, vmas);
+
+	/* Attempt to pin all of the buffers into the GTT.
+	 * This is done in 3 phases:
+	 *
+	 * 1a. Unbind all objects that do not match the GTT constraints for
+	 *     the execbuffer (fenceable, mappable, alignment etc).
+	 * 1b. Increment pin count for already bound objects.
+	 * 2.  Bind new objects.
+	 * 3.  Decrement pin count.
+	 *
+	 * This avoids unnecessary unbinding of later objects in order to make
+	 * room for the earlier objects *unless* we need to defragment.
+	 */
+	retry = 0;
+	do {
+		int ret = 0;
+
+		/* Unbind any ill-fitting objects or pin. */
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
+
+			if (eb_vma_misplaced(vma))
+				ret = i915_vma_unbind(vma);
+			else
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			if (ret)
+				goto err;
+		}
+
+		/* Bind fresh objects */
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
+				continue;
+
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			if (ret)
+				goto err;
+		}
+
+err:
+		if (ret != -ENOSPC || retry++)
+			return ret;
+
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
+
+		ret = i915_gem_evict_vm(vm, true);
+		if (ret)
+			return ret;
+	} while (1);
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_i915_gem_execbuffer2 *args,
+				  struct drm_file *file,
+				  struct intel_engine_cs *ring,
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct intel_context *ctx)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
+	bool need_relocs;
+	int *reloc_offset;
+	int i, total, ret;
+	unsigned count = args->buffer_count;
+
+	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
+
+	/* We may process another execbuffer during the unlock... */
+	while (!list_empty(&eb->vmas)) {
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
+		drm_gem_object_unreference(&vma->obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec[i].relocation_count;
+
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;
+
+		user_relocs = to_user_ptr(exec[i].relocs_ptr);
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec[i].relocation_count * sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
+		reloc_offset[i] = total;
+		total += exec[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	/* reacquire the objects */
+	eb_reset(eb);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
+	if (ret)
+		goto err;
+
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	if (ret)
+		goto err;
+
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
+		if (ret)
+			goto err;
+	}
+
+	/* Leave the user relocations as they are: this is the painfully
+	 * slow path, and we want to avoid the complication of dropping the
+	 * lock whilst having buffers reserved in the aperture and so
+	 * causing spurious ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
+				struct list_head *vmas)
+{
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	struct i915_vma *vma;
+	uint32_t flush_domains = 0;
+	bool flush_chipset = false;
+	int ret;
+
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		if (obj->active & other_rings) {
+			ret = i915_gem_object_sync(obj, req->ring, &req);
+			if (ret)
+				return ret;
+		}
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			flush_chipset |= i915_gem_clflush_object(obj, false);
+
+		flush_domains |= obj->base.write_domain;
+	}
+
+	if (flush_chipset)
+		i915_gem_chipset_flush(req->ring->dev);
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
+	/* Unconditionally invalidate gpu caches and ensure that we do flush
+	 * any residual writes from the previous batch.
+	 */
+	return intel_ring_invalidate_all_caches(req);
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+		return false;
+
+	/* Kernel clipping was a DRI1 misfeature */
+	if (exec->num_cliprects || exec->cliprects_ptr)
+		return false;
+
+	if (exec->DR4 == 0xffffffff) {
+		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+		exec->DR4 = 0;
+	}
+	if (exec->DR1 || exec->DR4)
+		return false;
+
+	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
+		return false;
+
+	return true;
+}
+
+static int
+validate_exec_list(struct drm_device *dev,
+		   struct drm_i915_gem_exec_object2 *exec,
+		   int count)
+{
+	unsigned relocs_total = 0;
+	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned invalid_flags;
+	int i;
+
+	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+	if (USES_FULL_PPGTT(dev))
+		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
+
+	for (i = 0; i < count; i++) {
+		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+		int length; /* limited by fault_in_pages_readable() */
+
+		if (exec[i].flags & invalid_flags)
+			return -EINVAL;
+
+		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+			return -EINVAL;
+
+		/* First check for malicious input causing overflow in
+		 * the worst case where we need to allocate the entire
+		 * relocation tree as a single array.
+		 */
+		if (exec[i].relocation_count > relocs_max - relocs_total)
+			return -EINVAL;
+		relocs_total += exec[i].relocation_count;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
+		/*
+		 * We must check that the entire relocation array is safe
+		 * to read, but since we may need to update the presumed
+		 * offsets during execution, check for full write access.
+		 */
+		if (!access_ok(VERIFY_WRITE, ptr, length))
+			return -EFAULT;
+
+		if (likely(!i915.prefault_disable)) {
+			if (fault_in_multipages_readable(ptr, length))
+				return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static struct intel_context *
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+			  struct intel_engine_cs *ring, const u32 ctx_id)
+{
+	struct intel_context *ctx = NULL;
+	struct i915_ctx_hang_stats *hs;
+
+	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+		return ERR_PTR(-EINVAL);
+
+	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	hs = &ctx->hang_stats;
+	if (hs->banned) {
+		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+		return ERR_PTR(-EIO);
+	}
+
+	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+		if (ret) {
+			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return ctx;
+}
+
+void
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+				   struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+		struct drm_i915_gem_object *obj = vma->obj;
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
+
+		obj->dirty = 1; /* be paranoid */
+		obj->base.write_domain = obj->base.pending_write_domain;
+		if (obj->base.write_domain == 0)
+			obj->base.pending_read_domains |= obj->base.read_domains;
+		obj->base.read_domains = obj->base.pending_read_domains;
+
+		i915_vma_move_to_active(vma, req);
+		if (obj->base.write_domain) {
+			i915_gem_request_assign(&obj->last_write_req, req);
+
+			intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+			/* update for the implicit flush after a batch */
+			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+		}
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			i915_gem_request_assign(&obj->last_fenced_req, req);
+			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+					       &dev_priv->mm.fence_list);
+			}
+		}
+
+		trace_i915_gem_object_change_domain(obj, old_read, old_write);
+	}
+}
+
+void
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
+{
+	/* Unconditionally force add_request to emit a full flush. */
+	params->ring->gpu_caches_dirty = true;
+
+	/* Add a breadcrumb for the completion of the batch buffer */
+	__i915_add_request(params->request, params->batch_obj, true);
+}
+
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+			    struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+		DRM_DEBUG("sol reset is gen7/rcs only\n");
+		return -EINVAL;
+	}
+
+	ret = intel_ring_begin(req, 4 * 3);
+	if (ret)
+		return ret;
+
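+	/* Zero all four GEN7_SO_WRITE_OFFSET registers, one 3-dword
+	 * MI_LOAD_REGISTER_IMM each, matching the 4 * 3 dwords reserved
+	 * above. */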
+	for (i = 0; i < 4; i++) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(ring, 0);
+	}
+
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+			  struct eb_vmas *eb,
+			  struct drm_i915_gem_object *batch_obj,
+			  u32 batch_start_offset,
+			  u32 batch_len,
+			  bool is_master)
+{
+	struct drm_i915_gem_object *shadow_batch_obj;
+	struct i915_vma *vma;
+	int ret;
+
+	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+						   PAGE_ALIGN(batch_len));
+	if (IS_ERR(shadow_batch_obj))
+		return shadow_batch_obj;
+
+	ret = i915_parse_cmds(ring,
+			      batch_obj,
+			      shadow_batch_obj,
+			      batch_start_offset,
+			      batch_len,
+			      is_master);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+	if (ret)
+		goto err;
+
+	i915_gem_object_unpin_pages(shadow_batch_obj);
+
+	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+	vma->exec_entry = shadow_exec_entry;
+	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
+	drm_gem_object_reference(&shadow_batch_obj->base);
+	list_add_tail(&vma->exec_list, &eb->vmas);
+
+	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+	return shadow_batch_obj;
+
+err:
+	i915_gem_object_unpin_pages(shadow_batch_obj);
+	if (ret == -EACCES) /* unhandled chained batch */
+		return batch_obj;
+	else
+		return ERR_PTR(ret);
+}
+
+int
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
+			       struct drm_i915_gem_execbuffer2 *args,
+			       struct list_head *vmas)
+{
+	struct drm_device *dev = params->dev;
+	struct intel_engine_cs *ring = params->ring;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 exec_start, exec_len;
+	int instp_mode;
+	u32 instp_mask;
+	int ret;
+
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
+	if (ret)
+		return ret;
+
+	ret = i915_switch_context(params->request);
+	if (ret)
+		return ret;
+
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	     "%s didn't clear reload\n", ring->name);
+
+	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	instp_mask = I915_EXEC_CONSTANTS_MASK;
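+	/* Validate the requested relative-constants addressing mode against
+	 * the target ring and hardware generation before touching INSTPM. */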
+	switch (instp_mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			return -EINVAL;
+		}
+
+		if (instp_mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
+				return -EINVAL;
+			}
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+				return -EINVAL;
+			}
+
+			/* The HW changed the meaning on this bit on gen6 */
+			if (INTEL_INFO(dev)->gen >= 6)
+				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		return -EINVAL;
+	}
+
+	if (ring == &dev_priv->ring[RCS] &&
+	    instp_mode != dev_priv->relative_constants_mode) {
+		ret = intel_ring_begin(params->request, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
+
+		dev_priv->relative_constants_mode = instp_mode;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+		if (ret)
+			return ret;
+	}
+
+	exec_len   = args->batch_len;
+	exec_start = params->batch_obj_vm_offset +
+		     params->args_batch_start_offset;
+
+	ret = ring->dispatch_execbuffer(params->request,
+					exec_start, exec_len,
+					params->dispatch_flags);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
+
+	return 0;
+}
+
+/**
+ * Find one BSD ring to dispatch the corresponding BSD command.
+ * The ring id is returned.
+ */
+static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	/* Check whether the file_priv is using one ring */
+	if (file_priv->bsd_ring)
+		return file_priv->bsd_ring->id;
+	else {
+		/* If not, use the ping-pong mechanism to select one ring */
+		int ring_id;
+
+		mutex_lock(&dev->struct_mutex);
+		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
+			ring_id = VCS;
+			dev_priv->mm.bsd_ring_dispatch_index = 1;
+		} else {
+			ring_id = VCS2;
+			dev_priv->mm.bsd_ring_dispatch_index = 0;
+		}
+		file_priv->bsd_ring = &dev_priv->ring[ring_id];
+		mutex_unlock(&dev->struct_mutex);
+		return ring_id;
+	}
+}
+
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
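+	/* By ABI the batch buffer is the last object in the execbuffer2
+	 * list, i.e. the tail of eb->vmas. */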
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct eb_vmas *eb;
+	struct drm_i915_gem_object *batch_obj;
+	struct drm_i915_gem_exec_object2 shadow_exec_entry;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
+	struct i915_address_space *vm;
+	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+	struct i915_execbuffer_params *params = &params_master;
+	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+	u32 dispatch_flags;
+	int ret;
+	bool need_relocs;
+
+	if (!i915_gem_check_execbuffer(args))
+		return -EINVAL;
+
+	ret = validate_exec_list(dev, exec, args->buffer_count);
+	if (ret)
+		return ret;
+
+	dispatch_flags = 0;
+	if (args->flags & I915_EXEC_SECURE) {
+		if (!file->is_master || !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		dispatch_flags |= I915_DISPATCH_SECURE;
+	}
+	if (args->flags & I915_EXEC_IS_PINNED)
+		dispatch_flags |= I915_DISPATCH_PINNED;
+
+	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
+		DRM_DEBUG("execbuf with unknown ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+
+	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
+	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+		DRM_DEBUG("execbuf with non bsd ring but with invalid "
+			"bsd dispatch flags: %d\n", (int)(args->flags));
+		return -EINVAL;
+	}
+
+	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+		ring = &dev_priv->ring[RCS];
+	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
+		if (HAS_BSD2(dev)) {
+			int ring_id;
+
+			switch (args->flags & I915_EXEC_BSD_MASK) {
+			case I915_EXEC_BSD_DEFAULT:
+				ring_id = gen8_dispatch_bsd_ring(dev, file);
+				ring = &dev_priv->ring[ring_id];
+				break;
+			case I915_EXEC_BSD_RING1:
+				ring = &dev_priv->ring[VCS];
+				break;
+			case I915_EXEC_BSD_RING2:
+				ring = &dev_priv->ring[VCS2];
+				break;
+			default:
+				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
+					  (int)(args->flags & I915_EXEC_BSD_MASK));
+				return -EINVAL;
+			}
+		} else
+			ring = &dev_priv->ring[VCS];
+	} else
+		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
+	if (!intel_ring_initialized(ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+
+	if (args->buffer_count < 1) {
+		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+		if (!HAS_RESOURCE_STREAMER(dev)) {
+			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+			return -EINVAL;
+		}
+		if (ring->id != RCS) {
+			DRM_DEBUG("RS is not available on %s\n",
+				 ring->name);
+			return -EINVAL;
+		}
+
+		dispatch_flags |= I915_DISPATCH_RS;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto pre_mutex_err;
+
+	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = PTR_ERR(ctx);
+		goto pre_mutex_err;
+	}
+
+	i915_gem_context_reference(ctx);
+
+	if (ctx->ppgtt)
+		vm = &ctx->ppgtt->base;
+	else
+		vm = &dev_priv->gtt.base;
+
+	memset(&params_master, 0x00, sizeof(params_master));
+
+	eb = eb_create(args);
+	if (eb == NULL) {
+		i915_gem_context_unreference(ctx);
+		mutex_unlock(&dev->struct_mutex);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
+	/* Look up object handles */
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
+	if (ret)
+		goto err;
+
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = eb_get_batch(eb);
+
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	if (ret)
+		goto err;
+
+	/* The objects are in their final locations, apply the relocations. */
+	if (need_relocs)
+		ret = i915_gem_execbuffer_relocate(eb);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+								eb, exec, ctx);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
+		if (ret)
+			goto err;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	if (batch_obj->base.pending_write_domain) {
+		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	params->args_batch_start_offset = args->batch_start_offset;
+	if (i915_needs_cmd_parser(ring) && args->batch_len) {
+		struct drm_i915_gem_object *parsed_batch_obj;
+
+		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
+						      &shadow_exec_entry,
+						      eb,
+						      batch_obj,
+						      args->batch_start_offset,
+						      args->batch_len,
+						      file->is_master);
+		if (IS_ERR(parsed_batch_obj)) {
+			ret = PTR_ERR(parsed_batch_obj);
+			goto err;
+		}
+
+		/*
+		 * parsed_batch_obj == batch_obj means batch not fully parsed:
+		 * Accept, but don't promote to secure.
+		 */
+
+		if (parsed_batch_obj != batch_obj) {
+			/*
+			 * Batch parsed and accepted:
+			 *
+			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+			 * bit from MI_BATCH_BUFFER_START commands issued in
+			 * the dispatch_execbuffer implementations. We
+			 * specifically don't want that set on batches the
+			 * command parser has accepted.
+			 */
+			dispatch_flags |= I915_DISPATCH_SECURE;
+			params->args_batch_start_offset = 0;
+			batch_obj = parsed_batch_obj;
+		}
+	}
+
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but bdw mucks it up again. */
+	if (dispatch_flags & I915_DISPATCH_SECURE) {
+		/*
+		 * So at first glance it looks freaky that we pin the batch here
+		 * outside of the reservation loop. But:
+		 * - The batch is already pinned into the relevant ppgtt, so we
+		 *   already have the backing storage fully allocated.
+		 * - No other BO uses the global gtt (well contexts, but meh),
+		 *   so we don't really have issues with multiple objects not
+		 *   fitting due to fragmentation.
+		 * So this is actually safe.
+		 */
+		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+		if (ret)
+			goto err;
+
+		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
+	} else
+		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+
+	/* Allocate a request for this batch buffer nice and early. */
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
+	if (ret)
+		goto err_batch_unpin;
+
+	ret = i915_gem_request_add_to_client(params->request, file);
+	if (ret)
+		goto err_batch_unpin;
+
+	/*
+	 * Save assorted stuff away to pass through to *_submission().
+	 * NB: This data should be 'persistent' and not local as it will be
+	 * kept around beyond the duration of the IOCTL once the GPU
+	 * scheduler arrives.
+	 */
+	params->dev                     = dev;
+	params->file                    = file;
+	params->ring                    = ring;
+	params->dispatch_flags          = dispatch_flags;
+	params->batch_obj               = batch_obj;
+	params->ctx                     = ctx;
+
+	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
+	/*
+	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
+	 * batch vma for correctness. For less ugly and less fragility this
+	 * needs to be adjusted to also track the ggtt batch vma properly as
+	 * active.
+	 */
+	if (dispatch_flags & I915_DISPATCH_SECURE)
+		i915_gem_object_ggtt_unpin(batch_obj);
+
+err:
+	/* the request owns the ref now */
+	i915_gem_context_unreference(ctx);
+	eb_destroy(eb);
+
+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request)
+		i915_gem_request_cancel(params->request);
+
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	/* intel_gpu_busy should also get a ref, so it will free when the device
+	 * is really idle. */
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+	if (args->buffer_count < 1) {
+		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     to_user_ptr(args->buffers_ptr),
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_DEBUG("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (INTEL_INFO(dev)->gen < 4)
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = I915_EXEC_RENDER;
+	i915_execbuffer2_set_context_id(exec2, 0);
+
+	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
+		}
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+	if (args->buffer_count < 1 ||
+	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	if (args->rsvd2 != 0) {
+		DRM_DEBUG("dirty rsvd2 field\n");
+		return -EINVAL;
+	}
+
+	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	if (exec2_list == NULL)
+		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+					   args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     to_user_ptr(args->buffers_ptr),
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_DEBUG("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		struct drm_i915_gem_exec_object2 __user *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 0000000..f010391
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for cpu access. Also note that some code wants an unfenced view, for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
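+/*
+ * A rough usage sketch (illustrative only; locking and error handling
+ * elided):
+ *
+ *	ret = i915_gem_object_get_fence(obj);
+ *	if (ret == 0)
+ *		... detiled CPU access through the GTT mmap ...
+ *
+ * and, when an untiled view is needed instead:
+ *
+ *	ret = i915_gem_object_put_fence(obj);
+ */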
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg_lo, fence_reg_hi;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg_lo = FENCE_REG_GEN6_LO(reg);
+		fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg_lo = FENCE_REG_965_LO(reg);
+		fence_reg_hi = FENCE_REG_965_HI(reg);
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg_lo, 0);
+	POSTING_READ(fence_reg_lo);
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint64_t val;
+
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+				 0xfffff000) << 32;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg_hi, val >> 32);
+		POSTING_READ(fence_reg_hi);
+
+		I915_WRITE(fence_reg_lo, val);
+		POSTING_READ(fence_reg_lo);
+	} else {
+		I915_WRITE(fence_reg_hi, 0);
+		POSTING_READ(fence_reg_hi);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		int pitch_val;
+		int tile_width;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG(reg), val);
+	POSTING_READ(FENCE_REG(reg));
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint32_t pitch_val;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG(reg), val);
+	POSTING_READ(FENCE_REG(reg));
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	if (IS_GEN2(dev))
+		i830_write_fence_reg(dev, reg, obj);
+	else if (IS_GEN3(dev))
+		i915_write_fence_reg(dev, reg, obj);
+	else if (INTEL_INFO(dev)->gen >= 4)
+		i965_write_fence_reg(dev, reg, obj);
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_request(obj->last_fenced_req);
+		if (ret)
+			return ret;
+
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	if (WARN_ON(fence->pin_count))
+		return -EBUSY;
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		goto deadlock;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
+		reg = i915_find_fence_reg(dev);
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing state must be
+ * synchronized first, by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+		WARN_ON(!ggtt_vma ||
+			dev_priv->fence_regs[obj->fence_reg].pin_count >
+			ggtt_vma->pin_count);
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence(). It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish between the two.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
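
As an editor's aside, a minimal usage sketch of the get/pin/unpin protocol documented above; setup_scanout() is a hypothetical stand-in for a real consumer, and locking is elided:

static int setup_scanout(struct drm_i915_gem_object *obj)
{
	return 0;	/* hypothetical stand-in for real scanout setup */
}

/* Illustrative only: synchronize fencing state with get_fence(), pin it
 * across use, and unpin unconditionally -- unpin_fence() copes with the
 * untiled (no fence) case by itself. */
static int example_pin_for_scanout(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		return ret;

	if (i915_gem_object_pin_fence(obj))
		DRM_DEBUG("using fence reg %d\n", obj->fence_reg);

	ret = setup_scanout(obj);

	i915_gem_object_unpin_fence(obj);
	return ret;
}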
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * The performance improvement from doing this on the back/depth buffer is on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
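
As an editor's aside, the cumulative CPU fixup described in the last paragraph can be made concrete for the common 9/10 swizzle; the helper below is illustrative, not driver code:

/* Editor's sketch of I915_BIT_6_SWIZZLE_9_10: flip bit 6 of a linear
 * offset whenever bit 9 XOR bit 10 of that offset is set, so CPU copies
 * land where the GPU's tiled accesses expect them. */
static inline unsigned long example_swizzle_9_10(unsigned long offset)
{
	unsigned long bit9 = (offset >> 9) & 1;
	unsigned long bit10 = (offset >> 10) & 1;

	return offset ^ ((bit9 ^ bit10) << 6);
}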
+
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+		/*
+		 * On BDW+, swizzling is not used. We leave the CPU memory
+		 * controller in charge of optimizing memory accesses without
+		 * the extra address manipulation GPU side.
+		 *
+		 * VLV and CHV don't have GPU swizzling.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		} else {
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized dimms. We don't need to check
+			 * the 3rd channel because no cpu with gpu attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		}
+	} else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always does
+		 * the same swizzling setup.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+		swizzle_y = I915_BIT_6_SWIZZLE_9;
+	} else if (IS_GEN2(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+		uint32_t dcc;
+
+		/* On 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU do swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+			}
+			break;
+		}
+
+		/* check for L-shaped memory aka modified enhanced addressing */
+		if (IS_GEN4(dev) &&
+		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer, have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
+		 */
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+		/* Userspace likes to explode if it sees unknown swizzling,
+		 * so lie. We will finish the lie when reporting through
+		 * the get-tiling-ioctl by reporting the physical swizzle
+		 * mode as unknown instead.
+		 *
+		 * As we don't strictly know what the swizzling is, it may be
+		 * bit17 dependent, and so we need to also prevent the pages
+		 * from being moved.
+		 */
+		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+	char temp[64];
+	char *vaddr;
+	int i;
+
+	vaddr = kmap(page);
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+}
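
The 128-byte stride above is no accident: flipping bit 6 pairs each 64-byte block with the block at offset ^ 64, so every 128-byte chunk contains exactly one such pair. An illustrative restatement (editor's helper, not driver code):

/* The block that trades places with the one at 'offset' when bit 6 of
 * the address decode flips is offset ^ 64; swapping the two halves of
 * each 128-byte chunk therefore covers the whole page. */
static inline unsigned int example_swizzle_partner(unsigned int offset)
{
	return offset ^ 64;
}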
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int i;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
+		}
+		i++;
+	}
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int page_count = obj->base.size >> PAGE_SHIFT;
+	int i;
+
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
+		if (obj->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i++;
+	}
+}
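
A minimal sketch of how the save/do pair documented above brackets a backing-storage move (illustrative only; real callers live in the page pin/unpin paths and hold the appropriate locks):

/* Illustrative only: record bit 17 of every page's physical address
 * before the pages may move, then fix contents up once the pages are
 * pinned again. */
static void example_bit17_cycle(struct drm_i915_gem_object *obj)
{
	i915_gem_object_save_bit_17_swizzle(obj);
	/* ... backing pages may be swapped out and back in here ... */
	i915_gem_object_do_bit_17_swizzle(obj);
}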
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..b37fe0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,3546 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_vgpu.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances, with a view representing all of the object's backing
+ * pages in a linear fashion. This view is called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ *    12
+ *    34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ *   1212
+ *   3434
+ *
+ * In this example both the size and the layout of pages in the alternative
+ * view are different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view, a
+ * globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * GEM API functions, i.e. the ones not taking the view parameter, operate on
+ * the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * The core API is designed to have copy semantics, which means that a passed
+ * in struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
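
As an editor's aside, the 2x2 stereo example above can be made concrete as a page-index remap; this sketch is illustrative and is not how the driver builds its scatter-gather tables:

/* Illustrative only: page numbers of the doubled 1212/3434 view, given
 * the normal 2x2 view with pages 1,2 on row 0 and 3,4 on row 1. */
static int example_stereo_view_page(int row, int col)
{
	static const int normal[2][2] = { { 1, 2 }, { 3, 4 } };

	/* Each view row repeats the normal row's two columns pairwise,
	 * yielding 1,2,1,2 for row 0 and 3,4,3,4 for row 1. */
	return normal[row][col % 2];
}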
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
+const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+	.type = I915_GGTT_VIEW_ROTATED
+};
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+	bool has_aliasing_ppgtt;
+	bool has_full_ppgtt;
+
+	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+
+	if (intel_vgpu_active(dev))
+		has_full_ppgtt = false; /* emulation is too hard */
+
+	/*
+	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+	 * execlists, the sole mechanism available to submit work.
+	 */
+	if (INTEL_INFO(dev)->gen < 9 &&
+	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+		return 0;
+
+	if (enable_ppgtt == 1)
+		return 1;
+
+	if (enable_ppgtt == 2 && has_full_ppgtt)
+		return 2;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+		DRM_INFO("Disabling PPGTT because VT-d is on\n");
+		return 0;
+	}
+#endif
+
+	/* Early VLV doesn't have this */
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+	    dev->pdev->revision < 0xb) {
+		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+		return 0;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+		return 2;
+	else
+		return has_aliasing_ppgtt ? 1 : 0;
+}
+
+static int ppgtt_bind_vma(struct i915_vma *vma,
+			  enum i915_cache_level cache_level,
+			  u32 unused)
+{
+	u32 pte_flags = 0;
+
+	/* Currently applicable only to VLV */
+	if (vma->obj->gt_ro)
+		pte_flags |= PTE_READ_ONLY;
+
+	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+				cache_level, pte_flags);
+
+	return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+	vma->vm->clear_range(vma->vm,
+			     vma->node.start,
+			     vma->obj->base.size,
+			     true);
+}
+
+static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+				  enum i915_cache_level level,
+				  bool valid)
+{
+	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+	pte |= addr;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		pte |= PPAT_UNCACHED_INDEX;
+		break;
+	case I915_CACHE_WT:
+		pte |= PPAT_DISPLAY_ELLC_INDEX;
+		break;
+	default:
+		pte |= PPAT_CACHED_INDEX;
+		break;
+	}
+
+	return pte;
+}
+
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+				  const enum i915_cache_level level)
+{
+	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+	pde |= addr;
+	if (level != I915_CACHE_NONE)
+		pde |= PPAT_CACHED_PDE_INDEX;
+	else
+		pde |= PPAT_UNCACHED_INDEX;
+	return pde;
+}
+
+#define gen8_pdpe_encode gen8_pde_encode
+#define gen8_pml4e_encode gen8_pde_encode
+
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 unused)
+{
+	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_L3_LLC:
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		MISSING_CASE(level);
+	}
+
+	return pte;
+}
+
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 unused)
+{
+	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_L3_LLC:
+		pte |= GEN7_PTE_CACHE_L3_LLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		MISSING_CASE(level);
+	}
+
+	return pte;
+}
+
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 flags)
+{
+	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	if (!(flags & PTE_READ_ONLY))
+		pte |= BYT_PTE_WRITEABLE;
+
+	if (level != I915_CACHE_NONE)
+		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+	return pte;
+}
+
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 unused)
+{
+	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+	if (level != I915_CACHE_NONE)
+		pte |= HSW_WB_LLC_AGE3;
+
+	return pte;
+}
+
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+				  enum i915_cache_level level,
+				  bool valid, u32 unused)
+{
+	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		break;
+	case I915_CACHE_WT:
+		pte |= HSW_WT_ELLC_LLC_AGE3;
+		break;
+	default:
+		pte |= HSW_WB_ELLC_LLC_AGE3;
+		break;
+	}
+
+	return pte;
+}
+
+static int __setup_page_dma(struct drm_device *dev,
+			    struct i915_page_dma *p, gfp_t flags)
+{
+	struct device *device = &dev->pdev->dev;
+
+	p->page = alloc_page(flags);
+	if (!p->page)
+		return -ENOMEM;
+
+	p->daddr = dma_map_page(device,
+				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(device, p->daddr)) {
+		__free_page(p->page);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+	return __setup_page_dma(dev, p, GFP_KERNEL);
+}
+
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+	if (WARN_ON(!p->page))
+		return;
+
+	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
+	memset(p, 0, sizeof(*p));
+}
+
+static void *kmap_page_dma(struct i915_page_dma *p)
+{
+	return kmap_atomic(p->page);
+}
+
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+{
+	/* There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And as we are not sure about the latter, play it safe for now.
+	 */
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+	kunmap_atomic(vaddr);
+}
+
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
+{
+	int i;
+	uint64_t * const vaddr = kmap_page_dma(p);
+
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
+
+	v = v << 32 | val32;
+
+	fill_page_dma(dev, p, v);
+}
+
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+	struct i915_page_scratch *sp;
+	int ret;
+
+	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+	if (sp == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+	if (ret) {
+		kfree(sp);
+		return ERR_PTR(ret);
+	}
+
+	set_pages_uc(px_page(sp), 1);
+
+	return sp;
+}
+
+static void free_scratch_page(struct drm_device *dev,
+			      struct i915_page_scratch *sp)
+{
+	set_pages_wb(px_page(sp), 1);
+
+	cleanup_px(dev, sp);
+	kfree(sp);
+}
+
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
+{
+	struct i915_page_table *pt;
+	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+		GEN8_PTES : GEN6_PTES;
+	int ret = -ENOMEM;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return ERR_PTR(-ENOMEM);
+
+	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+				GFP_KERNEL);
+
+	if (!pt->used_ptes)
+		goto fail_bitmap;
+
+	ret = setup_px(dev, pt);
+	if (ret)
+		goto fail_page_m;
+
+	return pt;
+
+fail_page_m:
+	kfree(pt->used_ptes);
+fail_bitmap:
+	kfree(pt);
+
+	return ERR_PTR(ret);
+}
+
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+{
+	cleanup_px(dev, pt);
+	kfree(pt->used_ptes);
+	kfree(pt);
+}
+
+static void gen8_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen8_pte_t scratch_pte;
+
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+				      I915_CACHE_LLC, true);
+
+	fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen6_pte_t scratch_pte;
+
+	WARN_ON(px_dma(vm->scratch_page) == 0);
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	fill32_px(vm->dev, pt, scratch_pte);
+}
+
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+{
+	struct i915_page_directory *pd;
+	int ret = -ENOMEM;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
+				sizeof(*pd->used_pdes), GFP_KERNEL);
+	if (!pd->used_pdes)
+		goto fail_bitmap;
+
+	ret = setup_px(dev, pd);
+	if (ret)
+		goto fail_page_m;
+
+	return pd;
+
+fail_page_m:
+	kfree(pd->used_pdes);
+fail_bitmap:
+	kfree(pd);
+
+	return ERR_PTR(ret);
+}
+
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+	if (px_page(pd)) {
+		cleanup_px(dev, pd);
+		kfree(pd->used_pdes);
+		kfree(pd);
+	}
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	gen8_pde_t scratch_pde;
+
+	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+	fill_px(vm->dev, pd, scratch_pde);
+}
+
+static int __pdp_init(struct drm_device *dev,
+		      struct i915_page_directory_pointer *pdp)
+{
+	size_t pdpes = I915_PDPES_PER_PDP(dev);
+
+	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
+				  sizeof(unsigned long),
+				  GFP_KERNEL);
+	if (!pdp->used_pdpes)
+		return -ENOMEM;
+
+	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
+				      GFP_KERNEL);
+	if (!pdp->page_directory) {
+		kfree(pdp->used_pdpes);
+		/* the PDP might be the statically allocated top level. Keep it
+		 * as clean as possible */
+		pdp->used_pdpes = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __pdp_fini(struct i915_page_directory_pointer *pdp)
+{
+	kfree(pdp->used_pdpes);
+	kfree(pdp->page_directory);
+	pdp->page_directory = NULL;
+}
+
+static struct
+i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+{
+	struct i915_page_directory_pointer *pdp;
+	int ret = -ENOMEM;
+
+	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+
+	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
+	if (!pdp)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __pdp_init(dev, pdp);
+	if (ret)
+		goto fail_bitmap;
+
+	ret = setup_px(dev, pdp);
+	if (ret)
+		goto fail_page_m;
+
+	return pdp;
+
+fail_page_m:
+	__pdp_fini(pdp);
+fail_bitmap:
+	kfree(pdp);
+
+	return ERR_PTR(ret);
+}
+
+static void free_pdp(struct drm_device *dev,
+		     struct i915_page_directory_pointer *pdp)
+{
+	__pdp_fini(pdp);
+	if (USES_FULL_48BIT_PPGTT(dev)) {
+		cleanup_px(dev, pdp);
+		kfree(pdp);
+	}
+}
+
+static void gen8_initialize_pdp(struct i915_address_space *vm,
+				struct i915_page_directory_pointer *pdp)
+{
+	gen8_ppgtt_pdpe_t scratch_pdpe;
+
+	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
+
+	fill_px(vm->dev, pdp, scratch_pdpe);
+}
+
+static void gen8_initialize_pml4(struct i915_address_space *vm,
+				 struct i915_pml4 *pml4)
+{
+	gen8_ppgtt_pml4e_t scratch_pml4e;
+
+	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
+					  I915_CACHE_LLC);
+
+	fill_px(vm->dev, pml4, scratch_pml4e);
+}
+
+static void
+gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
+			  struct i915_page_directory_pointer *pdp,
+			  struct i915_page_directory *pd,
+			  int index)
+{
+	gen8_ppgtt_pdpe_t *page_directorypo;
+
+	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+		return;
+
+	page_directorypo = kmap_px(pdp);
+	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
+	kunmap_px(ppgtt, page_directorypo);
+}
+
+static void
+gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
+				  struct i915_pml4 *pml4,
+				  struct i915_page_directory_pointer *pdp,
+				  int index)
+{
+	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
+
+	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
+	kunmap_px(ppgtt, pagemap);
+}
+
+/* Broadwell Page Directory Pointer Descriptors */
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
+			  unsigned entry,
+			  dma_addr_t addr)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	BUG_ON(entry >= 4);
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit(ring, upper_32_bits(addr));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit(ring, lower_32_bits(addr));
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
+				 struct drm_i915_gem_request *req)
+{
+	int i, ret;
+
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		ret = gen8_write_pdp(req, i, pd_daddr);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_request *req)
+{
+	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+}
+
+static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
+				       struct i915_page_directory_pointer *pdp,
+				       uint64_t start,
+				       uint64_t length,
+				       gen8_pte_t scratch_pte)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pte_t *pt_vaddr;
+	unsigned pdpe = gen8_pdpe_index(start);
+	unsigned pde = gen8_pde_index(start);
+	unsigned pte = gen8_pte_index(start);
+	unsigned num_entries = length >> PAGE_SHIFT;
+	unsigned last_pte, i;
+
+	if (WARN_ON(!pdp))
+		return;
+
+	while (num_entries) {
+		struct i915_page_directory *pd;
+		struct i915_page_table *pt;
+
+		if (WARN_ON(!pdp->page_directory[pdpe]))
+			break;
+
+		pd = pdp->page_directory[pdpe];
+
+		if (WARN_ON(!pd->page_table[pde]))
+			break;
+
+		pt = pd->page_table[pde];
+
+		if (WARN_ON(!px_page(pt)))
+			break;
+
+		last_pte = pte + num_entries;
+		if (last_pte > GEN8_PTES)
+			last_pte = GEN8_PTES;
+
+		pt_vaddr = kmap_px(pt);
+
+		for (i = pte; i < last_pte; i++) {
+			pt_vaddr[i] = scratch_pte;
+			num_entries--;
+		}
+
+		kunmap_px(ppgtt, pt);
+
+		pte = 0;
+		if (++pde == I915_PDES) {
+			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+				break;
+			pde = 0;
+		}
+	}
+}
+
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+				   uint64_t start,
+				   uint64_t length,
+				   bool use_scratch)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+						 I915_CACHE_LLC, use_scratch);
+
+	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
+					   scratch_pte);
+	} else {
+		uint64_t templ4, pml4e;
+		struct i915_page_directory_pointer *pdp;
+
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
+						   scratch_pte);
+		}
+	}
+}
+
+static void
+gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
+			      struct i915_page_directory_pointer *pdp,
+			      struct sg_page_iter *sg_iter,
+			      uint64_t start,
+			      enum i915_cache_level cache_level)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pte_t *pt_vaddr;
+	unsigned pdpe = gen8_pdpe_index(start);
+	unsigned pde = gen8_pde_index(start);
+	unsigned pte = gen8_pte_index(start);
+
+	pt_vaddr = NULL;
+
+	while (__sg_page_iter_next(sg_iter)) {
+		if (pt_vaddr == NULL) {
+			struct i915_page_directory *pd = pdp->page_directory[pdpe];
+			struct i915_page_table *pt = pd->page_table[pde];
+			pt_vaddr = kmap_px(pt);
+		}
+
+		pt_vaddr[pte] =
+			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
+					cache_level, true);
+		if (++pte == GEN8_PTES) {
+			kunmap_px(ppgtt, pt_vaddr);
+			pt_vaddr = NULL;
+			if (++pde == I915_PDES) {
+				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+					break;
+				pde = 0;
+			}
+			pte = 0;
+		}
+	}
+
+	if (pt_vaddr)
+		kunmap_px(ppgtt, pt_vaddr);
+}
+
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+				      struct sg_table *pages,
+				      uint64_t start,
+				      enum i915_cache_level cache_level,
+				      u32 unused)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	struct sg_page_iter sg_iter;
+
+	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
+
+	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
+					      cache_level);
+	} else {
+		struct i915_page_directory_pointer *pdp;
+		uint64_t templ4, pml4e;
+		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
+
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
+						      start, cache_level);
+		}
+	}
+}
+
+static void gen8_free_page_tables(struct drm_device *dev,
+				  struct i915_page_directory *pd)
+{
+	int i;
+
+	if (!px_page(pd))
+		return;
+
+	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
+		if (WARN_ON(!pd->page_table[i]))
+			continue;
+
+		free_pt(dev, pd->page_table[i]);
+		pd->page_table[i] = NULL;
+	}
+}
+
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	vm->scratch_pd = alloc_pd(dev);
+	if (IS_ERR(vm->scratch_pd)) {
+		free_pt(dev, vm->scratch_pt);
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pd);
+	}
+
+	if (USES_FULL_48BIT_PPGTT(dev)) {
+		vm->scratch_pdp = alloc_pdp(dev);
+		if (IS_ERR(vm->scratch_pdp)) {
+			free_pd(dev, vm->scratch_pd);
+			free_pt(dev, vm->scratch_pt);
+			free_scratch_page(dev, vm->scratch_page);
+			return PTR_ERR(vm->scratch_pdp);
+		}
+	}
+
+	gen8_initialize_pt(vm, vm->scratch_pt);
+	gen8_initialize_pd(vm, vm->scratch_pd);
+	if (USES_FULL_48BIT_PPGTT(dev))
+		gen8_initialize_pdp(vm, vm->scratch_pdp);
+
+	return 0;
+}
+
+static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
+{
+	enum vgt_g2v_type msg;
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int offset = vgtif_reg(pdp0_lo);
+	int i;
+
+	if (USES_FULL_48BIT_PPGTT(dev)) {
+		u64 daddr = px_dma(&ppgtt->pml4);
+
+		I915_WRITE(offset, lower_32_bits(daddr));
+		I915_WRITE(offset + 4, upper_32_bits(daddr));
+
+		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
+	} else {
+		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+			I915_WRITE(offset, lower_32_bits(daddr));
+			I915_WRITE(offset + 4, upper_32_bits(daddr));
+
+			offset += 8;
+		}
+
+		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+	}
+
+	I915_WRITE(vgtif_reg(g2v_notify), msg);
+
+	return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	if (USES_FULL_48BIT_PPGTT(dev))
+		free_pdp(dev, vm->scratch_pdp);
+	free_pd(dev, vm->scratch_pd);
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
+static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+				    struct i915_page_directory_pointer *pdp)
+{
+	int i;
+
+	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+		if (WARN_ON(!pdp->page_directory[i]))
+			continue;
+
+		gen8_free_page_tables(dev, pdp->page_directory[i]);
+		free_pd(dev, pdp->page_directory[i]);
+	}
+
+	free_pdp(dev, pdp);
+}
+
+static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
+{
+	int i;
+
+	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
+		if (WARN_ON(!ppgtt->pml4.pdps[i]))
+			continue;
+
+		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+	}
+
+	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (intel_vgpu_active(vm->dev))
+		gen8_ppgtt_notify_vgt(ppgtt, false);
+
+	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+	else
+		gen8_ppgtt_cleanup_4lvl(ppgtt);
+
+	gen8_free_scratch(vm);
+}
+
+/**
+ * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
+ * @vm:	Master vm structure.
+ * @pd:	Page directory for this address range.
+ * @start:	Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pts:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page tables. Extremely similar to
+ * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
+ * the page directory boundary (instead of the page directory pointer). That
+ * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
+ * possible, and likely, that the caller will need to make multiple calls to
+ * this function to achieve the appropriate allocation.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
+				     struct i915_page_directory *pd,
+				     uint64_t start,
+				     uint64_t length,
+				     unsigned long *new_pts)
+{
+	struct drm_device *dev = vm->dev;
+	struct i915_page_table *pt;
+	uint64_t temp;
+	uint32_t pde;
+
+	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+		/* Don't reallocate page tables */
+		if (test_bit(pde, pd->used_pdes)) {
+			/* Scratch is never allocated this way */
+			WARN_ON(pt == vm->scratch_pt);
+			continue;
+		}
+
+		pt = alloc_pt(dev);
+		if (IS_ERR(pt))
+			goto unwind_out;
+
+		gen8_initialize_pt(vm, pt);
+		pd->page_table[pde] = pt;
+		__set_bit(pde, new_pts);
+		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
+	}
+
+	return 0;
+
+unwind_out:
+	for_each_set_bit(pde, new_pts, I915_PDES)
+		free_pt(dev, pd->page_table[pde]);
+
+	return -ENOMEM;
+}
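
The 1GB page-directory boundary named in the kerneldoc above falls out of the gen8 geometry; an editor's compile-time restatement of the arithmetic (illustrative only):

/* Editor's arithmetic sketch: 512 PTEs x 4 KiB = 2 MiB per page table,
 * and 512 PDEs x 2 MiB = 1 GiB per page directory -- hence the 1GB
 * allocation boundary of gen8_ppgtt_alloc_pagetabs(). */
static void example_gen8_geometry(void)
{
	BUILD_BUG_ON(512UL * 4096UL != 2UL << 20);
	BUILD_BUG_ON(512UL * (2UL << 20) != 1UL << 30);
}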
+
+/**
+ * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
+ * @vm:	Master vm structure.
+ * @pdp:	Page directory pointer for this address range.
+ * @start:	Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pds:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page directories starting at the pdpe index of
+ * @start, and ending at the pdpe index of @start + @length. This function will skip
+ * over already allocated page directories within the range, and only allocate
+ * new ones, setting the appropriate pointer within the pdp as well as the
+ * correct position in the bitmap @new_pds.
+ *
+ * The function will only allocate the pages within the range for a given page
+ * directory pointer. In other words, if @start + @length straddles a virtually
+ * addressed PDP boundary (512GB for 4k pages), there will be more allocations
+ * required by the caller. This is not currently possible, and the BUG in the
+ * code will prevent it.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int
+gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
+				  struct i915_page_directory_pointer *pdp,
+				  uint64_t start,
+				  uint64_t length,
+				  unsigned long *new_pds)
+{
+	struct drm_device *dev = vm->dev;
+	struct i915_page_directory *pd;
+	uint64_t temp;
+	uint32_t pdpe;
+	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+
+	WARN_ON(!bitmap_empty(new_pds, pdpes));
+
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		if (test_bit(pdpe, pdp->used_pdpes))
+			continue;
+
+		pd = alloc_pd(dev);
+		if (IS_ERR(pd))
+			goto unwind_out;
+
+		gen8_initialize_pd(vm, pd);
+		pdp->page_directory[pdpe] = pd;
+		__set_bit(pdpe, new_pds);
+		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
+	}
+
+	return 0;
+
+unwind_out:
+	for_each_set_bit(pdpe, new_pds, pdpes)
+		free_pd(dev, pdp->page_directory[pdpe]);
+
+	return -ENOMEM;
+}
+
+/**
+ * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
+ * @vm:	Master vm structure.
+ * @pml4:	Page map level 4 for this address range.
+ * @start:	Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page directory pointers. Extremely similar to
+ * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
+ * The main difference is here we are limited by the pml4 boundary (instead of
+ * the page directory pointer).
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int
+gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
+				  struct i915_pml4 *pml4,
+				  uint64_t start,
+				  uint64_t length,
+				  unsigned long *new_pdps)
+{
+	struct drm_device *dev = vm->dev;
+	struct i915_page_directory_pointer *pdp;
+	uint64_t temp;
+	uint32_t pml4e;
+
+	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
+
+	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+		if (!test_bit(pml4e, pml4->used_pml4es)) {
+			pdp = alloc_pdp(dev);
+			if (IS_ERR(pdp))
+				goto unwind_out;
+
+			gen8_initialize_pdp(vm, pdp);
+			pml4->pdps[pml4e] = pdp;
+			__set_bit(pml4e, new_pdps);
+			trace_i915_page_directory_pointer_entry_alloc(vm,
+								      pml4e,
+								      start,
+								      GEN8_PML4E_SHIFT);
+		}
+	}
+
+	return 0;
+
+unwind_out:
+	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+		free_pdp(dev, pml4->pdps[pml4e]);
+
+	return -ENOMEM;
+}
+
+static void
+free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
+{
+	kfree(new_pts);
+	kfree(new_pds);
+}
+
+/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
+ * of these are based on the number of PDPEs in the system.
+ */
+static
+int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
+					 unsigned long **new_pts,
+					 uint32_t pdpes)
+{
+	unsigned long *pds;
+	unsigned long *pts;
+
+	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
+	if (!pds)
+		return -ENOMEM;
+
+	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
+		      GFP_TEMPORARY);
+	if (!pts)
+		goto err_out;
+
+	*new_pds = pds;
+	*new_pts = pts;
+
+	return 0;
+
+err_out:
+	free_gen8_temp_bitmaps(pds, pts);
+	return -ENOMEM;
+}
+
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+				    struct i915_page_directory_pointer *pdp,
+				    uint64_t start,
+				    uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	unsigned long *new_page_dirs, *new_page_tables;
+	struct drm_device *dev = vm->dev;
+	struct i915_page_directory *pd;
+	const uint64_t orig_start = start;
+	const uint64_t orig_length = length;
+	uint64_t temp;
+	uint32_t pdpe;
+	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+	int ret;
+
+	/* Wrap is never okay since we can only represent 48b, and we don't
+	 * actually use the other side of the canonical address space.
+	 */
+	if (WARN_ON(start + length < start))
+		return -ENODEV;
+
+	if (WARN_ON(start + length > vm->total))
+		return -ENODEV;
+
+	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
+	if (ret)
+		return ret;
+
+	/* Do the allocations first so we can easily bail out */
+	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
+						new_page_dirs);
+	if (ret) {
+		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+		return ret;
+	}
+
+	/* For every page directory referenced, allocate page tables */
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
+						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
+		if (ret)
+			goto err_out;
+	}
+
+	start = orig_start;
+	length = orig_length;
+
+	/* Allocations have completed successfully, so set the bitmaps, and do
+	 * the mappings. */
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		gen8_pde_t *const page_directory = kmap_px(pd);
+		struct i915_page_table *pt;
+		uint64_t pd_len = length;
+		uint64_t pd_start = start;
+		uint32_t pde;
+
+		/* Every pd should be allocated; we just did that above. */
+		WARN_ON(!pd);
+
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+			/* Same reasoning as pd */
+			WARN_ON(!pt);
+			WARN_ON(!pd_len);
+			WARN_ON(!gen8_pte_count(pd_start, pd_len));
+
+			/* Set our used ptes within the page table */
+			bitmap_set(pt->used_ptes,
+				   gen8_pte_index(pd_start),
+				   gen8_pte_count(pd_start, pd_len));
+
+			/* Our pde is now pointing to the pagetable, pt */
+			__set_bit(pde, pd->used_pdes);
+
+			/* Map the PDE to the page table */
+			page_directory[pde] = gen8_pde_encode(px_dma(pt),
+							      I915_CACHE_LLC);
+			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
+							gen8_pte_index(start),
+							gen8_pte_count(start, length),
+							GEN8_PTES);
+
+			/* NB: We haven't yet mapped ptes to pages. At this
+			 * point we're still relying on insert_entries() */
+		}
+
+		kunmap_px(ppgtt, page_directory);
+		__set_bit(pdpe, pdp->used_pdpes);
+		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
+	}
+
+	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
+	return 0;
+
+err_out:
+	while (pdpe--) {
+		for_each_set_bit(temp, new_page_tables + pdpe *
+				BITS_TO_LONGS(I915_PDES), I915_PDES)
+			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+	}
+
+	for_each_set_bit(pdpe, new_page_dirs, pdpes)
+		free_pd(dev, pdp->page_directory[pdpe]);
+
+	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
+	return ret;
+}
+
+static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+				    struct i915_pml4 *pml4,
+				    uint64_t start,
+				    uint64_t length)
+{
+	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
+	struct i915_hw_ppgtt *ppgtt =
+			container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer *pdp;
+	uint64_t temp, pml4e;
+	int ret = 0;
+
+	/* Do the pml4 allocations first, so we don't need to track the newly
+	 * allocated tables below the pdp */
+	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
+
+	/* The page directory and page table allocations are done in the shared
+	 * 3 and 4 level code. Just allocate the pdps.
+	 */
+	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
+						new_pdps);
+	if (ret)
+		return ret;
+
+	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
+	     "The allocation has spanned more than 512GB. "
+	     "It is highly likely this is incorrect.");
+
+	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+		WARN_ON(!pdp);
+
+		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+		if (ret)
+			goto err_out;
+
+		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
+	}
+
+	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
+		  GEN8_PML4ES_PER_PML4);
+
+	return 0;
+
+err_out:
+	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+
+	return ret;
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (USES_FULL_48BIT_PPGTT(vm->dev))
+		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+	else
+		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+}
+
+static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
+			  uint64_t start, uint64_t length,
+			  gen8_pte_t scratch_pte,
+			  struct seq_file *m)
+{
+	struct i915_page_directory *pd;
+	uint64_t temp;
+	uint32_t pdpe;
+
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		struct i915_page_table *pt;
+		uint64_t pd_len = length;
+		uint64_t pd_start = start;
+		uint32_t pde;
+
+		if (!test_bit(pdpe, pdp->used_pdpes))
+			continue;
+
+		seq_printf(m, "\tPDPE #%d\n", pdpe);
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+			uint32_t  pte;
+			gen8_pte_t *pt_vaddr;
+
+			if (!test_bit(pde, pd->used_pdes))
+				continue;
+
+			pt_vaddr = kmap_px(pt);
+			for (pte = 0; pte < GEN8_PTES; pte += 4) {
+				uint64_t va =
+					(pdpe << GEN8_PDPE_SHIFT) |
+					(pde << GEN8_PDE_SHIFT) |
+					(pte << GEN8_PTE_SHIFT);
+				int i;
+				bool found = false;
+
+				for (i = 0; i < 4; i++)
+					if (pt_vaddr[pte + i] != scratch_pte)
+						found = true;
+				if (!found)
+					continue;
+
+				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
+				for (i = 0; i < 4; i++) {
+					if (pt_vaddr[pte + i] != scratch_pte)
+						seq_printf(m, " %llx", pt_vaddr[pte + i]);
+					else
+						seq_puts(m, "  SCRATCH ");
+				}
+				seq_puts(m, "\n");
+			}
+			/* don't use kunmap_px, it could trigger
+			 * an unnecessary flush.
+			 */
+			kunmap_atomic(pt_vaddr);
+		}
+	}
+}
+
+static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+	struct i915_address_space *vm = &ppgtt->base;
+	uint64_t start = ppgtt->base.start;
+	uint64_t length = ppgtt->base.total;
+	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+						 I915_CACHE_LLC, true);
+
+	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
+	} else {
+		uint64_t templ4, pml4e;
+		struct i915_pml4 *pml4 = &ppgtt->pml4;
+		struct i915_page_directory_pointer *pdp;
+
+		gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
+			if (!test_bit(pml4e, pml4->used_pml4es))
+				continue;
+
+			seq_printf(m, "    PML4E #%llu\n", pml4e);
+			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
+		}
+	}
+}
+
+static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
+{
+	unsigned long *new_page_dirs, *new_page_tables;
+	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+	int ret;
+
+	/* We allocate temporary bitmaps for page tables for no gain,
+	 * but as this is for init only, let's keep things simple
+	 */
+	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
+	if (ret)
+		return ret;
+
+	/* Allocate for all pdps regardless of how the ppgtt
+	 * was defined.
+	 */
+	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
+						0, 1ULL << 32,
+						new_page_dirs);
+	if (!ret)
+		*ppgtt->pdp.used_pdpes = *new_page_dirs;
+
+	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+
+	return ret;
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory:
+ * 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
+ */
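+/*
+ * A quick sanity check of the arithmetic above (illustrative, assuming 512
+ * entries per directory level and 4KiB pages):
+ *
+ *	4 PDPs * 512 PDEs * 512 PTEs * 4096 B = 4 GiB
+ *
+ * which is exactly the legacy 32b address space that gen8_ppgtt_init()
+ * programs below.
+ */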
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+	int ret;
+
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret)
+		return ret;
+
+	ppgtt->base.start = 0;
+	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
+	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
+	ppgtt->debug_dump = gen8_dump_ppgtt;
+
+	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+		if (ret)
+			goto free_scratch;
+
+		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+
+		ppgtt->base.total = 1ULL << 48;
+		ppgtt->switch_mm = gen8_48b_mm_switch;
+	} else {
+		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+		if (ret)
+			goto free_scratch;
+
+		ppgtt->base.total = 1ULL << 32;
+		ppgtt->switch_mm = gen8_legacy_mm_switch;
+		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
+							      0, 0,
+							      GEN8_PML4E_SHIFT);
+
+		if (intel_vgpu_active(ppgtt->base.dev)) {
+			ret = gen8_preallocate_top_level_pdps(ppgtt);
+			if (ret)
+				goto free_scratch;
+		}
+	}
+
+	if (intel_vgpu_active(ppgtt->base.dev))
+		gen8_ppgtt_notify_vgt(ppgtt, true);
+
+	return 0;
+
+free_scratch:
+	gen8_free_scratch(&ppgtt->base);
+	return ret;
+}
+
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+	struct i915_address_space *vm = &ppgtt->base;
+	struct i915_page_table *unused;
+	gen6_pte_t scratch_pte;
+	uint32_t pd_entry;
+	uint32_t  pte, pde, temp;
+	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
+		u32 expected;
+		gen6_pte_t *pt_vaddr;
+		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
+		pd_entry = readl(ppgtt->pd_addr + pde);
+		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+		if (pd_entry != expected)
+			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+				   pde,
+				   pd_entry,
+				   expected);
+		seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
+		for (pte = 0; pte < GEN6_PTES; pte += 4) {
+			unsigned long va =
+				(pde * PAGE_SIZE * GEN6_PTES) +
+				(pte * PAGE_SIZE);
+			int i;
+			bool found = false;
+			for (i = 0; i < 4; i++)
+				if (pt_vaddr[pte + i] != scratch_pte)
+					found = true;
+			if (!found)
+				continue;
+
+			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+			for (i = 0; i < 4; i++) {
+				if (pt_vaddr[pte + i] != scratch_pte)
+					seq_printf(m, " %08x", pt_vaddr[pte + i]);
+				else
+					seq_puts(m, "  SCRATCH ");
+			}
+			seq_puts(m, "\n");
+		}
+		kunmap_px(ppgtt, pt_vaddr);
+	}
+}
+
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static void gen6_write_pde(struct i915_page_directory *pd,
+			    const int pde, struct i915_page_table *pt)
+{
+	/* Caller needs to make sure the write completes if necessary */
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(pd, struct i915_hw_ppgtt, pd);
+	u32 pd_entry;
+
+	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
+	pd_entry |= GEN6_PDE_VALID;
+
+	writel(pd_entry, ppgtt->pd_addr + pde);
+}
+
+/* Write all the page tables found in the ppgtt structure to incrementing page
+ * directory entries. */
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+				  struct i915_page_directory *pd,
+				  uint32_t start, uint32_t length)
+{
+	struct i915_page_table *pt;
+	uint32_t pde, temp;
+
+	gen6_for_each_pde(pt, pd, start, length, temp, pde)
+		gen6_write_pde(pd, pde, pt);
+
+	/* Make sure the write is complete before other code can use this page
+	 * table. Also required for WC mapped PTEs */
+	readl(dev_priv->gtt.gsm);
+}
+
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
+{
+	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
+
+	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
+}
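+
+/*
+ * Worked example (hypothetical offset, for illustration only): a page
+ * directory at ggtt_offset 0x1000 encodes to (0x1000 / 64) << 16 = 0x400000;
+ * the BUG_ON above guarantees the 64B alignment that the division assumes.
+ */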
+
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			 struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	/* NB: TLBs must be flushed and invalidated before a switch */
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	return 0;
+}
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	/* NB: TLBs must be flushed and invalidated before a switch */
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	/* XXX: RCS is the only one to auto invalidate the TLBs? */
+	if (ring->id != RCS) {
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+
+	POSTING_READ(RING_PP_DIR_DCLV(ring));
+
+	return 0;
+}
+
+static void gen8_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int j;
+
+	for_each_ring(ring, dev_priv, j) {
+		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
+	}
+}
+
+static void gen7_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	uint32_t ecochk, ecobits;
+	int i;
+
+	ecobits = I915_READ(GAC_ECO_BITS);
+	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+	ecochk = I915_READ(GAM_ECOCHK);
+	if (IS_HASWELL(dev)) {
+		ecochk |= ECOCHK_PPGTT_WB_HSW;
+	} else {
+		ecochk |= ECOCHK_PPGTT_LLC_IVB;
+		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+	}
+	I915_WRITE(GAM_ECOCHK, ecochk);
+
+	for_each_ring(ring, dev_priv, i) {
+		/* GFX_MODE is per-ring on gen7+ */
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	}
+}
+
+static void gen6_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ecochk, gab_ctl, ecobits;
+
+	ecobits = I915_READ(GAC_ECO_BITS);
+	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+		   ECOBITS_PPGTT_CACHE64B);
+
+	gab_ctl = I915_READ(GAB_CTL);
+	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+	ecochk = I915_READ(GAM_ECOCHK);
+	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+				   uint64_t start,
+				   uint64_t length,
+				   bool use_scratch)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen6_pte_t *pt_vaddr, scratch_pte;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	unsigned num_entries = length >> PAGE_SHIFT;
+	unsigned act_pt = first_entry / GEN6_PTES;
+	unsigned first_pte = first_entry % GEN6_PTES;
+	unsigned last_pte, i;
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	while (num_entries) {
+		last_pte = first_pte + num_entries;
+		if (last_pte > GEN6_PTES)
+			last_pte = GEN6_PTES;
+
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+
+		for (i = first_pte; i < last_pte; i++)
+			pt_vaddr[i] = scratch_pte;
+
+		kunmap_px(ppgtt, pt_vaddr);
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pt++;
+	}
+}
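+
+/*
+ * Worked example of the index arithmetic above (illustrative numbers):
+ * clearing start = 8MB, length = 8KB gives first_entry = 2048 and
+ * num_entries = 2; with GEN6_PTES == 1024 (4KB page / 4B PTE) that is
+ * act_pt = 2 and first_pte = 0, i.e. both entries are scratched in page
+ * table 2.
+ */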
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+				      struct sg_table *pages,
+				      uint64_t start,
+				      enum i915_cache_level cache_level, u32 flags)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen6_pte_t *pt_vaddr;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	unsigned act_pt = first_entry / GEN6_PTES;
+	unsigned act_pte = first_entry % GEN6_PTES;
+	struct sg_page_iter sg_iter;
+
+	pt_vaddr = NULL;
+	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+
+		pt_vaddr[act_pte] =
+			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+				       cache_level, true, flags);
+
+		if (++act_pte == GEN6_PTES) {
+			kunmap_px(ppgtt, pt_vaddr);
+			pt_vaddr = NULL;
+			act_pt++;
+			act_pte = 0;
+		}
+	}
+	if (pt_vaddr)
+		kunmap_px(ppgtt, pt_vaddr);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start_in, uint64_t length_in)
+{
+	DECLARE_BITMAP(new_page_tables, I915_PDES);
+	struct drm_device *dev = vm->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt =
+				container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_table *pt;
+	uint32_t start, length, start_save, length_save;
+	uint32_t pde, temp;
+	int ret;
+
+	if (WARN_ON(start_in + length_in > ppgtt->base.total))
+		return -ENODEV;
+
+	start = start_save = start_in;
+	length = length_save = length_in;
+
+	bitmap_zero(new_page_tables, I915_PDES);
+
+	/* The allocation is done in two stages so that we can bail out with
+	 * a minimal amount of pain. The first stage finds new page tables
+	 * that need allocation. The second stage marks the PTEs in use
+	 * within those page tables.
+	 */
+	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+		if (pt != vm->scratch_pt) {
+			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
+			continue;
+		}
+
+		/* pt here is the scratch page table; it should never have
+		 * any of its PTEs marked as used. */
+		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+
+		pt = alloc_pt(dev);
+		if (IS_ERR(pt)) {
+			ret = PTR_ERR(pt);
+			goto unwind_out;
+		}
+
+		gen6_initialize_pt(vm, pt);
+
+		ppgtt->pd.page_table[pde] = pt;
+		__set_bit(pde, new_page_tables);
+		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
+	}
+
+	start = start_save;
+	length = length_save;
+
+	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
+
+		bitmap_zero(tmp_bitmap, GEN6_PTES);
+		bitmap_set(tmp_bitmap, gen6_pte_index(start),
+			   gen6_pte_count(start, length));
+
+		if (__test_and_clear_bit(pde, new_page_tables))
+			gen6_write_pde(&ppgtt->pd, pde, pt);
+
+		trace_i915_page_table_entry_map(vm, pde, pt,
+					 gen6_pte_index(start),
+					 gen6_pte_count(start, length),
+					 GEN6_PTES);
+		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
+				GEN6_PTES);
+	}
+
+	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
+
+	/* Make sure the write is complete before other code can use this page
+	 * table. Also required for WC mapped PTEs */
+	readl(dev_priv->gtt.gsm);
+
+	mark_tlbs_dirty(ppgtt);
+	return 0;
+
+unwind_out:
+	for_each_set_bit(pde, new_page_tables, I915_PDES) {
+		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
+
+		ppgtt->pd.page_table[pde] = vm->scratch_pt;
+		free_pt(vm->dev, pt);
+	}
+
+	mark_tlbs_dirty(ppgtt);
+	return ret;
+}
+
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	gen6_initialize_pt(vm, vm->scratch_pt);
+
+	return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_table *pt;
+	uint32_t pde;
+
+	drm_mm_remove_node(&ppgtt->node);
+
+	gen6_for_all_pdes(pt, ppgtt, pde) {
+		if (pt != vm->scratch_pt)
+			free_pt(ppgtt->base.dev, pt);
+	}
+
+	gen6_free_scratch(vm);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+{
+	struct i915_address_space *vm = &ppgtt->base;
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool retried = false;
+	int ret;
+
+	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+	 * allocator works in address space sizes, so it's multiplied by page
+	 * size. We allocate at the top of the GTT to avoid fragmentation.
+	 */
+	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+
+	ret = gen6_init_scratch(vm);
+	if (ret)
+		return ret;
+
+alloc:
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+						  &ppgtt->node, GEN6_PD_SIZE,
+						  GEN6_PD_ALIGN, 0,
+						  0, dev_priv->gtt.base.total,
+						  DRM_MM_TOPDOWN);
+	if (ret == -ENOSPC && !retried) {
+		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
+		if (ret)
+			goto err_out;
+
+		retried = true;
+		goto alloc;
+	}
+
+	if (ret)
+		goto err_out;
+
+	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+		DRM_DEBUG("Forced to use aperture for PDEs\n");
+
+	return 0;
+
+err_out:
+	gen6_free_scratch(vm);
+	return ret;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+	return gen6_ppgtt_allocate_page_directories(ppgtt);
+}
+
+static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
+				  uint64_t start, uint64_t length)
+{
+	struct i915_page_table *unused;
+	uint32_t pde, temp;
+
+	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+	if (IS_GEN6(dev)) {
+		ppgtt->switch_mm = gen6_mm_switch;
+	} else if (IS_HASWELL(dev)) {
+		ppgtt->switch_mm = hsw_mm_switch;
+	} else if (IS_GEN7(dev)) {
+		ppgtt->switch_mm = gen7_mm_switch;
+	} else {
+		BUG();
+	}
+
+	if (intel_vgpu_active(dev))
+		ppgtt->switch_mm = vgpu_mm_switch;
+
+	ret = gen6_ppgtt_alloc(ppgtt);
+	if (ret)
+		return ret;
+
+	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
+	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
+	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+	ppgtt->base.start = 0;
+	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+	ppgtt->debug_dump = gen6_dump_ppgtt;
+
+	ppgtt->pd.base.ggtt_offset =
+		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+
+	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
+
+	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
+
+	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
+			 ppgtt->node.size >> 20,
+			 ppgtt->node.start / PAGE_SIZE);
+
+	DRM_DEBUG("Adding PPGTT at offset %x\n",
+		  ppgtt->pd.base.ggtt_offset << 10);
+
+	return 0;
+}
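+
+/*
+ * Size check for the total set above (illustrative): I915_PDES (512) *
+ * GEN6_PTES (1024) * PAGE_SIZE (4096) = 2GB, the full gen6/7 PPGTT
+ * address space.
+ */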
+
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->base.dev = dev;
+
+	if (INTEL_INFO(dev)->gen < 8)
+		return gen6_ppgtt_init(ppgtt);
+	else
+		return gen8_ppgtt_init(ppgtt);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+				    struct drm_i915_private *dev_priv)
+{
+	drm_mm_init(&vm->mm, vm->start, vm->total);
+	vm->dev = dev_priv->dev;
+	INIT_LIST_HEAD(&vm->active_list);
+	INIT_LIST_HEAD(&vm->inactive_list);
+	list_add_tail(&vm->global_link, &dev_priv->vm_list);
+}
+
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	ret = __hw_ppgtt_init(dev, ppgtt);
+	if (ret == 0) {
+		kref_init(&ppgtt->ref);
+		i915_address_space_init(&ppgtt->base, dev_priv);
+	}
+
+	return ret;
+}
+
+int i915_ppgtt_init_hw(struct drm_device *dev)
+{
+	/* In the case of execlists, PPGTT is enabled by the context descriptor
+	 * and the PDPs are contained within the context itself.  We don't
+	 * need to do anything here. */
+	if (i915.enable_execlists)
+		return 0;
+
+	if (!USES_PPGTT(dev))
+		return 0;
+
+	if (IS_GEN6(dev))
+		gen6_ppgtt_enable(dev);
+	else if (IS_GEN7(dev))
+		gen7_ppgtt_enable(dev);
+	else if (INTEL_INFO(dev)->gen >= 8)
+		gen8_ppgtt_enable(dev);
+	else
+		MISSING_CASE(INTEL_INFO(dev)->gen);
+
+	return 0;
+}
+
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	if (!ppgtt)
+		return 0;
+
+	return ppgtt->switch_mm(ppgtt, req);
+}
+
+struct i915_hw_ppgtt *
+i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+{
+	struct i915_hw_ppgtt *ppgtt;
+	int ret;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ERR_PTR(-ENOMEM);
+
+	ret = i915_ppgtt_init(dev, ppgtt);
+	if (ret) {
+		kfree(ppgtt);
+		return ERR_PTR(ret);
+	}
+
+	ppgtt->file_priv = fpriv;
+
+	trace_i915_ppgtt_create(&ppgtt->base);
+
+	return ppgtt;
+}
+
+void  i915_ppgtt_release(struct kref *kref)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(kref, struct i915_hw_ppgtt, ref);
+
+	trace_i915_ppgtt_release(&ppgtt->base);
+
+	/* vmas should already be unbound */
+	WARN_ON(!list_empty(&ppgtt->base.active_list));
+	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+
+	list_del(&ppgtt->base.global_link);
+	drm_mm_takedown(&ppgtt->base.mm);
+
+	ppgtt->base.cleanup(&ppgtt->base);
+	kfree(ppgtt);
+}
+
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+	bool ret = dev_priv->mm.interruptible;
+
+	if (unlikely(dev_priv->gtt.do_idle_maps)) {
+		dev_priv->mm.interruptible = false;
+		if (i915_gpu_idle(dev_priv->dev)) {
+			DRM_ERROR("Couldn't idle GPU\n");
+			/* Wait a bit, in hopes it avoids the hang */
+			udelay(10);
+		}
+	}
+
+	return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+	if (unlikely(dev_priv->gtt.do_idle_maps))
+		dev_priv->mm.interruptible = interruptible;
+}
+
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	for_each_ring(ring, dev_priv, i) {
+		u32 fault_reg;
+		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		if (fault_reg & RING_FAULT_VALID) {
+			DRM_DEBUG_DRIVER("Unexpected fault\n"
+					 "\tAddr: 0x%08lx\n"
+					 "\tAddress space: %s\n"
+					 "\tSource ID: %d\n"
+					 "\tType: %d\n",
+					 fault_reg & PAGE_MASK,
+					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+					 RING_FAULT_SRCID(fault_reg),
+					 RING_FAULT_FAULT_TYPE(fault_reg));
+			I915_WRITE(RING_FAULT_REG(ring),
+				   fault_reg & ~RING_FAULT_VALID);
+		}
+	}
+	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+		intel_gtt_chipset_flush();
+	} else {
+		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+		POSTING_READ(GFX_FLSH_CNTL_GEN6);
+	}
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Don't bother messing with faults pre GEN6 as we have little
+	 * documentation supporting that it's a good idea.
+	 */
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	i915_check_and_clear_faults(dev);
+
+	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+				       dev_priv->gtt.base.start,
+				       dev_priv->gtt.base.total,
+				       true);
+
+	i915_ggtt_flush(dev_priv);
+}
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+	if (!dma_map_sg(&obj->base.dev->pdev->dev,
+			obj->pages->sgl, obj->pages->nents,
+			PCI_DMA_BIDIRECTIONAL))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+#ifdef writeq
+	writeq(pte, addr);
+#else
+	iowrite32((u32)pte, addr);
+	iowrite32(pte >> 32, addr + 4);
+#endif
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *st,
+				     uint64_t start,
+				     enum i915_cache_level level, u32 unused)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	gen8_pte_t __iomem *gtt_entries =
+		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+	int i = 0;
+	struct sg_page_iter sg_iter;
+	dma_addr_t addr = 0; /* shut up gcc */
+
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_dma_address(sg_iter.sg) +
+			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		gen8_set_pte(&gtt_entries[i],
+			     gen8_pte_encode(addr, level, true));
+		i++;
+	}
+
+	/*
+	 * XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that, even though
+	 * registers and PTEs are within the same BAR, they may be subject
+	 * to different (e.g. NUMA) access patterns. Therefore, even with
+	 * the way we assume hardware should work, we must keep this posting
+	 * read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readq(&gtt_entries[i-1])
+			!= gen8_pte_encode(addr, level, true));
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *st,
+				     uint64_t start,
+				     enum i915_cache_level level, u32 flags)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	gen6_pte_t __iomem *gtt_entries =
+		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+	int i = 0;
+	struct sg_page_iter sg_iter;
+	dma_addr_t addr = 0;
+
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_page_iter_dma_address(&sg_iter);
+		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
+		i++;
+	}
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that, even though
+	 * registers and PTEs are within the same BAR, they may be subject
+	 * to different (e.g. NUMA) access patterns. Therefore, even with
+	 * the way we assume hardware should work, we must keep this posting
+	 * read for paranoia.
+	 */
+	if (i != 0) {
+		unsigned long gtt = readl(&gtt_entries[i-1]);
+		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
+	}
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+				  uint64_t start,
+				  uint64_t length,
+				  bool use_scratch)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	unsigned num_entries = length >> PAGE_SHIFT;
+	gen8_pte_t scratch_pte, __iomem *gtt_base =
+		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+				      I915_CACHE_LLC,
+				      use_scratch);
+	for (i = 0; i < num_entries; i++)
+		gen8_set_pte(&gtt_base[i], scratch_pte);
+	readl(gtt_base);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+				  uint64_t start,
+				  uint64_t length,
+				  bool use_scratch)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	unsigned first_entry = start >> PAGE_SHIFT;
+	unsigned num_entries = length >> PAGE_SHIFT;
+	gen6_pte_t scratch_pte, __iomem *gtt_base =
+		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, use_scratch, 0);
+
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+	readl(gtt_base);
+}
+
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *pages,
+				     uint64_t start,
+				     enum i915_cache_level cache_level, u32 unused)
+{
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+				  uint64_t start,
+				  uint64_t length,
+				  bool unused)
+{
+	unsigned first_entry = start >> PAGE_SHIFT;
+	unsigned num_entries = length >> PAGE_SHIFT;
+	intel_gtt_clear_range(first_entry, num_entries);
+}
+
+static int ggtt_bind_vma(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	u32 pte_flags = 0;
+	int ret;
+
+	ret = i915_get_ggtt_vma_pages(vma);
+	if (ret)
+		return ret;
+
+	/* Currently applicable only to VLV */
+	if (obj->gt_ro)
+		pte_flags |= PTE_READ_ONLY;
+
+	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+				vma->node.start,
+				cache_level, pte_flags);
+
+	/*
+	 * Without aliasing PPGTT there's no difference between
+	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+	 * upgrade to both bound if we bind either to avoid double-binding.
+	 */
+	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+
+	return 0;
+}
+
+static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+				 enum i915_cache_level cache_level,
+				 u32 flags)
+{
+	struct drm_device *dev = vma->vm->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj = vma->obj;
+	struct sg_table *pages = obj->pages;
+	u32 pte_flags = 0;
+	int ret;
+
+	ret = i915_get_ggtt_vma_pages(vma);
+	if (ret)
+		return ret;
+	pages = vma->ggtt_view.pages;
+
+	/* Currently applicable only to VLV */
+	if (obj->gt_ro)
+		pte_flags |= PTE_READ_ONLY;
+
+	if (flags & GLOBAL_BIND) {
+		vma->vm->insert_entries(vma->vm, pages,
+					vma->node.start,
+					cache_level, pte_flags);
+	}
+
+	if (flags & LOCAL_BIND) {
+		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+		appgtt->base.insert_entries(&appgtt->base, pages,
+					    vma->node.start,
+					    cache_level, pte_flags);
+	}
+
+	return 0;
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+	struct drm_device *dev = vma->vm->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj = vma->obj;
+	const uint64_t size = min_t(uint64_t,
+				    obj->base.size,
+				    vma->node.size);
+
+	if (vma->bound & GLOBAL_BIND) {
+		vma->vm->clear_range(vma->vm,
+				     vma->node.start,
+				     size,
+				     true);
+	}
+
+	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
+		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+
+		appgtt->base.clear_range(&appgtt->base,
+					 vma->node.start,
+					 size,
+					 true);
+	}
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible;
+
+	interruptible = do_idling(dev_priv);
+
+	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+		     PCI_DMA_BIDIRECTIONAL);
+
+	undo_idling(dev_priv, interruptible);
+}
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  u64 *start,
+				  u64 *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
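+
+/*
+ * Worked example (hypothetical colours): when an unsnooped node is placed
+ * next to a snooped neighbour, the candidate hole is shrunk by one 4096B
+ * page at each mismatching boundary, so differently-coloured nodes always
+ * end up with at least one guard page between them.
+ */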
+
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
+				     u64 start,
+				     u64 mappable_end,
+				     u64 end)
+{
+	/* Let GEM manage all of the aperture.
+	 *
+	 * However, leave one page at the end still bound to the scratch page.
+	 * There are a number of places where the hardware apparently prefetches
+	 * past the end of the object, and we've seen multiple hangs with the
+	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
+	 * aperture.  One page should be enough to keep any prefetching inside
+	 * of the aperture.
+	 */
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+	struct drm_mm_node *entry;
+	struct drm_i915_gem_object *obj;
+	unsigned long hole_start, hole_end;
+	int ret;
+
+	BUG_ON(mappable_end > end);
+
+	ggtt_vm->start = start;
+
+	/* Subtract the guard page before address space initialization to
+	 * shrink the range used by drm_mm */
+	ggtt_vm->total = end - start - PAGE_SIZE;
+	i915_address_space_init(ggtt_vm, dev_priv);
+	ggtt_vm->total += PAGE_SIZE;
+
+	if (intel_vgpu_active(dev)) {
+		ret = intel_vgt_balloon(dev);
+		if (ret)
+			return ret;
+	}
+
+	if (!HAS_LLC(dev))
+		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+
+	/* Mark any preallocated objects as occupied */
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+
+		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
+			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+		WARN_ON(i915_gem_obj_ggtt_bound(obj));
+		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+		if (ret) {
+			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
+			return ret;
+		}
+		vma->bound |= GLOBAL_BIND;
+		__i915_vma_set_map_and_fenceable(vma);
+		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+	}
+
+	/* Clear any non-preallocated blocks */
+	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+			      hole_start, hole_end);
+		ggtt_vm->clear_range(ggtt_vm, hole_start,
+				     hole_end - hole_start, true);
+	}
+
+	/* And finally clear the reserved guard page */
+	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+
+	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt;
+
+		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+		if (!ppgtt)
+			return -ENOMEM;
+
+		ret = __hw_ppgtt_init(dev, ppgtt);
+		if (ret) {
+			ppgtt->base.cleanup(&ppgtt->base);
+			kfree(ppgtt);
+			return ret;
+		}
+
+		if (ppgtt->base.allocate_va_range)
+			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
+							    ppgtt->base.total);
+		if (ret) {
+			ppgtt->base.cleanup(&ppgtt->base);
+			kfree(ppgtt);
+			return ret;
+		}
+
+		ppgtt->base.clear_range(&ppgtt->base,
+					ppgtt->base.start,
+					ppgtt->base.total,
+					true);
+
+		dev_priv->mm.aliasing_ppgtt = ppgtt;
+		WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
+		dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+	}
+
+	return 0;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 gtt_size, mappable_size;
+
+	gtt_size = dev_priv->gtt.base.total;
+	mappable_size = dev_priv->gtt.mappable_end;
+
+	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+}
+
+void i915_global_gtt_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
+
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		ppgtt->base.cleanup(&ppgtt->base);
+		kfree(ppgtt);
+	}
+
+	if (drm_mm_initialized(&vm->mm)) {
+		if (intel_vgpu_active(dev))
+			intel_vgt_deballoon();
+
+		drm_mm_takedown(&vm->mm);
+		list_del(&vm->global_link);
+	}
+
+	vm->cleanup(vm);
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
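+
+/*
+ * Example decode (value assumed for illustration): a GGMS field of 2 gives
+ * a 2 << 20 = 2MB GTT; at 4 bytes per gen6 PTE that is 512K entries, each
+ * mapping a 4KB page, i.e. a 2GB aperture. The PTE-count-to-total
+ * conversion itself happens in gen6_gmch_probe() below.
+ */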
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+	if (bdw_gmch_ctl)
+		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+	if (bdw_gmch_ctl > 4)
+		bdw_gmch_ctl = 4;
+#endif
+
+	return bdw_gmch_ctl << 20;
+}
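+
+/*
+ * Example decode (illustrative): a GGMS field of 2 gives 1 << 2 = 4, i.e. a
+ * 4MB GTT; at 8 bytes per gen8 PTE that is 512K entries, or 2GB of address
+ * space -- which is also why the CONFIG_X86_32 clamp above caps the value
+ * at 4 for a 2GB GGTT on 32-bit kernels.
+ */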
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+	if (gmch_ctrl)
+		return 1 << (20 + gmch_ctrl);
+
+	return 0;
+}
+
+static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
+	return bdw_gmch_ctl << 25; /* 32 MB units */
+}
+
+static size_t chv_get_stolen_size(u16 gmch_ctrl)
+{
+	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+	/*
+	 * 0x0  to 0x10: 32MB increments starting at 0MB
+	 * 0x11 to 0x16: 4MB increments starting at 8MB
+	 * 0x17 to 0x1d: 4MB increments starting at 36MB
+	 */
+	if (gmch_ctrl < 0x11)
+		return gmch_ctrl << 25;
+	else if (gmch_ctrl < 0x17)
+		return (gmch_ctrl - 0x11 + 2) << 22;
+	else
+		return (gmch_ctrl - 0x17 + 9) << 22;
+}
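+
+/*
+ * Example decodes for the three bands above (illustrative):
+ * 0x10 -> 0x10 << 25 = 512MB, 0x11 -> (0x11 - 0x11 + 2) << 22 = 8MB,
+ * 0x17 -> (0x17 - 0x17 + 9) << 22 = 36MB.
+ */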
+
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+	if (gen9_gmch_ctl < 0xf0)
+		return gen9_gmch_ctl << 25; /* 32 MB units */
+	else
+		/* 4MB increments, starting at 4MB for gen9_gmch_ctl == 0xf0 */
+		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
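+
+/*
+ * Example decodes (illustrative): 0x01 -> 32MB,
+ * 0xf0 -> (0xf0 - 0xf0 + 1) << 22 = 4MB, 0xf1 -> 8MB.
+ */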
+
+static int ggtt_probe_common(struct drm_device *dev,
+			     size_t gtt_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_page_scratch *scratch_page;
+	phys_addr_t gtt_phys_addr;
+
+	/* For Modern GENs the PTEs and register space are split in the BAR */
+	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+		(pci_resource_len(dev->pdev, 0) / 2);
+
+	/*
+	 * On BXT, writes larger than 64 bits to the GTT pagetable range will be
+	 * dropped. For WC mappings in general we have 64 byte burst writes
+	 * when the WC buffer is flushed, so we can't use it, but have to
+	 * resort to an uncached mapping. The WC issue is easily caught by the
+	 * readback check when writing GTT PTE entries.
+	 */
+	if (IS_BROXTON(dev))
+		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+	else
+		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+	if (!dev_priv->gtt.gsm) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		return -ENOMEM;
+	}
+
+	scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(scratch_page)) {
+		DRM_ERROR("Scratch setup failed\n");
+		/* iounmap will also get called at remove, but meh */
+		iounmap(dev_priv->gtt.gsm);
+		return PTR_ERR(scratch_page);
+	}
+
+	dev_priv->gtt.base.scratch_page = scratch_page;
+
+	return 0;
+}
+
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+	uint64_t pat;
+
+	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
+	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
+	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+	if (!USES_PPGTT(dev_priv->dev))
+		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+		 * so RTL will always use the value corresponding to
+		 * pat_sel = 000".
+		 * So let's disable cache for GGTT to avoid screen corruptions.
+		 * MOCS still can be used though.
+		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+		 * before this patch, i.e. the same uncached + snooping access
+		 * like on gen6/7 seems to be in effect.
+		 * - So this just fixes blitter/render access. Again it looks
+		 * like it's not just uncached access, but uncached + snooping.
+		 * So we can still hold onto all our assumptions wrt cpu
+		 * clflushing on LLC machines.
+		 */
+		pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
+	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
+	 * write would work. */
+	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
+	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+}
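+
+/*
+ * Layout note (assuming GEN8_PPAT(i, x) packs entry i into bits
+ * [8i+7:8i]): the eight PAT entries built above each occupy one byte of
+ * the 64-bit value, which the LO/HI writes then split as pat and
+ * pat >> 32.
+ */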
+
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+	uint64_t pat;
+
+	/*
+	 * Map WB on BDW to snooped on CHV.
+	 *
+	 * Only the snoop bit has meaning for CHV, the rest is
+	 * ignored.
+	 *
+	 * The hardware will never snoop for certain types of accesses:
+	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
+	 * - PPGTT page tables
+	 * - some other special cycles
+	 *
+	 * As with BDW, we also need to consider the following for GT accesses:
+	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
+	 * so RTL will always use the value corresponding to
+	 * pat_sel = 000".
+	 * Which means we must set the snoop bit in PAT entry 0
+	 * in order to keep the global status page working.
+	 */
+	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(1, 0) |
+	      GEN8_PPAT(2, 0) |
+	      GEN8_PPAT(3, 0) |
+	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
+	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+}
+
+static int gen8_gmch_probe(struct drm_device *dev,
+			   u64 *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   u64 *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 gtt_size;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	/* TODO: We're not aware of mappable constraints on gen8 yet */
+	*mappable_base = pci_resource_start(dev->pdev, 2);
+	*mappable_end = pci_resource_len(dev->pdev, 2);
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
+		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	} else if (IS_CHERRYVIEW(dev)) {
+		*stolen = chv_get_stolen_size(snb_gmch_ctl);
+		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+	} else {
+		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
+		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	}
+
+	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		chv_setup_private_ppat(dev_priv);
+	else
+		bdw_setup_private_ppat(dev_priv);
+
+	ret = ggtt_probe_common(dev, gtt_size);
+
+	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
+	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+
+	return ret;
+}
+
+static int gen6_gmch_probe(struct drm_device *dev,
+			   u64 *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   u64 *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int gtt_size;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	*mappable_base = pci_resource_start(dev->pdev, 2);
+	*mappable_end = pci_resource_len(dev->pdev, 2);
+
+	/* 64/512MB is the current min/max we actually know of, but this is just
+	 * a coarse sanity check.
+	 */
+	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
+		DRM_ERROR("Unknown GMADR size (%llx)\n",
+			  dev_priv->gtt.mappable_end);
+		return -ENXIO;
+	}
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+
+	ret = ggtt_probe_common(dev, gtt_size);
+
+	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+
+	return ret;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+	iounmap(gtt->gsm);
+	free_scratch_page(vm->dev, vm->scratch_page);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+			   u64 *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   u64 *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+	if (!ret) {
+		DRM_ERROR("failed to set up gmch\n");
+		return -EIO;
+	}
+
+	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
+	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+
+	if (unlikely(dev_priv->gtt.do_idle_maps))
+		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
+	return 0;
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+	intel_gmch_remove();
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen <= 5) {
+		gtt->gtt_probe = i915_gmch_probe;
+		gtt->base.cleanup = i915_gmch_remove;
+	} else if (INTEL_INFO(dev)->gen < 8) {
+		gtt->gtt_probe = gen6_gmch_probe;
+		gtt->base.cleanup = gen6_gmch_remove;
+		if (IS_HASWELL(dev) && dev_priv->ellc_size)
+			gtt->base.pte_encode = iris_pte_encode;
+		else if (IS_HASWELL(dev))
+			gtt->base.pte_encode = hsw_pte_encode;
+		else if (IS_VALLEYVIEW(dev))
+			gtt->base.pte_encode = byt_pte_encode;
+		else if (INTEL_INFO(dev)->gen >= 7)
+			gtt->base.pte_encode = ivb_pte_encode;
+		else
+			gtt->base.pte_encode = snb_pte_encode;
+	} else {
+		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
+		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+	}
+
+	gtt->base.dev = dev;
+
+	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+			     &gtt->mappable_base, &gtt->mappable_end);
+	if (ret)
+		return ret;
+
+	/* GMADR is the PCI mmio aperture into the global GTT. */
+	DRM_INFO("Memory usable by graphics device = %lluM\n",
+		 gtt->base.total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped)
+		DRM_INFO("VT-d active for gfx access\n");
+#endif
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities.  We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+
+	return 0;
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
+	bool flush;
+
+	i915_check_and_clear_faults(dev);
+
+	/* First fill our portion of the GTT with scratch pages */
+	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+				       dev_priv->gtt.base.start,
+				       dev_priv->gtt.base.total,
+				       true);
+
+	/* Cache flush objects bound into GGTT and rebind them. */
+	vm = &dev_priv->gtt.base;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		flush = false;
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			if (vma->vm != vm)
+				continue;
+
+			WARN_ON(i915_vma_bind(vma, obj->cache_level,
+					      PIN_UPDATE));
+
+			flush = true;
+		}
+
+		if (flush)
+			i915_gem_clflush_object(obj, obj->pin_display);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+			chv_setup_private_ppat(dev_priv);
+		else
+			bdw_setup_private_ppat(dev_priv);
+
+		return;
+	}
+
+	if (USES_PPGTT(dev)) {
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+			/* TODO: Perhaps it shouldn't be gen6 specific */
+
+			struct i915_hw_ppgtt *ppgtt =
+					container_of(vm, struct i915_hw_ppgtt,
+						     base);
+
+			if (i915_is_ggtt(vm))
+				ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+			gen6_write_page_range(dev_priv, &ppgtt->pd,
+					      0, ppgtt->base.total);
+		}
+	}
+
+	i915_ggtt_flush(dev_priv);
+}
+
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+		      struct i915_address_space *vm,
+		      const struct i915_ggtt_view *ggtt_view)
+{
+	struct i915_vma *vma;
+
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return ERR_PTR(-EINVAL);
+
+	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+	if (vma == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&vma->vma_link);
+	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->exec_list);
+	vma->vm = vm;
+	vma->obj = obj;
+
+	if (i915_is_ggtt(vm))
+		vma->ggtt_view = *ggtt_view;
+
+	list_add_tail(&vma->vma_link, &obj->vma_list);
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+
+	return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm,
+					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+
+	return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+				       const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct i915_vma *vma;
+
+	if (WARN_ON(!view))
+		return ERR_PTR(-EINVAL);
+
+	vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+	if (IS_ERR(vma))
+		return vma;
+
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, ggtt, view);
+
+	return vma;
+}
+
+static struct scatterlist *
+rotate_pages(dma_addr_t *in, unsigned int offset,
+	     unsigned int width, unsigned int height,
+	     struct sg_table *st, struct scatterlist *sg)
+{
+	unsigned int column, row;
+	unsigned int src_idx;
+
+	if (!sg) {
+		st->nents = 0;
+		sg = st->sgl;
+	}
+
+	for (column = 0; column < width; column++) {
+		src_idx = width * (height - 1) + column;
+		for (row = 0; row < height; row++) {
+			st->nents++;
+			/* We don't need the pages, but need to initialize
+			 * the entries so the sg list can be happily traversed.
+			 * The only things we need are the DMA addresses.
+			 */
+			sg_set_page(sg, NULL, PAGE_SIZE, 0);
+			sg_dma_address(sg) = in[offset + src_idx];
+			sg_dma_len(sg) = PAGE_SIZE;
+			sg = sg_next(sg);
+			src_idx -= width;
+		}
+	}
+
+	return sg;
+}
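+
+/*
+ * Index walk-through (illustrative, width = 4, height = 3): for column 0
+ * the source indices visited are 8, 4, 0 -- the leftmost column, bottom
+ * row first -- so consecutive output pages build the rotated image one
+ * column at a time.
+ */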
+
+static struct sg_table *
+intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+			  struct drm_i915_gem_object *obj)
+{
+	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+	unsigned int size_pages_uv;
+	struct sg_page_iter sg_iter;
+	unsigned long i;
+	dma_addr_t *page_addr_list;
+	struct sg_table *st;
+	unsigned int uv_start_page;
+	struct scatterlist *sg;
+	int ret = -ENOMEM;
+
+	/* Allocate a temporary list of source pages for random access. */
+	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+				       sizeof(dma_addr_t));
+	if (!page_addr_list)
+		return ERR_PTR(ret);
+
+	/* Account for UV plane with NV12. */
+	if (rot_info->pixel_format == DRM_FORMAT_NV12)
+		size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+	else
+		size_pages_uv = 0;
+
+	/* Allocate target SG list. */
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto err_st_alloc;
+
+	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
+	if (ret)
+		goto err_sg_alloc;
+
+	/* Populate source page list from the object. */
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
+		i++;
+	}
+
+	/* Rotate the pages. */
+	sg = rotate_pages(page_addr_list, 0,
+		     rot_info->width_pages, rot_info->height_pages,
+		     st, NULL);
+
+	/* Append the UV plane if NV12. */
+	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
+		uv_start_page = size_pages;
+
+		/* Check for tile-row un-alignment. */
+		if (offset_in_page(rot_info->uv_offset))
+			uv_start_page--;
+
+		rot_info->uv_start_page = uv_start_page;
+
+		rotate_pages(page_addr_list, uv_start_page,
+			     rot_info->width_pages_uv,
+			     rot_info->height_pages_uv,
+			     st, sg);
+	}
+
+	DRM_DEBUG_KMS(
+		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
+		      obj->base.size, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages + size_pages_uv,
+		      size_pages);
+
+	drm_free_large(page_addr_list);
+
+	return st;
+
+err_sg_alloc:
+	kfree(st);
+err_st_alloc:
+	drm_free_large(page_addr_list);
+
+	DRM_DEBUG_KMS(
+		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
+		      obj->base.size, ret, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages + size_pages_uv,
+		      size_pages);
+	return ERR_PTR(ret);
+}
+
+static struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+		    struct drm_i915_gem_object *obj)
+{
+	struct sg_table *st;
+	struct scatterlist *sg;
+	struct sg_page_iter obj_sg_iter;
+	int ret = -ENOMEM;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto err_st_alloc;
+
+	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+	if (ret)
+		goto err_sg_alloc;
+
+	sg = st->sgl;
+	st->nents = 0;
+	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
+		view->params.partial.offset)
+	{
+		if (st->nents >= view->params.partial.size)
+			break;
+
+		sg_set_page(sg, NULL, PAGE_SIZE, 0);
+		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
+		sg_dma_len(sg) = PAGE_SIZE;
+
+		sg = sg_next(sg);
+		st->nents++;
+	}
+
+	return st;
+
+err_sg_alloc:
+	kfree(st);
+err_st_alloc:
+	return ERR_PTR(ret);
+}
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+	int ret = 0;
+
+	if (vma->ggtt_view.pages)
+		return 0;
+
+	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+		vma->ggtt_view.pages = vma->obj->pages;
+	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+		vma->ggtt_view.pages =
+			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
+		vma->ggtt_view.pages =
+			intel_partial_pages(&vma->ggtt_view, vma->obj);
+	else
+		WARN_ONCE(1, "GGTT view %u not implemented!\n",
+			  vma->ggtt_view.type);
+
+	if (!vma->ggtt_view.pages) {
+		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
+			  vma->ggtt_view.type);
+		ret = -EINVAL;
+	} else if (IS_ERR(vma->ggtt_view.pages)) {
+		ret = PTR_ERR(vma->ggtt_view.pages);
+		vma->ggtt_view.pages = NULL;
+		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+			  vma->ggtt_view.type, ret);
+	}
+
+	return ret;
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+		  u32 flags)
+{
+	int ret;
+	u32 bind_flags;
+
+	if (WARN_ON(flags == 0))
+		return -EINVAL;
+
+	bind_flags = 0;
+	if (flags & PIN_GLOBAL)
+		bind_flags |= GLOBAL_BIND;
+	if (flags & PIN_USER)
+		bind_flags |= LOCAL_BIND;
+
+	if (flags & PIN_UPDATE)
+		bind_flags |= vma->bound;
+	else
+		bind_flags &= ~vma->bound;
+
+	if (bind_flags == 0)
+		return 0;
+
+	if (vma->bound == 0 && vma->vm->allocate_va_range) {
+		trace_i915_va_alloc(vma->vm,
+				    vma->node.start,
+				    vma->node.size,
+				    VM_TO_TRACE_NAME(vma->vm));
+
+		/* XXX: i915_vma_pin() will fix this +- hack */
+		vma->pin_count++;
+		ret = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start,
+						 vma->node.size);
+		vma->pin_count--;
+		if (ret)
+			return ret;
+	}
+
+	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+	if (ret)
+		return ret;
+
+	vma->bound |= bind_flags;
+
+	return 0;
+}
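+
+/*
+ * An illustration of the bind_flags logic above (values are examples): for
+ * a vma that is already bound globally (vma->bound == GLOBAL_BIND), a call
+ * with flags == PIN_GLOBAL | PIN_USER computes
+ *
+ *	bind_flags = GLOBAL_BIND | LOCAL_BIND;
+ *	bind_flags &= ~vma->bound;
+ *
+ * leaving bind_flags == LOCAL_BIND, so only the missing PPGTT binding is
+ * set up. With PIN_UPDATE instead, the already-bound GLOBAL_BIND is kept
+ * in bind_flags and the existing global PTEs are rewritten as well.
+ */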
+
+/**
+ * i915_ggtt_view_size - Get the size of a GGTT view.
+ * @obj: Object the view is of.
+ * @view: The view in question.
+ *
+ * Return: The size of the GGTT view in bytes.
+ */
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+		    const struct i915_ggtt_view *view)
+{
+	if (view->type == I915_GGTT_VIEW_NORMAL) {
+		return obj->base.size;
+	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+		return view->rotation_info.size;
+	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
+		return view->params.partial.size << PAGE_SHIFT;
+	} else {
+		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
+		return obj->base.size;
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
new file mode 100644
index 0000000..a216397
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -0,0 +1,568 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ */
+
+#ifndef __I915_GEM_GTT_H__
+#define __I915_GEM_GTT_H__
+
+struct drm_i915_file_private;
+
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
+typedef uint64_t gen8_ppgtt_pdpe_t;
+typedef uint64_t gen8_ppgtt_pml4e_t;
+
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+
+
+/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define GEN6_PTE_VALID			(1 << 0)
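+
+/*
+ * A worked example of the encoding above: for the 36b physical address
+ * 0x812345000, address bits 39:32 (0x08) fold into PTE bits 11:4, so
+ *
+ *	GEN6_PTE_ADDR_ENCODE(0x812345000ULL) == 0x812345080
+ *
+ * and the low 32 bits written to the PTE are 0x12345080, before OR-ing in
+ * bits such as GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID.
+ */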
+
+#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
+#define I915_PDES			512
+#define I915_PDE_MASK			(I915_PDES - 1)
+#define NUM_PTE(pde_shift)	(1 << ((pde_shift) - PAGE_SHIFT))
+
+#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT			22
+#define GEN6_PDE_VALID			(1 << 0)
+
+#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
+#define BYT_PTE_WRITEABLE		(1 << 1)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
+					 (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED		(0)
+#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
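+
+/*
+ * For example, HSW_CACHEABILITY_CONTROL(0x8) places the low three bits of
+ * the 4-bit value (0b000) in PTE bits 3:1 and the fourth bit in bit 11:
+ *
+ *	((0x8 & 0x7) << 1) == 0
+ *	((0x8 & 0x8) << (11 - 3)) == 0x800
+ *
+ * which is why HSW_WB_ELLC_LLC_AGE3 sets only bit 11 of the PTE.
+ */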
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ *
+ * GEN8 48b legacy style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
+ * PML4E | PDPE  |  PDE  |  PTE  | offset
+ */
+#define GEN8_PML4ES_PER_PML4		512
+#define GEN8_PML4E_SHIFT		39
+#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
+#define GEN8_PDPE_SHIFT			30
+/* NB: GEN8_PDPE_MASK is too wide for 32b platforms (which use only 4 PDPEs),
+ * but that has no effect on how 32b page tables are walked. */
+#define GEN8_PDPE_MASK			0x1ff
+#define GEN8_PDE_SHIFT			21
+#define GEN8_PDE_MASK			0x1ff
+#define GEN8_PTE_SHIFT			12
+#define GEN8_PTE_MASK			0x1ff
+#define GEN8_LEGACY_PDPES		4
+#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
+
+#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+
+#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
+#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP			(1<<6)
+#define GEN8_PPAT_AGE(x)		(x<<4)
+#define GEN8_PPAT_LLCeLLC		(3<<2)
+#define GEN8_PPAT_LLCELLC		(2<<2)
+#define GEN8_PPAT_LLC			(1<<2)
+#define GEN8_PPAT_WB			(3<<0)
+#define GEN8_PPAT_WT			(2<<0)
+#define GEN8_PPAT_WC			(1<<0)
+#define GEN8_PPAT_UC			(0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
+#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
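+
+/*
+ * GEN8_PPAT(i, x) shifts one 8-bit PAT entry into slot i of the 64-bit PPAT
+ * value, so a table is assembled by OR-ing entries together. A sketch (the
+ * exact table the driver programs lives in the GTT setup code):
+ *
+ *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
+ *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
+ *		  GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
+ *		  GEN8_PPAT(3, GEN8_PPAT_UC);
+ */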
+
+enum i915_ggtt_view_type {
+	I915_GGTT_VIEW_NORMAL = 0,
+	I915_GGTT_VIEW_ROTATED,
+	I915_GGTT_VIEW_PARTIAL,
+};
+
+struct intel_rotation_info {
+	unsigned int height;
+	unsigned int pitch;
+	unsigned int uv_offset;
+	uint32_t pixel_format;
+	uint64_t fb_modifier;
+	unsigned int width_pages, height_pages;
+	uint64_t size;
+	unsigned int width_pages_uv, height_pages_uv;
+	uint64_t size_uv;
+	unsigned int uv_start_page;
+};
+
+struct i915_ggtt_view {
+	enum i915_ggtt_view_type type;
+
+	union {
+		struct {
+			u64 offset;
+			unsigned int size;
+		} partial;
+	} params;
+
+	struct sg_table *pages;
+
+	union {
+		struct intel_rotation_info rotation_info;
+	};
+};
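+
+/*
+ * A partial view can be described on the stack before looking up or creating
+ * the matching vma; a sketch (offset and size are both in pages):
+ *
+ *	struct i915_ggtt_view view = {
+ *		.type = I915_GGTT_VIEW_PARTIAL,
+ *		.params.partial.offset = first_page,
+ *		.params.partial.size = num_pages,
+ *	};
+ *
+ * where first_page and num_pages are placeholders for the caller's values.
+ */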
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= the object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND	(1<<0)
+#define LOCAL_BIND	(1<<1)
+	unsigned int bound : 4;
+
+	/**
+	 * Support different GGTT views into the same object.
+	 * This means there can be multiple VMA mappings per object and per VM.
+	 * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is assumed in GEM
+ * functions which take no ggtt view parameter.
+	 */
+	struct i915_ggtt_view ggtt_view;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+};
+
+struct i915_page_dma {
+	struct page *page;
+	union {
+		dma_addr_t daddr;
+
+		/* For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		uint32_t ggtt_offset;
+	};
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
+
+	unsigned long *used_ptes;
+};
+
+struct i915_page_directory {
+	struct i915_page_dma base;
+
+	unsigned long *used_pdes;
+	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer {
+	struct i915_page_dma base;
+
+	unsigned long *used_pdpes;
+	struct i915_page_directory **page_directory;
+};
+
+struct i915_pml4 {
+	struct i915_page_dma base;
+
+	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
+	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
+};
+
+struct i915_address_space {
+	struct drm_mm mm;
+	struct drm_device *dev;
+	struct list_head global_link;
+	u64 start;		/* Start offset always 0 for dri2 */
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
+
+	struct i915_page_scratch *scratch_page;
+	struct i915_page_table *scratch_pt;
+	struct i915_page_directory *scratch_pd;
+	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_read_req
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_read_req is NULL while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/* FIXME: Need a more generic return type */
+	gen6_pte_t (*pte_encode)(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 flags); /* Create a valid PTE */
+	/* flags for pte_encode */
+#define PTE_READ_ONLY	(1<<0)
+	int (*allocate_va_range)(struct i915_address_space *vm,
+				 uint64_t start,
+				 uint64_t length);
+	void (*clear_range)(struct i915_address_space *vm,
+			    uint64_t start,
+			    uint64_t length,
+			    bool use_scratch);
+	void (*insert_entries)(struct i915_address_space *vm,
+			       struct sg_table *st,
+			       uint64_t start,
+			       enum i915_cache_level cache_level, u32 flags);
+	void (*cleanup)(struct i915_address_space *vm);
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+	int (*bind_vma)(struct i915_vma *vma,
+			enum i915_cache_level cache_level,
+			u32 flags);
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	struct i915_address_space base;
+
+	size_t stolen_size;		/* Total size of stolen memory */
+	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	u64 mappable_end;		/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+
+	int mtrr;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
+			  size_t *stolen, phys_addr_t *mappable_base,
+			  u64 *mappable_end);
+};
+
+struct i915_hw_ppgtt {
+	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
+	unsigned long pd_dirty_rings;
+	union {
+		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
+		struct i915_page_directory_pointer pdp;	/* GEN8+ */
+		struct i915_page_directory pd;		/* GEN6-7 */
+	};
+
+	struct drm_i915_file_private *file_priv;
+
+	gen6_pte_t __iomem *pd_addr;
+
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct drm_i915_gem_request *req);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+/* Iterates over every pde between start and start + length. If start and
+ * start + length are not perfectly divisible, the macro will round down and
+ * up as needed. The macro modifies pde, start and length. Dev is only used
+ * to differentiate shift values. Temp is a scratch variable. On gen6/7,
+ * start = 0 and length = 2G effectively iterates over every PDE in the
+ * system.
+ *
+ * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+	for (iter = gen6_pde_index(start); \
+	     length > 0 && iter < I915_PDES ? \
+			(pt = (pd)->page_table[iter]), 1 : 0; \
+	     iter++, \
+	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+	     temp = min_t(unsigned, temp, length), \
+	     start += temp, length -= temp)
+
+/* Note the bounds check comes before the dereference so we never read past
+ * the end of the PDE array. */
+#define gen6_for_all_pdes(pt, ppgtt, iter)  \
+	for (iter = 0;		\
+	     iter < I915_PDES ? (pt = ppgtt->pd.page_table[iter]), 1 : 0;	\
+	     iter++)
+
+static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+{
+	const uint32_t mask = NUM_PTE(pde_shift) - 1;
+
+	return (address >> PAGE_SHIFT) & mask;
+}
+
+/* Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
+				      uint32_t pde_shift)
+{
+	const uint64_t mask = ~((1 << pde_shift) - 1);
+	uint64_t end;
+
+	WARN_ON(length == 0);
+	WARN_ON(offset_in_page(addr|length));
+
+	end = addr + length;
+
+	if ((addr & mask) != (end & mask))
+		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
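+
+/*
+ * For example, with gen6 parameters (GEN6_PDE_SHIFT == 22, so one page table
+ * spans 4MiB): a range starting 8 pages below a 4MiB boundary with a length
+ * of 16 pages straddles a page table, and only the PTEs up to the boundary
+ * are counted:
+ *
+ *	i915_pte_count(0x3f8000, 16 << PAGE_SHIFT, GEN6_PDE_SHIFT) == 8
+ */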
+
+static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+{
+	return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline uint32_t gen6_pte_index(uint32_t addr)
+{
+	return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+{
+	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline uint32_t gen6_pde_index(uint32_t addr)
+{
+	return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
+/* Equivalent to the gen6 version: iterates over every pde between start and
+ * start + length. On gen8+ it simply iterates over every page directory
+ * entry in a page directory.
+ */
+#define gen8_for_each_pde(pt, pd, start, length, temp, iter)		\
+	for (iter = gen8_pde_index(start); \
+	     length > 0 && iter < I915_PDES ? \
+			(pt = (pd)->page_table[iter]), 1 : 0; \
+	     iter++,				\
+	     temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start,	\
+	     temp = min(temp, length),					\
+	     start += temp, length -= temp)
+
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter)	\
+	for (iter = gen8_pdpe_index(start); \
+	     length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
+			(pd = (pdp)->page_directory[iter]), 1 : 0; \
+	     iter++,				\
+	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start,	\
+	     temp = min(temp, length),					\
+	     start += temp, length -= temp)
+
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter)	\
+	for (iter = gen8_pml4e_index(start);	\
+	     length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
+			(pdp = (pml4)->pdps[iter]), 1 : 0; \
+	     iter++,				\
+	     temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start,	\
+	     temp = min(temp, length),					\
+	     start += temp, length -= temp)
+
+static inline uint32_t gen8_pte_index(uint64_t address)
+{
+	return i915_pte_index(address, GEN8_PDE_SHIFT);
+}
+
+static inline uint32_t gen8_pde_index(uint64_t address)
+{
+	return i915_pde_index(address, GEN8_PDE_SHIFT);
+}
+
+static inline uint32_t gen8_pdpe_index(uint64_t address)
+{
+	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
+}
+
+static inline uint32_t gen8_pml4e_index(uint64_t address)
+{
+	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
+}
+
+static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
+{
+	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
+}
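+
+/*
+ * Worked example of the 48b address decomposition performed by the helpers
+ * above (9 | 9 | 9 | 9 | 12 bit split):
+ *
+ *	u64 addr = (3ULL << GEN8_PML4E_SHIFT) | (5ULL << GEN8_PDPE_SHIFT) |
+ *		   (7ULL << GEN8_PDE_SHIFT) | (9ULL << GEN8_PTE_SHIFT);
+ *
+ * yields gen8_pml4e_index(addr) == 3, gen8_pdpe_index(addr) == 5,
+ * gen8_pde_index(addr) == 7 and gen8_pte_index(addr) == 9.
+ */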
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+	return test_bit(n, ppgtt->pdp.used_pdpes) ?
+		px_dma(ppgtt->pdp.page_directory[n]) :
+		px_dma(ppgtt->base.scratch_pd);
+}
+
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_global_gtt_cleanup(struct drm_device *dev);
+
+
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
+void i915_ppgtt_release(struct kref *kref);
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+					struct drm_i915_file_private *fpriv);
+static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_get(&ppgtt->ref);
+}
+static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_put(&ppgtt->ref, i915_ppgtt_release);
+}
+
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+                     const struct i915_ggtt_view *b)
+{
+	if (WARN_ON(!a || !b))
+		return false;
+
+	if (a->type != b->type)
+		return false;
+	if (a->type == I915_GGTT_VIEW_PARTIAL)
+		return !memcmp(&a->params, &b->params, sizeof(a->params));
+	return true;
+}
+
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+		    const struct i915_ggtt_view *view);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
new file mode 100644
index 0000000..5026a62
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(struct drm_device *dev, const int gen)
+{
+	switch (gen) {
+	case 6:
+		return &gen6_null_state;
+	case 7:
+		return &gen7_null_state;
+	case 8:
+		return &gen8_null_state;
+	case 9:
+		return &gen9_null_state;
+	}
+
+	return NULL;
+}
+
+static int render_state_init(struct render_state *so, struct drm_device *dev)
+{
+	int ret;
+
+	so->gen = INTEL_INFO(dev)->gen;
+	so->rodata = render_state_get_rodata(dev, so->gen);
+	if (so->rodata == NULL)
+		return 0;
+
+	if (so->rodata->batch_items * 4 > 4096)
+		return -EINVAL;
+
+	so->obj = i915_gem_alloc_object(dev, 4096);
+	if (so->obj == NULL)
+		return -ENOMEM;
+
+	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+	if (ret)
+		goto free_gem;
+
+	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+	return 0;
+
+free_gem:
+	drm_gem_object_unreference(&so->obj->base);
+	return ret;
+}
+
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands,
+ * this is sufficient as the null state generator makes the final batch
+ * with two passes to build command and state separately. At this point
+ * the size of both is known and it compacts them by relocating the state
+ * right after the commands, taking care of alignment so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val)				\
+	do {							\
+		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
+			ret = -ENOSPC;				\
+			goto err_out;				\
+		}						\
+		(batch)[(i)++] = (val);				\
+	} while (0)
+
+static int render_state_setup(struct render_state *so)
+{
+	const struct intel_renderstate_rodata *rodata = so->rodata;
+	unsigned int i = 0, reloc_index = 0;
+	struct page *page;
+	u32 *d;
+	int ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+	if (ret)
+		return ret;
+
+	page = sg_page(so->obj->pages->sgl);
+	d = kmap(page);
+
+	while (i < rodata->batch_items) {
+		u32 s = rodata->batch[i];
+
+		if (i * 4  == rodata->reloc[reloc_index]) {
+			u64 r = s + so->ggtt_offset;
+			s = lower_32_bits(r);
+			if (so->gen >= 8) {
+				if (i + 1 >= rodata->batch_items ||
+				    rodata->batch[i + 1] != 0) {
+					ret = -EINVAL;
+					goto err_out;
+				}
+
+				d[i++] = s;
+				s = upper_32_bits(r);
+			}
+
+			reloc_index++;
+		}
+
+		d[i++] = s;
+	}
+
+	while (i % CACHELINE_DWORDS)
+		OUT_BATCH(d, i, MI_NOOP);
+
+	so->aux_batch_offset = i * sizeof(u32);
+
+	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+	/*
+	 * Since we are sending length, we need to strictly conform to
+	 * all requirements. For Gen2 this must be a multiple of 8.
+	 */
+	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
+	kunmap(page);
+
+	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+	if (ret)
+		return ret;
+
+	if (rodata->reloc[reloc_index] != -1) {
+		DRM_ERROR("only %d relocs resolved\n", reloc_index);
+		return -EINVAL;
+	}
+
+	return 0;
+
+err_out:
+	kunmap(page);
+	return ret;
+}
+
+#undef OUT_BATCH
+
+void i915_gem_render_state_fini(struct render_state *so)
+{
+	i915_gem_object_ggtt_unpin(so->obj);
+	drm_gem_object_unreference(&so->obj->base);
+}
+
+int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+				  struct render_state *so)
+{
+	int ret;
+
+	if (WARN_ON(ring->id != RCS))
+		return -ENOENT;
+
+	ret = render_state_init(so, ring->dev);
+	if (ret)
+		return ret;
+
+	if (so->rodata == NULL)
+		return 0;
+
+	ret = render_state_setup(so);
+	if (ret) {
+		i915_gem_render_state_fini(so);
+		return ret;
+	}
+
+	return 0;
+}
+
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+{
+	struct render_state so;
+	int ret;
+
+	ret = i915_gem_render_state_prepare(req->ring, &so);
+	if (ret)
+		return ret;
+
+	if (so.rodata == NULL)
+		return 0;
+
+	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+					     so.rodata->batch_items * 4,
+					     I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
+
+	if (so.aux_batch_size > 8) {
+		ret = req->ring->dispatch_execbuffer(req,
+						     (so.ggtt_offset +
+						      so.aux_batch_offset),
+						     so.aux_batch_size,
+						     I915_DISPATCH_SECURE);
+		if (ret)
+			goto out;
+	}
+
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
+out:
+	i915_gem_render_state_fini(&so);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
new file mode 100644
index 0000000..e641bb0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_GEM_RENDER_STATE_H_
+#define _I915_GEM_RENDER_STATE_H_
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 *batch;
+	const u32 batch_items;
+};
+
+struct render_state {
+	const struct intel_renderstate_rodata *rodata;
+	struct drm_i915_gem_object *obj;
+	u64 ggtt_offset;
+	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
+};
+
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct render_state *so);
+int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+				  struct render_state *so);
+
+#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 0000000..c0a96f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+	if (!mutex_is_locked(mutex))
+		return false;
+
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
+	return mutex->owner == task;
+#else
+	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+	return false;
+#endif
+}
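+
+/*
+ * The owner check above matters because the shrinker can be entered
+ * synchronously from this driver's own allocations while struct_mutex is
+ * already held; i915_gem_shrinker_lock() below uses it to detect that
+ * recursion and proceed without taking the lock a second time.
+ */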
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @dev_priv: i915 device
+ * @target: amount of memory to make available, in pages
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that it's not guaranteed that the released amount is actually available
+ * as free system memory - the pages might still be in use due to other reasons
+ * (like cpu mmaps) or the mm core may have reused them before we could grab them.
+ * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to
+ * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) result in the shrinker code
+ * having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+		unsigned long target, unsigned flags)
+{
+	const struct {
+		struct list_head *list;
+		unsigned int bit;
+	} phases[] = {
+		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ NULL, 0 },
+	}, *phase;
+	unsigned long count = 0;
+
+	trace_i915_gem_shrink(dev_priv, target, flags);
+	i915_gem_retire_requests(dev_priv->dev);
+
+	/*
+	 * As we may completely rewrite the (un)bound list whilst unbinding
+	 * (due to retiring requests) we have to strictly process only
+	 * one element of the list at a time, and recheck the list
+	 * on every iteration.
+	 *
+	 * In particular, we must hold a reference whilst removing the
+	 * object as we may end up waiting for and/or retiring the objects.
+	 * This might release the final reference (held by the active list)
+	 * and result in the object being freed from under us. This is
+	 * similar to the precautions the eviction code must take whilst
+	 * removing objects.
+	 *
+	 * Also note that although these lists do not hold a reference to
+	 * the object we can safely grab one here: The final object
+	 * unreferencing and the bound_list are both protected by the
+	 * dev->struct_mutex and so we won't ever be able to observe an
+	 * object on the bound_list with a reference count of 0.
+	 */
+	for (phase = phases; phase->list; phase++) {
+		struct list_head still_in_list;
+
+		if ((flags & phase->bit) == 0)
+			continue;
+
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(phase->list)) {
+			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma, *v;
+
+			obj = list_first_entry(phase->list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
+
+			if (flags & I915_SHRINK_PURGEABLE &&
+			    obj->madv != I915_MADV_DONTNEED)
+				continue;
+
+			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+				continue;
+
+			drm_gem_object_reference(&obj->base);
+
+			/* For the unbound phase, this should be a no-op! */
+			list_for_each_entry_safe(vma, v,
+						 &obj->vma_list, vma_link)
+				if (i915_vma_unbind(vma))
+					break;
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, phase->list);
+	}
+
+	i915_gem_retire_requests(dev_priv->dev);
+
+	return count;
+}
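+
+/*
+ * A typical caller first targets only purgeable objects and then widens the
+ * sweep if that did not free enough, as i915_gem_shrinker_scan() below does
+ * (nr_pages is an illustrative name for the caller's target):
+ *
+ *	freed = i915_gem_shrink(dev_priv, nr_pages,
+ *				I915_SHRINK_BOUND |
+ *				I915_SHRINK_UNBOUND |
+ *				I915_SHRINK_PURGEABLE);
+ */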
+
+/**
+ * i915_gem_shrink_all - Shrink buffer object caches completely
+ * @dev_priv: i915 device
+ *
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
+ * caches completely. It also first waits for and retires all outstanding
+ * requests to also be able to release backing storage for active objects.
+ *
+ * This should only be used in code to intentionally quiesce the gpu or as a
+ * last-ditch effort when memory seems to have run out.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	return i915_gem_shrink(dev_priv, -1UL,
+			       I915_SHRINK_BOUND |
+			       I915_SHRINK_UNBOUND |
+			       I915_SHRINK_ACTIVE);
+}
+
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return false;
+
+		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+			return false;
+
+		*unlock = false;
+	} else {
+		*unlock = true;
+	}
+
+	return true;
+}
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+		if (vma->pin_count)
+			count++;
+	}
+
+	return count;
+}
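+
+/*
+ * Note that both an allocated node and a pin each contribute to the count;
+ * i915_gem_shrinker_count() below compares the result against
+ * pages_pin_count to decide whether a bound object is actually shrinkable.
+ */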
+
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_i915_gem_object *obj;
+	unsigned long count;
+	bool unlock;
+
+	if (!i915_gem_shrinker_lock(dev, &unlock))
+		return 0;
+
+	count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+		if (obj->pages_pin_count == 0)
+			count += obj->base.size >> PAGE_SHIFT;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+			count += obj->base.size >> PAGE_SHIFT;
+	}
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long freed;
+	bool unlock;
+
+	if (!i915_gem_shrinker_lock(dev, &unlock))
+		return SHRINK_STOP;
+
+	freed = i915_gem_shrink(dev_priv,
+				sc->nr_to_scan,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_PURGEABLE);
+	if (freed < sc->nr_to_scan)
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+
+	return freed;
+}
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(nb, struct drm_i915_private, mm.oom_notifier);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_i915_gem_object *obj;
+	unsigned long timeout = msecs_to_jiffies(5000) + 1;
+	unsigned long pinned, bound, unbound, freed_pages;
+	bool was_interruptible;
+	bool unlock;
+
+	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+		schedule_timeout_killable(1);
+		if (fatal_signal_pending(current))
+			return NOTIFY_DONE;
+	}
+	if (timeout == 0) {
+		pr_err("Unable to purge GPU memory due to lock contention.\n");
+		return NOTIFY_DONE;
+	}
+
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
+	freed_pages = i915_gem_shrink_all(dev_priv);
+
+	dev_priv->mm.interruptible = was_interruptible;
+
+	/* Because we may be allocating inside our own driver, we cannot
+	 * assert that there are no objects with pinned pages that are not
+	 * being pointed to by hardware.
+	 */
+	unbound = bound = pinned = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		if (!obj->base.filp) /* not backed by a freeable object */
+			continue;
+
+		if (obj->pages_pin_count)
+			pinned += obj->base.size;
+		else
+			unbound += obj->base.size;
+	}
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (!obj->base.filp)
+			continue;
+
+		if (obj->pages_pin_count)
+			pinned += obj->base.size;
+		else
+			bound += obj->base.size;
+	}
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+
+	if (freed_pages || unbound || bound)
+		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+			freed_pages << PAGE_SHIFT, pinned);
+	if (unbound || bound)
+		pr_err("%lu and %lu bytes still available in the "
+		       "bound and unbound GPU page lists.\n",
+		       bound, unbound);
+
+	*(unsigned long *)ptr += freed_pages;
+	return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_shrinker_init - Initialize i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function registers and sets up the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+{
+	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.shrinker);
+
+	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+	register_oom_notifier(&dev_priv->mm.oom_notifier);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 0000000..5d2323a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB(x) * 1024)
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that their system has less memory
+ * available than was installed. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+					 struct drm_mm_node *node, u64 size,
+					 unsigned alignment, u64 start, u64 end)
+{
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return -ENODEV;
+
+	/* See the comment at the drm_mm_init() call for more about this check.
+	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
+	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+		start = 4096;
+
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
+					  alignment, start, end,
+					  DRM_MM_SEARCH_DEFAULT);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+
+	return ret;
+}
+
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment)
+{
+	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+					alignment, 0,
+					dev_priv->gtt.stolen_usable_size);
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node)
+{
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	drm_mm_remove_node(node);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+}
+
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct resource *r;
+	u32 base;
+
+	/* Almost universally we can find the Graphics Base of Stolen Memory
+	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
+	 * machines this is also mirrored in the bridge device at different
+	 * locations, or in the MCHBAR.
+	 *
+	 * On 865 we just check the TOUD register.
+	 *
+	 * On 830/845/85x the stolen memory base isn't available in any
+	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
+	 *
+	 */
+	base = 0;
+	if (INTEL_INFO(dev)->gen >= 3) {
+		/* Read Graphics Base of Stolen Memory directly */
+		pci_read_config_dword(dev->pdev, 0x5c, &base);
+		base &= ~((1<<20) - 1);
+	} else if (IS_I865G(dev)) {
+		u32 tseg_size = 0;
+		u16 toud = 0;
+		u8 tmp;
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I845_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE) {
+			switch (tmp & I845_TSEG_SIZE_MASK) {
+			case I845_TSEG_SIZE_512K:
+				tseg_size = KB(512);
+				break;
+			case I845_TSEG_SIZE_1M:
+				tseg_size = MB(1);
+				break;
+			}
+		}
+
+		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I865_TOUD, &toud);
+
+		base = (toud << 16) + tseg_size;
+	} else if (IS_I85X(dev)) {
+		u32 tseg_size = 0;
+		u32 tom;
+		u8 tmp;
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I85X_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE)
+			tseg_size = MB(1);
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+					 I85X_DRB3, &tmp);
+		tom = tmp * MB(32);
+
+		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+	} else if (IS_845G(dev)) {
+		u32 tseg_size = 0;
+		u32 tom;
+		u8 tmp;
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I845_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE) {
+			switch (tmp & I845_TSEG_SIZE_MASK) {
+			case I845_TSEG_SIZE_512K:
+				tseg_size = KB(512);
+				break;
+			case I845_TSEG_SIZE_1M:
+				tseg_size = MB(1);
+				break;
+			}
+		}
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I830_DRB3, &tmp);
+		tom = tmp * MB(32);
+
+		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+	} else if (IS_I830(dev)) {
+		u32 tseg_size = 0;
+		u32 tom;
+		u8 tmp;
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I830_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE) {
+			if (tmp & I830_TSEG_SIZE_1M)
+				tseg_size = MB(1);
+			else
+				tseg_size = KB(512);
+		}
+
+		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+					 I830_DRB3, &tmp);
+		tom = tmp * MB(32);
+
+		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+	}
+
+	if (base == 0)
+		return 0;
+
+	/* make sure we don't clobber the GTT if it's within stolen memory */
+	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+		struct {
+			u32 start, end;
+		} stolen[2] = {
+			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
+			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
+		};
+		u64 gtt_start, gtt_end;
+
+		gtt_start = I915_READ(PGTBL_CTL);
+		if (IS_GEN4(dev))
+			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+		else
+			gtt_start &= PGTBL_ADDRESS_LO_MASK;
+		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+			stolen[0].end = gtt_start;
+		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+			stolen[1].start = gtt_end;
+
+		/* pick the larger of the two chunks */
+		if (stolen[0].end - stolen[0].start >
+		    stolen[1].end - stolen[1].start) {
+			base = stolen[0].start;
+			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+		} else {
+			base = stolen[1].start;
+			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+		}
+
+		if (stolen[0].start != stolen[1].start ||
+		    stolen[0].end != stolen[1].end) {
+			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+				      (unsigned long long) gtt_start,
+				      (unsigned long long) gtt_end - 1);
+			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
+		}
+	}
+
+
+	/* Verify that nothing else uses this physical address. Stolen
+	 * memory should be reserved by the BIOS and hidden from the
+	 * kernel. So if the region is already marked as busy, something
+	 * is seriously wrong.
+	 */
+	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+				    "Graphics Stolen Memory");
+	if (r == NULL) {
+		/*
+		 * One more attempt but this time requesting region from
+		 * base + 1, as we have seen that this resolves the region
+		 * conflict with the PCI Bus.
+		 * This is a BIOS w/a: Some BIOSes wrap stolen in the root
+		 * PCI bus, but have an off-by-one error. Hence retry the
+		 * reservation starting from 1 instead of 0.
+		 */
+		r = devm_request_mem_region(dev->dev, base + 1,
+					    dev_priv->gtt.stolen_size - 1,
+					    "Graphics Stolen Memory");
+		/*
+		 * GEN3 firmware likes to smash pci bridges into the stolen
+		 * range. Apparently this works.
+		 */
+		if (r == NULL && !IS_GEN3(dev)) {
+			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+			base = 0;
+		}
+	}
+
+	return base;
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return;
+
+	drm_mm_takedown(&dev_priv->mm.stolen);
+}
+
+static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
+				     CTG_STOLEN_RESERVED :
+				     ELK_STOLEN_RESERVED);
+	unsigned long stolen_top = dev_priv->mm.stolen_base +
+		dev_priv->gtt.stolen_size;
+
+	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
+
+	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+
+	/* On these platforms, the register doesn't have a size field, so the
+	 * size is the distance between the base and the top of the stolen
+	 * memory. We also have the genuine case where base is zero and there's
+	 * nothing reserved. */
+	if (*base == 0)
+		*size = 0;
+	else
+		*size = stolen_top - *base;
+}
+
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN6_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_512K:
+		*size = 512 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_128K:
+		*size = 128 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN7_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN7_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN8_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_2M:
+		*size = 2 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_4M:
+		*size = 4 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_8M:
+		*size = 8 * 1024 * 1024;
+		break;
+	default:
+		*size = 8 * 1024 * 1024;
+		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	unsigned long stolen_top;
+
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	/* On these platforms, the register doesn't have a size field, so the
+	 * size is the distance between the base and the top of the stolen
+	 * memory. We also have the genuine case where base is zero and there's
+	 * nothing reserved. */
+	if (*base == 0)
+		*size = 0;
+	else
+		*size = stolen_top - *base;
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reserved_total, reserved_base = 0, reserved_size;
+	unsigned long stolen_top;
+
+	mutex_init(&dev_priv->mm.stolen_lock);
+
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+		DRM_INFO("DMAR active, disabling use of stolen memory\n");
+		return 0;
+	}
+#endif
+
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
+
+	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+	if (dev_priv->mm.stolen_base == 0)
+		return 0;
+
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 2:
+	case 3:
+		break;
+	case 4:
+		if (IS_G4X(dev))
+			g4x_get_stolen_reserved(dev_priv, &reserved_base,
+						&reserved_size);
+		break;
+	case 5:
+		/* Assume the gen6 maximum for the older platforms. */
+		reserved_size = 1024 * 1024;
+		reserved_base = stolen_top - reserved_size;
+		break;
+	case 6:
+		gen6_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	case 7:
+		gen7_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	default:
+		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+			bdw_get_stolen_reserved(dev_priv, &reserved_base,
+						&reserved_size);
+		else
+			gen8_get_stolen_reserved(dev_priv, &reserved_base,
+						 &reserved_size);
+		break;
+	}
+
+	/* It is possible for the reserved base to be zero, but the register
+	 * field for size doesn't have a zero option. */
+	if (reserved_base == 0) {
+		reserved_size = 0;
+		reserved_base = stolen_top;
+	}
+
+	if (reserved_base < dev_priv->mm.stolen_base ||
+	    reserved_base + reserved_size > stolen_top) {
+		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+			      reserved_base, reserved_base + reserved_size,
+			      dev_priv->mm.stolen_base, stolen_top);
+		return 0;
+	}
+
+	/* It is possible for the reserved area to end before the end of stolen
+	 * memory, so just consider the start. */
+	reserved_total = stolen_top - reserved_base;
+
+	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+		      dev_priv->gtt.stolen_size >> 10,
+		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+
+	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+					   reserved_total;
+
+	/*
+	 * Basic memrange allocator for stolen space.
+	 *
+	 * TODO: Notice that some platforms require us to not use the first page
+	 * of the stolen memory but their BIOSes may still put the framebuffer
+	 * on the first page. So we don't reserve this page for now because of
+	 * that. Our current solution is to just prevent new nodes from being
+	 * inserted on the first page - see the check we have at
+	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
+	 * problem later.
+	 */
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+
+	return 0;
+}
+
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+			     u32 offset, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st;
+	struct scatterlist *sg;
+
+	DRM_DEBUG_DRIVER("offset=0x%x, size=%u\n", offset, size);
+	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+
+	/* We hide that we have no struct page backing our stolen object
+	 * by wrapping the contiguous physical allocation with a fake
+	 * dma mapping in a single scatterlist.
+	 */
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return NULL;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return NULL;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = size;
+
+	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+	sg_dma_len(sg) = size;
+
+	return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	BUG();
+	return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	/* Should only be called during free */
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+	if (obj->stolen) {
+		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
+		kfree(obj->stolen);
+		obj->stolen = NULL;
+	}
+}
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+	.get_pages = i915_gem_object_get_pages_stolen,
+	.put_pages = i915_gem_object_put_pages_stolen,
+	.release = i915_gem_object_release_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+			       struct drm_mm_node *stolen)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return NULL;
+
+	drm_gem_private_object_init(dev, &obj->base, stolen->size);
+	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+	obj->pages = i915_pages_create_for_stolen(dev,
+						  stolen->start, stolen->size);
+	if (obj->pages == NULL)
+		goto cleanup;
+
+	i915_gem_object_pin_pages(obj);
+	obj->stolen = stolen;
+
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+	return obj;
+
+cleanup:
+	i915_gem_object_free(obj);
+	return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return NULL;
+
+	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+	if (size == 0)
+		return NULL;
+
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+	if (ret) {
+		kfree(stolen);
+		return NULL;
+	}
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj)
+		return obj;
+
+	i915_gem_stolen_remove_node(dev_priv, stolen);
+	kfree(stolen);
+	return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *ggtt = &dev_priv->gtt.base;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+	struct i915_vma *vma;
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return NULL;
+
+	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+			stolen_offset, gtt_offset, size);
+
+	/* KISS and expect everything to be page-aligned */
+	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+	    WARN_ON(stolen_offset & 4095))
+		return NULL;
+
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	stolen->start = stolen_offset;
+	stolen->size = size;
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		kfree(stolen);
+		return NULL;
+	}
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj == NULL) {
+		DRM_DEBUG_KMS("failed to allocate stolen object\n");
+		i915_gem_stolen_remove_node(dev_priv, stolen);
+		kfree(stolen);
+		return NULL;
+	}
+
+	/* Some objects just need physical mem from stolen space */
+	if (gtt_offset == I915_GTT_OFFSET_NONE)
+		return obj;
+
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err;
+	}
+
+	/* To simplify the initialisation sequence between KMS and GTT,
+	 * we allow construction of the stolen object prior to
+	 * setting up the GTT space. The actual reservation will occur
+	 * later.
+	 */
+	vma->node.start = gtt_offset;
+	vma->node.size = size;
+	if (drm_mm_initialized(&ggtt->mm)) {
+		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+		if (ret) {
+			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+			goto err;
+		}
+
+		vma->bound |= GLOBAL_BIND;
+		__i915_vma_set_map_and_fenceable(vma);
+		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+	}
+
+	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
+	i915_gem_object_pin_pages(obj);
+
+	return obj;
+
+err:
+	drm_gem_object_unreference(&obj->base);
+	return NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
new file mode 100644
index 0000000..8a6717c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: buffer object tiling
+ *
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface
+ * to declare fence register requirements.
+ *
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are
+ * two exceptions:
+ *
+ * - For X and Y tiling the hardware provides detilers for CPU access, the
+ *   so-called fences. Since there is only a limited number of them the kernel
+ *   must manage these, and therefore userspace must tell the kernel the
+ *   object tiling if it wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ *   depends upon the physical page frame number. When swapping such objects
+ *   the page frame number might change and the kernel must be able to fix
+ *   this up, and hence must know the tiling. Note that on a subset of
+ *   platforms with asymmetric memory channel population the swizzling
+ *   pattern changes in an unknown way, and for those the kernel simply
+ *   forbids swapping completely.
+ *
+ * Since neither of these applies to the new tiling layouts used on modern
+ * platforms, like W, Ys and Yf tiling, GEM only allows object tiling to be
+ * set to X or Y tiled. Anything else can be handled entirely in userspace
+ * without the kernel's involvement.
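+ *
+ * As an illustrative userspace sketch (not part of the original file; the
+ * fd, handle and stride values are assumptions), declaring X tiling and
+ * reading back the swizzle the kernel chose looks roughly like:
+ *
+ *	struct drm_i915_gem_set_tiling arg = {
+ *		.handle = handle,
+ *		.tiling_mode = I915_TILING_X,
+ *		.stride = 4096,
+ *	};
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) == 0)
+ *		swizzle = arg.swizzle_mode;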
+ */
+
+/* Check pitch constraints for all chips & tiling formats */
+static bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+	int tile_width;
+
+	/* Linear is always fine */
+	if (tiling_mode == I915_TILING_NONE)
+		return true;
+
+	if (IS_GEN2(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_width = 128;
+	else
+		tile_width = 512;
+
+	/* check maximum stride & object size */
+	/* i965+ stores the end address of the gtt mapping in the fence
+	 * reg, so don't bother to check the size */
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
+			return false;
+	} else if (INTEL_INFO(dev)->gen >= 4) {
+		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+			return false;
+	} else {
+		if (stride > 8192)
+			return false;
+
+		if (IS_GEN3(dev)) {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+				return false;
+		} else {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+				return false;
+		}
+	}
+
+	if (stride < tile_width)
+		return false;
+
+	/* 965+ just needs multiples of tile width */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (stride & (tile_width - 1))
+			return false;
+		return true;
+	}
+
+	/* Pre-965 needs power of two tile widths */
+	if (stride & (stride - 1))
+		return false;
+
+	return true;
+}
+
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+	u32 size;
+
+	if (tiling_mode == I915_TILING_NONE)
+		return true;
+
+	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+		return true;
+
+	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+			return false;
+	} else {
+		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
+			return false;
+	}
+
+	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	if (i915_gem_obj_ggtt_size(obj) != size)
+		return false;
+
+	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
+		return false;
+
+	return true;
+}
+
+/**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file)
+{
+	struct drm_i915_gem_set_tiling *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	if (!i915_tiling_ok(dev,
+			    args->stride, obj->base.size, args->tiling_mode)) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (obj->pin_display || obj->framebuffer_references) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	if (args->tiling_mode == I915_TILING_NONE) {
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		args->stride = 0;
+	} else {
+		if (args->tiling_mode == I915_TILING_X)
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		else
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
+		 * from aborting the application on sw fallbacks to bit 17,
+		 * and we use the pread/pwrite bit17 paths to swizzle for it.
+		 * If there was a user that was relying on the swizzle
+		 * information for drm_intel_bo_map()ed reads/writes this would
+		 * break it, but we don't have any of those.
+		 */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+		/* If we can't handle the swizzling, make it untiled. */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+			args->tiling_mode = I915_TILING_NONE;
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+			args->stride = 0;
+		}
+	}
+
+	if (args->tiling_mode != obj->tiling_mode ||
+	    args->stride != obj->stride) {
+		/* We need to rebind the object if its current allocation
+		 * no longer meets the alignment restrictions for its new
+		 * tiling mode. Otherwise we can just leave it alone, but
+		 * need to ensure that any fence register is updated before
+		 * the next fenced (either through the GTT or by the BLT unit
+		 * on older GPUs) access.
+		 *
+		 * After updating the tiling parameters, we then flag whether
+		 * we need to update an associated fence register. Note this
+		 * has to also include the unfenced register the GPU uses
+		 * whilst executing a fenced command for an untiled object.
+		 */
+		if (obj->map_and_fenceable &&
+		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
+			ret = i915_gem_object_ggtt_unbind(obj);
+
+		if (ret == 0) {
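+			/* On platforms with the bit-17 swizzle quirk we keep
+			 * the backing pages of a tiled object pinned, so that
+			 * their physical addresses (and hence their swizzle
+			 * state) cannot change behind our back; transfer that
+			 * pin as the object moves between tiled and untiled.
+			 */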
+			if (obj->pages &&
+			    obj->madv == I915_MADV_WILLNEED &&
+			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+				if (args->tiling_mode == I915_TILING_NONE)
+					i915_gem_object_unpin_pages(obj);
+				if (obj->tiling_mode == I915_TILING_NONE)
+					i915_gem_object_pin_pages(obj);
+			}
+
+			obj->fence_dirty =
+				obj->last_fenced_req ||
+				obj->fence_reg != I915_FENCE_REG_NONE;
+
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+
+			/* Force the fence to be reacquired for GTT access */
+			i915_gem_release_mmap(obj);
+		}
+	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
+
+	/* Try to preallocate memory required to save swizzling on put-pages */
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		if (obj->bit_17 == NULL) {
+			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+					      sizeof(long), GFP_KERNEL);
+		}
+	} else {
+		kfree(obj->bit_17);
+		obj->bit_17 = NULL;
+	}
+
+err:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file)
+{
+	struct drm_i915_gem_get_tiling *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	args->tiling_mode = obj->tiling_mode;
+	switch (obj->tiling_mode) {
+	case I915_TILING_X:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		break;
+	case I915_TILING_Y:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		break;
+	case I915_TILING_NONE:
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		break;
+	default:
+		DRM_ERROR("unknown tiling mode\n");
+	}
+
+	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+	else
+		args->phys_swizzle_mode = args->swizzle_mode;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
new file mode 100644
index 0000000..19fb0bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mempolicy.h>
+#include <linux/swap.h>
+
+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
+#if defined(CONFIG_MMU_NOTIFIER)
+#include <linux/interval_tree.h>
+
+struct i915_mmu_notifier {
+	spinlock_t lock;
+	struct hlist_node node;
+	struct mmu_notifier mn;
+	struct rb_root objects;
+	struct list_head linear;
+	bool has_linear;
+};
+
+struct i915_mmu_object {
+	struct i915_mmu_notifier *mn;
+	struct interval_tree_node it;
+	struct list_head link;
+	struct drm_i915_gem_object *obj;
+	struct work_struct work;
+	bool active;
+	bool is_linear;
+};
+
+static void __cancel_userptr__worker(struct work_struct *work)
+{
+	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
+	struct drm_i915_gem_object *obj = mo->obj;
+	struct drm_device *dev = obj->base.dev;
+
+	mutex_lock(&dev->struct_mutex);
+	/* Cancel any active worker and force us to re-evaluate gup */
+	obj->userptr.work = NULL;
+
+	if (obj->pages != NULL) {
+		struct drm_i915_private *dev_priv = to_i915(dev);
+		struct i915_vma *vma, *tmp;
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+			int ret = i915_vma_unbind(vma);
+			WARN_ON(ret && ret != -EIO);
+		}
+		WARN_ON(i915_gem_object_put_pages(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+{
+	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
+
+	/* The mmu_object is released late when destroying the
+	 * GEM object so it is entirely possible to gain a
+	 * reference on an object in the process of being freed
+	 * since our serialisation is via the spinlock and not
+	 * the struct_mutex - and consequently use it after it
+	 * is freed and then double free it.
+	 */
+	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
+		schedule_work(&mo->work);
+		/* only schedule one work packet to avoid the refleak */
+		mo->active = false;
+	}
+
+	return end;
+}
+
+static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+						       struct mm_struct *mm,
+						       unsigned long start,
+						       unsigned long end)
+{
+	struct i915_mmu_notifier *mn =
+		container_of(_mn, struct i915_mmu_notifier, mn);
+	struct i915_mmu_object *mo;
+
+	/* interval ranges are inclusive, but invalidate range is exclusive */
+	end--;
+
+	spin_lock(&mn->lock);
+	if (mn->has_linear) {
+		list_for_each_entry(mo, &mn->linear, link) {
+			if (mo->it.last < start || mo->it.start > end)
+				continue;
+
+			cancel_userptr(mo);
+		}
+	} else {
+		struct interval_tree_node *it;
+
+		it = interval_tree_iter_first(&mn->objects, start, end);
+		while (it) {
+			mo = container_of(it, struct i915_mmu_object, it);
+			start = cancel_userptr(mo);
+			it = interval_tree_iter_next(it, start, end);
+		}
+	}
+	spin_unlock(&mn->lock);
+}
+
+static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
+	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+};
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_create(struct mm_struct *mm)
+{
+	struct i915_mmu_notifier *mn;
+	int ret;
+
+	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+	if (mn == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&mn->lock);
+	mn->mn.ops = &i915_gem_userptr_notifier;
+	mn->objects = RB_ROOT;
+	INIT_LIST_HEAD(&mn->linear);
+	mn->has_linear = false;
+
+	/* Protected by mmap_sem (write-lock) */
+	ret = __mmu_notifier_register(&mn->mn, mm);
+	if (ret) {
+		kfree(mn);
+		return ERR_PTR(ret);
+	}
+
+	return mn;
+}
+
+static int
+i915_mmu_notifier_add(struct drm_device *dev,
+		      struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	struct interval_tree_node *it;
+	int ret = 0;
+
+	/* By this point we have already done a lot of expensive setup that
+	 * we do not want to repeat just because the caller (e.g. X) has a
+	 * signal pending (and partly because of that expensive setup, X
+	 * using an interrupt timer is likely to get stuck in an EINTR loop).
+	 */
+	mutex_lock(&dev->struct_mutex);
+
+	/* Make sure we drop the final active reference (and thereby
+	 * remove the objects from the interval tree) before we do
+	 * the check for overlapping objects.
+	 */
+	i915_gem_retire_requests(dev);
+
+	spin_lock(&mn->lock);
+	it = interval_tree_iter_first(&mn->objects,
+				      mo->it.start, mo->it.last);
+	if (it) {
+		struct drm_i915_gem_object *obj;
+
+		/* We only need to check the first object in the range as it
+		 * either has cancelled gup work queued and we need to
+		 * return back to the user to give time for the gup-workers
+		 * to flush their object references upon which the object will
+		 * be removed from the interval-tree, or the range is
+		 * still in use by another client and the overlap is invalid.
+		 *
+		 * If we do have an overlap, we cannot use the interval tree
+		 * for fast range invalidation.
+		 */
+
+		obj = container_of(it, struct i915_mmu_object, it)->obj;
+		if (!obj->userptr.workers)
+			mn->has_linear = mo->is_linear = true;
+		else
+			ret = -EAGAIN;
+	} else
+		interval_tree_insert(&mo->it, &mn->objects);
+
+	if (ret == 0)
+		list_add(&mo->link, &mn->linear);
+
+	spin_unlock(&mn->lock);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+	struct i915_mmu_object *mo;
+
+	list_for_each_entry(mo, &mn->linear, link)
+		if (mo->is_linear)
+			return true;
+
+	return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	spin_lock(&mn->lock);
+	list_del(&mo->link);
+	if (mo->is_linear)
+		mn->has_linear = i915_mmu_notifier_has_linear(mn);
+	else
+		interval_tree_remove(&mo->it, &mn->objects);
+	spin_unlock(&mn->lock);
+}
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+	struct i915_mmu_object *mo;
+
+	mo = obj->userptr.mmu_object;
+	if (mo == NULL)
+		return;
+
+	i915_mmu_notifier_del(mo->mn, mo);
+	kfree(mo);
+
+	obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+	struct i915_mmu_notifier *mn = mm->mn;
+
+	if (mn)
+		return mn;
+
+	down_write(&mm->mm->mmap_sem);
+	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	if ((mn = mm->mn) == NULL) {
+		mn = i915_mmu_notifier_create(mm->mm);
+		if (!IS_ERR(mn))
+			mm->mn = mn;
+	}
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	up_write(&mm->mm->mmap_sem);
+
+	return mn;
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+				    unsigned flags)
+{
+	struct i915_mmu_notifier *mn;
+	struct i915_mmu_object *mo;
+	int ret;
+
+	if (flags & I915_USERPTR_UNSYNCHRONIZED)
+		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+
+	if (WARN_ON(obj->userptr.mm == NULL))
+		return -EINVAL;
+
+	mn = i915_mmu_notifier_find(obj->userptr.mm);
+	if (IS_ERR(mn))
+		return PTR_ERR(mn);
+
+	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+	if (mo == NULL)
+		return -ENOMEM;
+
+	mo->mn = mn;
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = mo->it.start + obj->base.size - 1;
+	mo->obj = obj;
+	INIT_WORK(&mo->work, __cancel_userptr__worker);
+
+	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+	if (ret) {
+		kfree(mo);
+		return ret;
+	}
+
+	obj->userptr.mmu_object = mo;
+	return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	if (mn == NULL)
+		return;
+
+	mmu_notifier_unregister(&mn->mn, mm);
+	kfree(mn);
+}
+
+#else
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+				    unsigned flags)
+{
+	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
+		return -ENODEV;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+}
+
+#endif
+
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+	struct i915_mm_struct *mm;
+
+	/* Protected by dev_priv->mm_lock */
+	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+		if (mm->mm == real)
+			return mm;
+
+	return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm;
+	int ret = 0;
+
+	/* During release of the GEM object we hold the struct_mutex. This
+	 * precludes us from calling mmput() at that time as that may be
+	 * the last reference and so call exit_mmap(). exit_mmap() will
+	 * attempt to reap the vma, and if we were holding a GTT mmap
+	 * would then call drm_gem_vm_close() and attempt to reacquire
+	 * the struct mutex. So in order to avoid that recursion, we have
+	 * to defer releasing the mm reference until after we drop the
+	 * struct_mutex, i.e. we need to schedule a worker to do the clean
+	 * up.
+	 */
+	mutex_lock(&dev_priv->mm_lock);
+	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	if (mm == NULL) {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (mm == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		kref_init(&mm->kref);
+		mm->dev = obj->base.dev;
+
+		mm->mm = current->mm;
+		atomic_inc(&current->mm->mm_count);
+
+		mm->mn = NULL;
+
+		/* Protected by dev_priv->mm_lock */
+		hash_add(dev_priv->mm_structs,
+			 &mm->node, (unsigned long)mm->mm);
+	} else
+		kref_get(&mm->kref);
+
+	obj->userptr.mm = mm;
+out:
+	mutex_unlock(&dev_priv->mm_lock);
+	return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	i915_mmu_notifier_free(mm->mn, mm->mm);
+	mmdrop(mm->mm);
+	kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+	/* Protected by dev_priv->mm_lock */
+	hash_del(&mm->node);
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+	schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mm == NULL)
+		return;
+
+	kref_put_mutex(&obj->userptr.mm->kref,
+		       __i915_mm_struct_free,
+		       &to_i915(obj->base.dev)->mm_lock);
+	obj->userptr.mm = NULL;
+}
+
+struct get_pages_work {
+	struct work_struct work;
+	struct drm_i915_gem_object *obj;
+	struct task_struct *task;
+};
+
+#if IS_ENABLED(CONFIG_SWIOTLB)
+#define swiotlb_active() swiotlb_nr_tbl()
+#else
+#define swiotlb_active() 0
+#endif
+
+static int
+st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+{
+	struct scatterlist *sg;
+	int ret, n;
+
+	*st = kmalloc(sizeof(**st), GFP_KERNEL);
+	if (*st == NULL)
+		return -ENOMEM;
+
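+	/* With swiotlb active we keep one page per sg entry rather than
+	 * letting sg_alloc_table_from_pages() coalesce neighbouring pages,
+	 * presumably so that no single segment exceeds what the swiotlb
+	 * bounce buffers can map at once.
+	 */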
+	if (swiotlb_active()) {
+		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
+		if (ret)
+			goto err;
+
+		for_each_sg((*st)->sgl, sg, num_pages, n)
+			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+	} else {
+		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
+						0, num_pages << PAGE_SHIFT,
+						GFP_KERNEL);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	kfree(*st);
+	*st = NULL;
+	return ret;
+}
+
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+			     struct page **pvec, int num_pages)
+{
+	int ret;
+
+	ret = st_set_pages(&obj->pages, pvec, num_pages);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret) {
+		sg_free_table(obj->pages);
+		kfree(obj->pages);
+		obj->pages = NULL;
+	}
+
+	return ret;
+}
+
+static int
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
+			      bool value)
+{
+	int ret = 0;
+
+	/* During mm_invalidate_range we need to cancel any userptr that
+	 * overlaps the range being invalidated. Doing so requires the
+	 * struct_mutex, and that risks recursion. In order to cause
+	 * recursion, the user must alias the userptr address space with
+	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+	 * to invalidate that mmapping, mm_invalidate_range is called with
+	 * the userptr address *and* the struct_mutex held.  To prevent that
+	 * we set a flag under the i915_mmu_notifier spinlock to indicate
+	 * whether this object is valid.
+	 */
+#if defined(CONFIG_MMU_NOTIFIER)
+	if (obj->userptr.mmu_object == NULL)
+		return 0;
+
+	spin_lock(&obj->userptr.mmu_object->mn->lock);
+	/* In order to serialise get_pages with an outstanding
+	 * cancel_userptr, we must drop the struct_mutex and try again.
+	 */
+	if (!value || !work_pending(&obj->userptr.mmu_object->work))
+		obj->userptr.mmu_object->active = value;
+	else
+		ret = -EAGAIN;
+	spin_unlock(&obj->userptr.mmu_object->mn->lock);
+#endif
+
+	return ret;
+}
+
+static void
+__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
+{
+	struct get_pages_work *work = container_of(_work, typeof(*work), work);
+	struct drm_i915_gem_object *obj = work->obj;
+	struct drm_device *dev = obj->base.dev;
+	const int npages = obj->base.size >> PAGE_SHIFT;
+	struct page **pvec;
+	int pinned, ret;
+
+	ret = -ENOMEM;
+	pinned = 0;
+
+	pvec = kmalloc(npages*sizeof(struct page *),
+		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	if (pvec == NULL)
+		pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pvec != NULL) {
+		struct mm_struct *mm = obj->userptr.mm->mm;
+
+		down_read(&mm->mmap_sem);
+		while (pinned < npages) {
+			ret = get_user_pages(work->task, mm,
+					     obj->userptr.ptr + pinned * PAGE_SIZE,
+					     npages - pinned,
+					     !obj->userptr.read_only, 0,
+					     pvec + pinned, NULL);
+			if (ret < 0)
+				break;
+
+			pinned += ret;
+		}
+		up_read(&mm->mmap_sem);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (obj->userptr.work == &work->work) {
+		if (pinned == npages) {
+			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
+			if (ret == 0) {
+				list_add_tail(&obj->global_list,
+					      &to_i915(dev)->mm.unbound_list);
+				obj->get_page.sg = obj->pages->sgl;
+				obj->get_page.last = 0;
+				pinned = 0;
+			}
+		}
+		obj->userptr.work = ERR_PTR(ret);
+		if (ret)
+			__i915_gem_userptr_set_active(obj, false);
+	}
+
+	obj->userptr.workers--;
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	release_pages(pvec, pinned, 0);
+	drm_free_large(pvec);
+
+	put_task_struct(work->task);
+	kfree(work);
+}
+
+static int
+__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
+				      bool *active)
+{
+	struct get_pages_work *work;
+
+	/* Spawn a worker so that we can acquire the
+	 * user pages without holding our mutex. Access
+	 * to the user pages requires mmap_sem, and we have
+	 * a strict lock ordering of mmap_sem, struct_mutex -
+	 * we already hold struct_mutex here and so cannot
+	 * call gup without encountering a lock inversion.
+	 *
+	 * Userspace will keep on repeating the operation
+	 * (thanks to EAGAIN) until either we hit the fast
+	 * path or the worker completes. If the worker is
+	 * cancelled or superseded, the task is still run
+	 * but the results ignored. (This leads to
+	 * complications that we may have a stray object
+	 * refcount that we need to be wary of when
+	 * checking for existing objects during creation.)
+	 * If the worker encounters an error, it reports
+	 * that error back to this function through
+	 * obj->userptr.work = ERR_PTR.
+	 */
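+	/* For illustration (not from the original file): the retry loop
+	 * userspace ends up in is roughly the one below, and libdrm's
+	 * drmIoctl() already restarts on EAGAIN on the caller's behalf:
+	 *
+	 *	do {
+	 *		ret = ioctl(fd, request, arg);
+	 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+	 */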
+	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
+		return -EAGAIN;
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	obj->userptr.work = &work->work;
+	obj->userptr.workers++;
+
+	work->obj = obj;
+	drm_gem_object_reference(&obj->base);
+
+	work->task = current;
+	get_task_struct(work->task);
+
+	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
+	schedule_work(&work->work);
+
+	*active = true;
+	return -EAGAIN;
+}
+
+static int
+i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+{
+	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	struct page **pvec;
+	int pinned, ret;
+	bool active;
+
+	/* If userspace should engineer that these pages are replaced in
+	 * the vma between us binding these pages into the GTT and completion
+	 * of rendering... Their loss. If they change the mapping of their
+	 * pages they need to create a new bo to point to the new vma.
+	 *
+	 * However, that still leaves open the possibility of the vma
+	 * being copied upon fork. Which falls under the same userspace
+	 * synchronisation issue as a regular bo, except that this time
+	 * the process may not be expecting that a particular piece of
+	 * memory is tied to the GPU.
+	 *
+	 * Fortunately, we can hook into the mmu_notifier in order to
+	 * discard the page references prior to anything nasty happening
+	 * to the vma (discard or cloning) which should prevent the more
+	 * egregious cases from causing harm.
+	 */
+	if (IS_ERR(obj->userptr.work)) {
+		/* active flag will have been dropped already by the worker */
+		ret = PTR_ERR(obj->userptr.work);
+		obj->userptr.work = NULL;
+		return ret;
+	}
+	if (obj->userptr.work)
+		/* active flag should still be held for the pending work */
+		return -EAGAIN;
+
+	/* Let the mmu-notifier know that we have begun and need cancellation */
+	ret = __i915_gem_userptr_set_active(obj, true);
+	if (ret)
+		return ret;
+
+	pvec = NULL;
+	pinned = 0;
+	if (obj->userptr.mm->mm == current->mm) {
+		pvec = kmalloc(num_pages*sizeof(struct page *),
+			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+		if (pvec == NULL) {
+			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+			if (pvec == NULL) {
+				__i915_gem_userptr_set_active(obj, false);
+				return -ENOMEM;
+			}
+		}
+
+		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
+					       !obj->userptr.read_only, pvec);
+	}
+
+	active = false;
+	if (pinned < 0)
+		ret = pinned, pinned = 0;
+	else if (pinned < num_pages)
+		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+	else
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+	if (ret) {
+		__i915_gem_userptr_set_active(obj, active);
+		release_pages(pvec, pinned, 0);
+	}
+	drm_free_large(pvec);
+	return ret;
+}
+
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+
+	BUG_ON(obj->userptr.work != NULL);
+	__i915_gem_userptr_set_active(obj, false);
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		obj->dirty = 0;
+
+	i915_gem_gtt_finish_object(obj);
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+
+		if (obj->dirty)
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+	obj->dirty = 0;
+
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static void
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+{
+	i915_gem_userptr_release__mmu_notifier(obj);
+	i915_gem_userptr_release__mm_struct(obj);
+}
+
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mmu_object)
+		return 0;
+
+	return i915_gem_userptr_init__mmu_notifier(obj, 0);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+	.dmabuf_export = i915_gem_userptr_dmabuf_export,
+	.get_pages = i915_gem_userptr_get_pages,
+	.put_pages = i915_gem_userptr_put_pages,
+	.release = i915_gem_userptr_release,
+};
+
+/**
+ * Creates a new mm object that wraps some normal memory from the process
+ * context - user memory.
+ *
+ * We impose several restrictions upon the memory being mapped
+ * into the GPU.
+ * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
+ * 2. It must be normal system memory, not a pointer into another map of IO
+ *    space (e.g. it must not be a GTT mmapping of another object).
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
+ *    that is we limit the size to the total size of the GTT.
+ * 4. The bo is marked as being snoopable. The backing pages are left
+ *    accessible directly by the CPU, but reads and writes by the GPU may
+ *    incur the cost of a snoop (unless you have an LLC architecture).
+ *
+ * Synchronisation between multiple users and the GPU is left to userspace
+ * through the normal set-domain-ioctl. The kernel will enforce that the
+ * GPU relinquishes the VMA before it is returned back to the system
+ * i.e. upon free(), munmap() or process termination. However, the userspace
+ * malloc() library may not immediately relinquish the VMA after free() and
+ * instead reuse it whilst the GPU is still reading and writing to the VMA.
+ * Caveat emptor.
+ *
+ * Also note that the object created here is not currently a "first class"
+ * object, in that several ioctls are banned. These are the CPU access
+ * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+ * direct access via your pointer rather than use those ioctls. Another
+ * restriction is that we do not allow userptr surfaces to be pinned to the
+ * hardware and so we reject any attempt to create a framebuffer out of a
+ * userptr.
+ *
+ * If you think this is a good interface to use to pass GPU memory between
+ * drivers, please use dma-buf instead. In fact, wherever possible use
+ * dma-buf instead.
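+ *
+ * As an illustrative userspace sketch (not part of the original file; fd
+ * and sz are assumptions), wrapping an anonymous, page-aligned allocation
+ * looks roughly like:
+ *
+ *	struct drm_i915_gem_userptr arg = { 0 };
+ *
+ *	arg.user_ptr = (uintptr_t)aligned_alloc(4096, sz);
+ *	arg.user_size = sz;
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
+ *		handle = arg.handle;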
+ */
+int
+i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_i915_gem_userptr *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	if (args->flags & ~(I915_USERPTR_READ_ONLY |
+			    I915_USERPTR_UNSYNCHRONIZED))
+		return -EINVAL;
+
+	if (offset_in_page(args->user_ptr | args->user_size))
+		return -EINVAL;
+
+	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
+		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
+		return -EFAULT;
+
+	if (args->flags & I915_USERPTR_READ_ONLY) {
+		/* On almost all of the current hw, we cannot tell the GPU that a
+		 * page is readonly, so this is just a placeholder in the uAPI.
+		 */
+		return -ENODEV;
+	}
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	drm_gem_private_object_init(dev, &obj->base, args->user_size);
+	i915_gem_object_init(obj, &i915_gem_userptr_ops);
+	obj->cache_level = I915_CACHE_LLC;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+	obj->userptr.ptr = args->user_ptr;
+	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+
+	/* And keep a pointer to the current->mm for resolving the user pages
+	 * at binding. This means that we need to hook into the mmu_notifier
+	 * in order to detect if the mmu is destroyed.
+	 */
+	ret = i915_gem_userptr_init__mm_struct(obj);
+	if (ret == 0)
+		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+	if (ret == 0)
+		ret = drm_gem_handle_create(file, &obj->base, &handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int
+i915_gem_init_userptr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	mutex_init(&dev_priv->mm_lock);
+	hash_init(dev_priv->mm_structs);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 0000000..2f04e4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RCS: return "render";
+	case VCS: return "bsd";
+	case BCS: return "blt";
+	case VECS: return "vebox";
+	case VCS2: return "bsd2";
+	default: return "";
+	}
+}
+
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+		e->err = -ENOSPC;
+		return false;
+	}
+
+	if (e->bytes == e->size - 1 || e->err)
+		return false;
+
+	return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+			      unsigned len)
+{
+	if (e->pos + len <= e->start) {
+		e->pos += len;
+		return false;
+	}
+
+	/* First vsnprintf needs to fit in its entirety for memmove */
+	if (len >= e->size) {
+		e->err = -EIO;
+		return false;
+	}
+
+	return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+				 unsigned len)
+{
+	/* If this is first printf in this window, adjust it so that
+	 * start position matches start of the buffer
+	 */
+
+	if (e->pos < e->start) {
+		const size_t off = e->start - e->pos;
+
+		/* Should not happen but be paranoid */
+		if (off > len || e->bytes) {
+			e->err = -EIO;
+			return;
+		}
+
+		memmove(e->buf, e->buf + off, len - off);
+		e->bytes = len - off;
+		e->pos = e->start;
+		return;
+	}
+
+	e->bytes += len;
+	e->pos += len;
+}
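+
+/*
+ * A worked example (illustrative, not from the original file): suppose the
+ * reader asked for start = 100. A first 60-byte printf only advances pos to
+ * 60 and is skipped by __i915_error_seek(). The next 60-byte printf spans
+ * the start position, so it is formatted in full and __i915_error_advance()
+ * memmoves away its first 40 bytes, leaving the buffer to begin exactly at
+ * offset 100.
+ */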
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+			       const char *f, va_list args)
+{
+	unsigned len;
+
+	if (!__i915_error_ok(e))
+		return;
+
+	/* Seek the first printf which hits the start position */
+	if (e->pos < e->start) {
+		va_list tmp;
+
+		va_copy(tmp, args);
+		len = vsnprintf(NULL, 0, f, tmp);
+		va_end(tmp);
+
+		if (!__i915_error_seek(e, len))
+			return;
+	}
+
+	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+	if (len >= e->size - e->bytes)
+		len = e->size - e->bytes - 1;
+
+	__i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+			    const char *str)
+{
+	unsigned len;
+
+	if (!__i915_error_ok(e))
+		return;
+
+	len = strlen(str);
+
+	/* Seek the first printf which hits the start position */
+	if (e->pos < e->start) {
+		if (!__i915_error_seek(e, len))
+			return;
+	}
+
+	if (len >= e->size - e->bytes)
+		len = e->size - e->bytes - 1;
+	memcpy(e->buf + e->bytes, str, len);
+
+	__i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	int i;
+
+	err_printf(m, "  %s [%d]:\n", name, count);
+
+	while (count--) {
+		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
+			   upper_32_bits(err->gtt_offset),
+			   lower_32_bits(err->gtt_offset),
+			   err->size,
+			   err->read_domains,
+			   err->write_domain);
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			err_printf(m, "%02x ", err->rseqno[i]);
+
+		err_printf(m, "] %02x", err->wseqno);
+		err_puts(m, pin_flag(err->pinned));
+		err_puts(m, tiling_flag(err->tiling));
+		err_puts(m, dirty_flag(err->dirty));
+		err_puts(m, purgeable_flag(err->purgeable));
+		err_puts(m, err->userptr ? " userptr" : "");
+		err_puts(m, err->ring != -1 ? " " : "");
+		err_puts(m, ring_str(err->ring));
+		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
+
+		if (err->name)
+			err_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			err_printf(m, " (fence: %d)", err->fence_reg);
+
+		err_puts(m, "\n");
+		err++;
+	}
+}
+
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+	switch (a) {
+	case HANGCHECK_IDLE:
+		return "idle";
+	case HANGCHECK_WAIT:
+		return "wait";
+	case HANGCHECK_ACTIVE:
+		return "active";
+	case HANGCHECK_ACTIVE_LOOP:
+		return "active (loop)";
+	case HANGCHECK_KICK:
+		return "kick";
+	case HANGCHECK_HUNG:
+		return "hung";
+	}
+
+	return "unknown";
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+				  struct drm_device *dev,
+				  struct drm_i915_error_state *error,
+				  int ring_idx)
+{
+	struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
+	if (!ring->valid)
+		return;
+
+	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
+	err_printf(m, "  START: 0x%08x\n", ring->start);
+	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
+	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
+	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
+	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
+	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
+	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
+	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
+	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
+		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
+		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
+	}
+	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
+	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+		   lower_32_bits(ring->faddr));
+	if (INTEL_INFO(dev)->gen >= 6) {
+		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
+		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
+		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+			   ring->semaphore_mboxes[0],
+			   ring->semaphore_seqno[0]);
+		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+			   ring->semaphore_mboxes[1],
+			   ring->semaphore_seqno[1]);
+		if (HAS_VEBOX(dev)) {
+			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
+				   ring->semaphore_mboxes[2],
+				   ring->semaphore_seqno[2]);
+		}
+	}
+	if (USES_PPGTT(dev)) {
+		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+		if (INTEL_INFO(dev)->gen >= 8) {
+			int i;
+			for (i = 0; i < 4; i++)
+				err_printf(m, "  PDP%d: 0x%016llx\n",
+					   i, ring->vm_info.pdp[i]);
+		} else {
+			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
+				   ring->vm_info.pp_dir_base);
+		}
+	}
+	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
+	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
+	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+	err_printf(m, "  hangcheck: %s [%d]\n",
+		   hangcheck_action_to_str(ring->hangcheck_action),
+		   ring->hangcheck_score);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+	va_list args;
+
+	va_start(args, f);
+	i915_error_vprintf(e, f, args);
+	va_end(args);
+}
+
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+			    struct drm_i915_error_object *obj)
+{
+	int page, offset, elt;
+
+	for (page = offset = 0; page < obj->page_count; page++) {
+		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+			err_printf(m, "%08x :  %08x\n", offset,
+				   obj->pages[page][elt]);
+			offset += 4;
+		}
+	}
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+			    const struct i915_error_state_file_priv *error_priv)
+{
+	struct drm_device *dev = error_priv->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error = error_priv->error;
+	struct drm_i915_error_object *obj;
+	int i, j, offset, elt;
+	int max_hangcheck_score;
+
+	if (!error) {
+		err_printf(m, "no error state collected\n");
+		goto out;
+	}
+
+	err_printf(m, "%s\n", error->error_msg);
+	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+		   error->time.tv_usec);
+	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	max_hangcheck_score = 0;
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		if (error->ring[i].hangcheck_score > max_hangcheck_score)
+			max_hangcheck_score = error->ring[i].hangcheck_score;
+	}
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+		    error->ring[i].pid != -1) {
+			err_printf(m, "Active process (on ring %s): %s [%d]\n",
+				   ring_str(i),
+				   error->ring[i].comm,
+				   error->ring[i].pid);
+		}
+	}
+	err_printf(m, "Reset count: %u\n", error->reset_count);
+	err_printf(m, "Suspend count: %u\n", error->suspend_count);
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
+	err_printf(m, "EIR: 0x%08x\n", error->eir);
+	err_printf(m, "IER: 0x%08x\n", error->ier);
+	if (INTEL_INFO(dev)->gen >= 8) {
+		for (i = 0; i < 4; i++)
+			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
+				   error->gtier[i]);
+	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
+	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+	err_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++)
+		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
+	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
+			   error->extra_instdone[i]);
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+		if (INTEL_INFO(dev)->gen >= 8)
+			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+				   error->fault_data1, error->fault_data0);
+
+		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+	}
+
+	if (INTEL_INFO(dev)->gen == 7)
+		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+		i915_ring_error_state(m, dev, error, i);
+
+	for (i = 0; i < error->vm_count; i++) {
+		err_printf(m, "vm[%d]\n", i);
+
+		print_error_buffers(m, "Active",
+				    error->active_bo[i],
+				    error->active_bo_count[i]);
+
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo[i],
+				    error->pinned_bo_count[i]);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		obj = error->ring[i].batchbuffer;
+		if (obj) {
+			err_puts(m, dev_priv->ring[i].name);
+			if (error->ring[i].pid != -1)
+				err_printf(m, " (submitted by %s [%d])",
+					   error->ring[i].comm,
+					   error->ring[i].pid);
+			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
+				   upper_32_bits(obj->gtt_offset),
+				   lower_32_bits(obj->gtt_offset));
+			print_error_obj(m, obj);
+		}
+
+		obj = error->ring[i].wa_batchbuffer;
+		if (obj) {
+			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   lower_32_bits(obj->gtt_offset));
+			print_error_obj(m, obj);
+		}
+
+		if (error->ring[i].num_requests) {
+			err_printf(m, "%s --- %d requests\n",
+				   dev_priv->ring[i].name,
+				   error->ring[i].num_requests);
+			for (j = 0; j < error->ring[i].num_requests; j++) {
+				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+					   error->ring[i].requests[j].seqno,
+					   error->ring[i].requests[j].jiffies,
+					   error->ring[i].requests[j].tail);
+			}
+		}
+
+		if ((obj = error->ring[i].ringbuffer)) {
+			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   lower_32_bits(obj->gtt_offset));
+			print_error_obj(m, obj);
+		}
+
+		if ((obj = error->ring[i].hws_page)) {
+			u64 hws_offset = obj->gtt_offset;
+			u32 *hws_page = &obj->pages[0][0];
+
+			if (i915.enable_execlists) {
+				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
+				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
+			}
+			err_printf(m, "%s --- HW Status = 0x%08llx\n",
+				   dev_priv->ring[i].name, hws_offset);
+			offset = 0;
+			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+					   offset,
+					   hws_page[elt],
+					   hws_page[elt+1],
+					   hws_page[elt+2],
+					   hws_page[elt+3]);
+				offset += 16;
+			}
+		}
+
+		if ((obj = error->ring[i].ctx)) {
+			err_printf(m, "%s --- HW Context = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   lower_32_bits(obj->gtt_offset));
+			print_error_obj(m, obj);
+		}
+	}
+
+	if ((obj = error->semaphore_obj)) {
+		err_printf(m, "Semaphore page = 0x%08x\n",
+			   lower_32_bits(obj->gtt_offset));
+		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+				   elt * 4,
+				   obj->pages[0][elt],
+				   obj->pages[0][elt+1],
+				   obj->pages[0][elt+2],
+				   obj->pages[0][elt+3]);
+		}
+	}
+
+	if (error->overlay)
+		intel_overlay_print_error_state(m, error->overlay);
+
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
+out:
+	if (m->bytes == 0 && m->err)
+		return m->err;
+
+	return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+			      struct drm_i915_private *i915,
+			      size_t count, loff_t pos)
+{
+	memset(ebuf, 0, sizeof(*ebuf));
+	ebuf->i915 = i915;
+
+	/* We need to have enough room to store any i915_error_state printf
+	 * so that we can move it to start position.
+	 */
+	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+	ebuf->buf = kmalloc(ebuf->size,
+				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+	if (ebuf->buf == NULL) {
+		ebuf->size = PAGE_SIZE;
+		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+	}
+
+	if (ebuf->buf == NULL) {
+		ebuf->size = 128;
+		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+	}
+
+	if (ebuf->buf == NULL)
+		return -ENOMEM;
+
+	ebuf->start = pos;
+
+	return 0;
+}
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+	int page;
+
+	if (obj == NULL)
+		return;
+
+	for (page = 0; page < obj->page_count; page++)
+		kfree(obj->pages[page]);
+
+	kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+	struct drm_i915_error_state *error = container_of(error_ref,
+							  typeof(*error), ref);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		i915_error_object_free(error->ring[i].batchbuffer);
+		i915_error_object_free(error->ring[i].wa_batchbuffer);
+		i915_error_object_free(error->ring[i].ringbuffer);
+		i915_error_object_free(error->ring[i].hws_page);
+		i915_error_object_free(error->ring[i].ctx);
+		kfree(error->ring[i].requests);
+	}
+
+	i915_error_object_free(error->semaphore_obj);
+
+	for (i = 0; i < error->vm_count; i++)
+		kfree(error->active_bo[i]);
+
+	kfree(error->active_bo);
+	kfree(error->active_bo_count);
+	kfree(error->pinned_bo);
+	kfree(error->pinned_bo_count);
+	kfree(error->overlay);
+	kfree(error->display);
+	kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_i915_private *dev_priv,
+			 struct drm_i915_gem_object *src,
+			 struct i915_address_space *vm)
+{
+	struct drm_i915_error_object *dst;
+	struct i915_vma *vma = NULL;
+	int num_pages;
+	bool use_ggtt;
+	int i = 0;
+	u64 reloc_offset;
+
+	if (src == NULL || src->pages == NULL)
+		return NULL;
+
+	num_pages = src->base.size >> PAGE_SHIFT;
+
+	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+	if (dst == NULL)
+		return NULL;
+
+	if (i915_gem_obj_bound(src, vm))
+		dst->gtt_offset = i915_gem_obj_offset(src, vm);
+	else
+		dst->gtt_offset = -1;
+
+	reloc_offset = dst->gtt_offset;
+	if (i915_is_ggtt(vm))
+		vma = i915_gem_obj_to_ggtt(src);
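+	/* Reading through the GGTT aperture is only coherent for uncached
+	 * objects that are actually bound into the mappable range.
+	 */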
+	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
+		   vma && (vma->bound & GLOBAL_BIND) &&
+		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+
+	/* Cannot access stolen address directly, try to use the aperture */
+	if (src->stolen) {
+		use_ggtt = true;
+
+		if (!(vma && vma->bound & GLOBAL_BIND))
+			goto unwind;
+
+		reloc_offset = i915_gem_obj_ggtt_offset(src);
+		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+			goto unwind;
+	}
+
+	/* Cannot access snooped pages through the aperture */
+	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+		goto unwind;
+
+	dst->page_count = num_pages;
+	while (num_pages--) {
+		unsigned long flags;
+		void *d;
+
+		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		if (d == NULL)
+			goto unwind;
+
+		local_irq_save(flags);
+		if (use_ggtt) {
+			void __iomem *s;
+
+			/* Simply ignore tiling or any overlapping fence.
+			 * It's part of the error state, and this hopefully
+			 * captures what the GPU read.
+			 */
+
+			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+						     reloc_offset);
+			memcpy_fromio(d, s, PAGE_SIZE);
+			io_mapping_unmap_atomic(s);
+		} else {
+			struct page *page;
+			void *s;
+
+			page = i915_gem_object_get_page(src, i);
+
+			drm_clflush_pages(&page, 1);
+
+			s = kmap_atomic(page);
+			memcpy(d, s, PAGE_SIZE);
+			kunmap_atomic(s);
+
+			drm_clflush_pages(&page, 1);
+		}
+		local_irq_restore(flags);
+
+		dst->pages[i++] = d;
+		reloc_offset += PAGE_SIZE;
+	}
+
+	return dst;
+
+unwind:
+	while (i--)
+		kfree(dst->pages[i]);
+	kfree(dst);
+	return NULL;
+}
+#define i915_error_ggtt_object_create(dev_priv, src) \
+	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+		       struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	int i;
+
+	err->size = obj->base.size;
+	err->name = obj->base.name;
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
+	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+	err->gtt_offset = vma->node.start;
+	err->read_domains = obj->base.read_domains;
+	err->write_domain = obj->base.write_domain;
+	err->fence_reg = obj->fence_reg;
+	err->pinned = 0;
+	if (i915_gem_obj_is_pinned(obj))
+		err->pinned = 1;
+	err->tiling = obj->tiling_mode;
+	err->dirty = obj->dirty;
+	err->purgeable = obj->madv != I915_MADV_WILLNEED;
+	err->userptr = obj->userptr.mm != NULL;
+	err->ring = obj->last_write_req ?
+			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+	err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head)
+{
+	struct i915_vma *vma;
+	int i = 0;
+
+	list_for_each_entry(vma, head, mm_list) {
+		capture_bo(err++, vma);
+		if (++i == count)
+			break;
+	}
+
+	return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head,
+			     struct i915_address_space *vm)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_i915_error_buffer * const first = err;
+	struct drm_i915_error_buffer * const last = err + count;
+
+	list_for_each_entry(obj, head, global_list) {
+		struct i915_vma *vma;
+
+		if (err == last)
+			break;
+
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			if (vma->vm == vm && vma->pin_count > 0)
+				capture_bo(err++, vma);
+	}
+
+	return err - first;
+}
+
+/* Generate a semi-unique error code. The code is not meant to have meaning;
+ * its only purpose is to try to prevent falsely-duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO: Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+					 struct drm_i915_error_state *error,
+					 int *ring_id)
+{
+	uint32_t error_code = 0;
+	int i;
+
+	/* IPEHR would be an ideal way to detect errors, as it's the gross
+	 * measure of "the command that hung." However, it contains some very
+	 * common synchronization commands which almost always appear in cases
+	 * that are strictly a client bug. Use instdone to differentiate those
+	 * somewhat.
+	 */
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+			if (ring_id)
+				*ring_id = i;
+
+			return error->ring[i].ipehr ^ error->ring[i].instdone;
+		}
+	}
+
+	return error_code;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+				   struct drm_i915_error_state *error)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
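+	/* The fence register layout differs across generations: 32-bit
+	 * registers on gen2/3, 64-bit registers from gen4 onwards.
+	 */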
+	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+		for (i = 0; i < dev_priv->num_fence_regs; i++)
+			error->fence[i] = I915_READ(FENCE_REG(i));
+	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+		for (i = 0; i < dev_priv->num_fence_regs; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		for (i = 0; i < dev_priv->num_fence_regs; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+	}
+}
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error,
+					struct intel_engine_cs *ring,
+					struct drm_i915_error_ring *ering)
+{
+	struct intel_engine_cs *to;
+	int i;
+
+	if (!i915_semaphore_is_enabled(dev_priv->dev))
+		return;
+
+	if (!error->semaphore_obj)
+		error->semaphore_obj =
+			i915_error_ggtt_object_create(dev_priv,
+						      dev_priv->semaphore_obj);
+
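+	/* Record the last semaphore value this engine signalled to each of
+	 * the other engines.
+	 */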
+	for_each_ring(to, dev_priv, i) {
+		int idx;
+		u16 signal_offset;
+		u32 *tmp;
+
+		if (ring == to)
+			continue;
+
+		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+				/ 4;
+		tmp = error->semaphore_obj->pages[0];
+		idx = intel_ring_sync_index(ring, to);
+
+		ering->semaphore_mboxes[idx] = tmp[signal_offset];
+		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+	}
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *ring,
+					struct drm_i915_error_ring *ering)
+{
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+	if (HAS_VEBOX(dev_priv->dev)) {
+		ering->semaphore_mboxes[2] =
+			I915_READ(RING_SYNC_2(ring->mmio_base));
+		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+	}
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+				   struct drm_i915_error_state *error,
+				   struct intel_engine_cs *ring,
+				   struct drm_i915_error_ring *ering)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+		if (INTEL_INFO(dev)->gen >= 8)
+			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+		else
+			gen6_record_semaphore_state(dev_priv, ring, ering);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
+			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+		}
+		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+	} else {
+		ering->faddr = I915_READ(DMA_FADD_I8XX);
+		ering->ipeir = I915_READ(IPEIR);
+		ering->ipehr = I915_READ(IPEHR);
+		ering->instdone = I915_READ(GEN2_INSTDONE);
+	}
+
+	ering->waiting = waitqueue_active(&ring->irq_queue);
+	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+	ering->seqno = ring->get_seqno(ring, false);
+	ering->acthd = intel_ring_get_active_head(ring);
+	ering->start = I915_READ_START(ring);
+	ering->head = I915_READ_HEAD(ring);
+	ering->tail = I915_READ_TAIL(ring);
+	ering->ctl = I915_READ_CTL(ring);
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		int mmio;
+
+		if (IS_GEN7(dev)) {
+			switch (ring->id) {
+			default:
+			case RCS:
+				mmio = RENDER_HWS_PGA_GEN7;
+				break;
+			case BCS:
+				mmio = BLT_HWS_PGA_GEN7;
+				break;
+			case VCS:
+				mmio = BSD_HWS_PGA_GEN7;
+				break;
+			case VECS:
+				mmio = VEBOX_HWS_PGA_GEN7;
+				break;
+			}
+		} else if (IS_GEN6(ring->dev)) {
+			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+		} else {
+			/* XXX: gen8 returns to sanity */
+			mmio = RING_HWS_PGA(ring->mmio_base);
+		}
+
+		ering->hws = I915_READ(mmio);
+	}
+
+	ering->hangcheck_score = ring->hangcheck.score;
+	ering->hangcheck_action = ring->hangcheck.action;
+
+	if (USES_PPGTT(dev)) {
+		int i;
+
+		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+
+		if (IS_GEN6(dev))
+			ering->vm_info.pp_dir_base =
+				I915_READ(RING_PP_DIR_BASE_READ(ring));
+		else if (IS_GEN7(dev))
+			ering->vm_info.pp_dir_base =
+				I915_READ(RING_PP_DIR_BASE(ring));
+		else if (INTEL_INFO(dev)->gen >= 8)
+			for (i = 0; i < 4; i++) {
+				ering->vm_info.pdp[i] =
+					I915_READ(GEN8_RING_PDP_UDW(ring, i));
+				ering->vm_info.pdp[i] <<= 32;
+				ering->vm_info.pdp[i] |=
+					I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			}
+	}
+}
+
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+					   struct drm_i915_error_state *error,
+					   struct drm_i915_error_ring *ering)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	/* Currently render ring is the only HW context user */
+	if (ring->id != RCS || !error->ccid)
+		return;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (!i915_gem_obj_ggtt_bound(obj))
+			continue;
+
+		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+			break;
+		}
+	}
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+				  struct drm_i915_error_state *error)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *request;
+	int i, count;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_ringbuffer *rbuf;
+
+		error->ring[i].pid = -1;
+
+		if (ring->dev == NULL)
+			continue;
+
+		error->ring[i].valid = true;
+
+		i915_record_ring_state(dev, error, ring, &error->ring[i]);
+
+		request = i915_gem_find_active_request(ring);
+		if (request) {
+			struct i915_address_space *vm;
+
+			vm = request->ctx && request->ctx->ppgtt ?
+				&request->ctx->ppgtt->base :
+				&dev_priv->gtt.base;
+
+			/* We need to copy these to an anonymous buffer
+			 * as the simplest method to avoid being overwritten
+			 * by userspace.
+			 */
+			error->ring[i].batchbuffer =
+				i915_error_object_create(dev_priv,
+							 request->batch_obj,
+							 vm);
+
+			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+				error->ring[i].wa_batchbuffer =
+					i915_error_ggtt_object_create(dev_priv,
+							     ring->scratch.obj);
+
+			if (request->pid) {
+				struct task_struct *task;
+
+				rcu_read_lock();
+				task = pid_task(request->pid, PIDTYPE_PID);
+				if (task) {
+					strcpy(error->ring[i].comm, task->comm);
+					error->ring[i].pid = task->pid;
+				}
+				rcu_read_unlock();
+			}
+		}
+
+		if (i915.enable_execlists) {
+			/* TODO: This is only a small fix to keep basic error
+			 * capture working, but we need to add more information
+			 * for it to be useful (e.g. dump the context being
+			 * executed).
+			 */
+			if (request)
+				rbuf = request->ctx->engine[ring->id].ringbuf;
+			else
+				rbuf = ring->default_context->engine[ring->id].ringbuf;
+		} else
+			rbuf = ring->buffer;
+
+		error->ring[i].cpu_ring_head = rbuf->head;
+		error->ring[i].cpu_ring_tail = rbuf->tail;
+
+		error->ring[i].ringbuffer =
+			i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+
+		error->ring[i].hws_page =
+			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+
+		i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+		count = 0;
+		list_for_each_entry(request, &ring->request_list, list)
+			count++;
+
+		error->ring[i].num_requests = count;
+		error->ring[i].requests =
+			kcalloc(count, sizeof(*error->ring[i].requests),
+				GFP_ATOMIC);
+		if (error->ring[i].requests == NULL) {
+			error->ring[i].num_requests = 0;
+			continue;
+		}
+
+		count = 0;
+		list_for_each_entry(request, &ring->request_list, list) {
+			struct drm_i915_error_request *erq;
+
+			erq = &error->ring[i].requests[count++];
+			erq->seqno = request->seqno;
+			erq->jiffies = request->emitted_jiffies;
+			erq->tail = request->postfix;
+		}
+	}
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+				struct drm_i915_error_state *error,
+				struct i915_address_space *vm,
+				const int ndx)
+{
+	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int i;
+
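+	/* Count the active and pinned objects first, so that both lists can
+	 * be carved out of a single allocation below.
+	 */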
+	i = 0;
+	list_for_each_entry(vma, &vm->active_list, mm_list)
+		i++;
+	error->active_bo_count[ndx] = i;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			if (vma->vm == vm && vma->pin_count > 0)
+				i++;
+	}
+	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+	if (i) {
+		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
+		if (active_bo)
+			pinned_bo = active_bo + error->active_bo_count[ndx];
+	}
+
+	if (active_bo)
+		error->active_bo_count[ndx] =
+			capture_active_bo(active_bo,
+					  error->active_bo_count[ndx],
+					  &vm->active_list);
+
+	if (pinned_bo)
+		error->pinned_bo_count[ndx] =
+			capture_pinned_bo(pinned_bo,
+					  error->pinned_bo_count[ndx],
+					  &dev_priv->mm.bound_list, vm);
+	error->active_bo[ndx] = active_bo;
+	error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+				     struct drm_i915_error_state *error)
+{
+	struct i915_address_space *vm;
+	int cnt = 0, i = 0;
+
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		cnt++;
+
+	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+					 GFP_ATOMIC);
+	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+					 GFP_ATOMIC);
+
+	if (error->active_bo == NULL ||
+	    error->pinned_bo == NULL ||
+	    error->active_bo_count == NULL ||
+	    error->pinned_bo_count == NULL) {
+		kfree(error->active_bo);
+		kfree(error->active_bo_count);
+		kfree(error->pinned_bo);
+		kfree(error->pinned_bo_count);
+
+		error->active_bo = NULL;
+		error->active_bo_count = NULL;
+		error->pinned_bo = NULL;
+		error->pinned_bo_count = NULL;
+	} else {
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+			i915_gem_capture_vm(dev_priv, error, vm, i++);
+
+		error->vm_count = cnt;
+	}
+}
+
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+				   struct drm_i915_error_state *error)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i;
+
+	/* General organization
+	 * 1. Registers specific to a single generation
+	 * 2. Registers which belong to multiple generations
+	 * 3. Feature specific registers.
+	 * 4. Everything else
+	 * Please try to follow the order.
+	 */
+
+	/* 1: Registers specific to a single generation */
+	if (IS_VALLEYVIEW(dev)) {
+		error->gtier[0] = I915_READ(GTIER);
+		error->ier = I915_READ(VLV_IER);
+		error->forcewake = I915_READ(FORCEWAKE_VLV);
+	}
+
+	if (IS_GEN7(dev))
+		error->err_int = I915_READ(GEN7_ERR_INT);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+	}
+
+	if (IS_GEN6(dev)) {
+		error->forcewake = I915_READ(FORCEWAKE);
+		error->gab_ctl = I915_READ(GAB_CTL);
+		error->gfx_mode = I915_READ(GFX_MODE);
+	}
+
+	/* 2: Registers which belong to multiple generations */
+	if (INTEL_INFO(dev)->gen >= 7)
+		error->forcewake = I915_READ(FORCEWAKE_MT);
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->derrmr = I915_READ(DERRMR);
+		error->error = I915_READ(ERROR_GEN6);
+		error->done_reg = I915_READ(DONE_REG);
+	}
+
+	/* 3: Feature specific registers */
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		error->gam_ecochk = I915_READ(GAM_ECOCHK);
+		error->gac_eco = I915_READ(GAC_ECO_BITS);
+	}
+
+	/* 4: Everything else */
+	if (HAS_HW_CONTEXTS(dev))
+		error->ccid = I915_READ(CCID);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		error->ier = I915_READ(GEN8_DE_MISC_IER);
+		for (i = 0; i < 4; i++)
+			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+	} else if (HAS_PCH_SPLIT(dev)) {
+		error->ier = I915_READ(DEIER);
+		error->gtier[0] = I915_READ(GTIER);
+	} else if (IS_GEN2(dev)) {
+		error->ier = I915_READ16(IER);
+	} else if (!IS_VALLEYVIEW(dev)) {
+		error->ier = I915_READ(IER);
+	}
+	error->eir = I915_READ(EIR);
+	error->pgtbl_er = I915_READ(PGTBL_ER);
+
+	i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
+static void i915_error_capture_msg(struct drm_device *dev,
+				   struct drm_i915_error_state *error,
+				   bool wedged,
+				   const char *error_msg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ecode;
+	int ring_id = -1, len;
+
+	ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+	len = scnprintf(error->error_msg, sizeof(error->error_msg),
+			"GPU HANG: ecode %d:%d:0x%08x",
+			INTEL_INFO(dev)->gen, ring_id, ecode);
+
+	if (ring_id != -1 && error->ring[ring_id].pid != -1)
+		len += scnprintf(error->error_msg + len,
+				 sizeof(error->error_msg) - len,
+				 ", in %s [%d]",
+				 error->ring[ring_id].comm,
+				 error->ring[ring_id].pid);
+
+	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+		  ", reason: %s, action: %s",
+		  error_msg,
+		  wedged ? "reset" : "continue");
+}
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+				   struct drm_i915_error_state *error)
+{
+	error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+	error->iommu = intel_iommu_gfx_mapped;
+#endif
+	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	error->suspend_count = dev_priv->suspend_count;
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+			      const char *error_msg)
+{
+	static bool warned;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+	unsigned long flags;
+
+	/* Account for pipe specific data like PIPE*STAT */
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
+	if (!error) {
+		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+		return;
+	}
+
+	kref_init(&error->ref);
+
+	i915_capture_gen_state(dev_priv, error);
+	i915_capture_reg_state(dev_priv, error);
+	i915_gem_capture_buffers(dev_priv, error);
+	i915_gem_record_fences(dev, error);
+	i915_gem_record_rings(dev, error);
+
+	do_gettimeofday(&error->time);
+
+	error->overlay = intel_overlay_capture_error_state(dev);
+	error->display = intel_display_capture_error_state(dev);
+
+	i915_error_capture_msg(dev, error, wedged, error_msg);
+	DRM_INFO("%s\n", error->error_msg);
+
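+	/* Only a single error state is retained at a time; if one is already
+	 * pending, this capture is discarded.
+	 */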
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	if (dev_priv->gpu_error.first_error == NULL) {
+		dev_priv->gpu_error.first_error = error;
+		error = NULL;
+	}
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+	if (error) {
+		i915_error_state_free(&error->ref);
+		return;
+	}
+
+	if (!warned) {
+		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+		warned = true;
+	}
+}
+
+void i915_error_state_get(struct drm_device *dev,
+			  struct i915_error_state_file_priv *error_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->gpu_error.lock);
+	error_priv->error = dev_priv->gpu_error.first_error;
+	if (error_priv->error)
+		kref_get(&error_priv->error->ref);
+	spin_unlock_irq(&dev_priv->gpu_error.lock);
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+	if (error_priv->error)
+		kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+
+	spin_lock_irq(&dev_priv->gpu_error.lock);
+	error = dev_priv->gpu_error.first_error;
+	dev_priv->gpu_error.first_error = NULL;
+	spin_unlock_irq(&dev_priv->gpu_error.lock);
+
+	if (error)
+		kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+	switch (type) {
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
+	default: return "";
+	}
+}
+
+/* NB: please notice the memset */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
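+	/* Not every generation populates all I915_NUM_INSTDONE_REG slots, so
+	 * the memset above ensures the unused ones read back as zero.
+	 */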
+	if (IS_GEN2(dev) || IS_GEN3(dev))
+		instdone[0] = I915_READ(GEN2_INSTDONE);
+	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
+		instdone[1] = I915_READ(GEN4_INSTDONE1);
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
+		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 0000000..c4cb1c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS			0xc000
+#define   GS_BOOTROM_SHIFT		1
+#define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_RSA_FAILED		  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_UKERNEL_SHIFT		8
+#define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_DPC_ERROR		  (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
+#define   GS_MIA_SHIFT			16
+#define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
+#define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)
+
+#define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH(i)		(0xc200 + (i) * 4)
+#define DMA_ADDR_0_LOW			0xc300
+#define DMA_ADDR_0_HIGH			0xc304
+#define DMA_ADDR_1_LOW			0xc308
+#define DMA_ADDR_1_HIGH			0xc30c
+#define   DMA_ADDRESS_SPACE_WOPCM	  (7 << 16)
+#define   DMA_ADDRESS_SPACE_GTT		  (8 << 16)
+#define DMA_COPY_SIZE			0xc310
+#define DMA_CTRL			0xc314
+#define   UOS_MOVE			  (1<<4)
+#define   START_DMA			  (1<<0)
+#define DMA_GUC_WOPCM_OFFSET		0xc340
+#define   GUC_WOPCM_OFFSET_VALUE	  0x80000	/* 512KB */
+#define GUC_MAX_IDLE_COUNT		0xC3E4
+
+#define GUC_WOPCM_SIZE			0xc050
+#define   GUC_WOPCM_SIZE_VALUE		  (0x80 << 12)	/* 512KB */
+
+/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
+#define	GUC_WOPCM_TOP			(GUC_WOPCM_SIZE_VALUE)
+
+#define GEN8_GT_PM_CONFIG		0x138140
+#define GEN9LP_GT_PM_CONFIG		0x138140
+#define GEN9_GT_PM_CONFIG		0x13816c
+#define   GT_DOORBELL_ENABLE		  (1<<0)
+
+#define GEN8_GTCR			0x4274
+#define   GEN8_GTCR_INVALIDATE		  (1<<0)
+
+#define GUC_ARAT_C6DIS			0xA178
+
+#define GUC_SHIM_CONTROL		0xc064
+#define   GUC_DISABLE_SRAM_INIT_TO_ZEROES	(1<<0)
+#define   GUC_ENABLE_READ_CACHE_LOGIC		(1<<1)
+#define   GUC_ENABLE_MIA_CACHING		(1<<2)
+#define   GUC_GEN10_MSGCH_ENABLE		(1<<4)
+#define   GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	(1<<9)
+#define   GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	(1<<10)
+#define   GUC_ENABLE_MIA_CLOCK_GATING		(1<<15)
+#define   GUC_GEN10_SHIM_WC_ENABLE		(1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE	(GUC_DISABLE_SRAM_INIT_TO_ZEROES	| \
+				 GUC_ENABLE_READ_CACHE_LOGIC		| \
+				 GUC_ENABLE_MIA_CACHING			| \
+				 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	| \
+				 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	| \
+				 GUC_ENABLE_MIA_CLOCK_GATING)
+
+#define HOST2GUC_INTERRUPT		0xc4c8
+#define   HOST2GUC_TRIGGER		  (1<<0)
+
+#define DRBMISC1			0x1984
+#define   DOORBELL_ENABLE		  (1<<0)
+
+#define GEN8_DRBREGL(x)			(0x1000 + (x) * 8)
+#define   GEN8_DRB_VALID		  (1<<0)
+#define GEN8_DRBREGU(x)			(GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR			0x44054
+
+#define GUC_BCS_RCS_IER			0xC550
+#define GUC_VCS2_VCS1_IER		0xC554
+#define GUC_WD_VECS_IER			0xC558
+#define GUC_PM_P24C_IER			0xC55C
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
new file mode 100644
index 0000000..036b42b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -0,0 +1,975 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/circ_buf.h>
+#include "i915_drv.h"
+#include "intel_guc.h"
+
+/**
+ * DOC: GuC Client
+ *
+ * i915_guc_client:
+ * We use the term client to avoid confusion with contexts. An i915_guc_client
+ * is equivalent to the GuC object guc_context_desc. This context descriptor
+ * is allocated from a pool of 1024 entries. The kernel driver allocates a
+ * doorbell and a workqueue for it, along with the process descriptor
+ * (guc_process_desc), which is mapped to client space so that the client can
+ * write a Work Item and then ring the doorbell.
+ *
+ * To simplify the implementation, we allocate one gem object that contains all
+ * pages for doorbell, process descriptor and workqueue.
+ *
+ * The Scratch registers:
+ * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
+ * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
+ * It then triggers an interrupt on the GuC via another register write
+ * (0xC4C8). The firmware writes a success/fail code back to the action
+ * register after processing the request. The kernel driver polls waiting for
+ * this update and then proceeds.
+ * See host2guc_action()
+ *
+ * Doorbells:
+ * Doorbells are interrupts to the uKernel. A doorbell is a single cache line
+ * (QW) mapped into process space.
+ *
+ * Work Items:
+ * There are several types of work items that the host may place into a
+ * workqueue, each with its own requirements and limitations. Currently only
+ * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
+ * represents an in-order queue. The kernel driver packs the ring tail pointer
+ * and an ELSP context descriptor dword into each Work Item.
+ * See guc_add_workqueue_item()
+ *
+ */
+
+/*
+ * Read GuC command/status register (SOFT_SCRATCH_0)
+ * Return true if it contains a response rather than a command
+ */
+static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
+					    u32 *status)
+{
+	u32 val = I915_READ(SOFT_SCRATCH(0));
+	*status = val;
+	return GUC2HOST_IS_RESPONSE(val);
+}
+
+static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 status;
+	int i;
+	int ret;
+
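+	/* An action occupies at most 15 scratch registers; the error path
+	 * below reads SOFT_SCRATCH(15) for any extended response from the
+	 * firmware.
+	 */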
+	if (WARN_ON(len < 1 || len > 15))
+		return -EINVAL;
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	spin_lock(&dev_priv->guc.host2guc_lock);
+
+	dev_priv->guc.action_count += 1;
+	dev_priv->guc.action_cmd = data[0];
+
+	for (i = 0; i < len; i++)
+		I915_WRITE(SOFT_SCRATCH(i), data[i]);
+
+	POSTING_READ(SOFT_SCRATCH(i - 1));
+
+	I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
+
+	/* No HOST2GUC command should take longer than 10ms */
+	ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+	if (status != GUC2HOST_STATUS_SUCCESS) {
+		/*
+		 * Either the GuC explicitly returned an error (which
+		 * we convert to -EIO here) or no response at all was
+		 * received within the timeout limit (-ETIMEDOUT)
+		 */
+		if (ret != -ETIMEDOUT)
+			ret = -EIO;
+
+		DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
+				"status=0x%08X response=0x%08X\n",
+				data[0], ret, status,
+				I915_READ(SOFT_SCRATCH(15)));
+
+		dev_priv->guc.action_fail += 1;
+		dev_priv->guc.action_err = ret;
+	}
+	dev_priv->guc.action_status = status;
+
+	spin_unlock(&dev_priv->guc.host2guc_lock);
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
+}
+
+/*
+ * Tell the GuC to allocate or deallocate a specific doorbell
+ */
+
+static int host2guc_allocate_doorbell(struct intel_guc *guc,
+				      struct i915_guc_client *client)
+{
+	u32 data[2];
+
+	data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
+	data[1] = client->ctx_index;
+
+	return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_release_doorbell(struct intel_guc *guc,
+				     struct i915_guc_client *client)
+{
+	u32 data[2];
+
+	data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
+	data[1] = client->ctx_index;
+
+	return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_sample_forcewake(struct intel_guc *guc,
+				     struct i915_guc_client *client)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_device *dev = dev_priv->dev;
+	u32 data[2];
+
+	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
+	/* WaRsDisableCoarsePowerGating:skl,bxt */
+	if (!intel_enable_rc6(dev_priv->dev) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+	    (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
+	    (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+		data[1] = 0;
+	else
+		/* bits 0 and 1 are for the Render and Media domains respectively */
+		data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+	return host2guc_action(guc, data, ARRAY_SIZE(data));
+}
+
+/*
+ * Initialise, update, or clear doorbell data shared with the GuC
+ *
+ * These functions modify shared data and so need access to the mapped
+ * client object which contains the page being used for the doorbell
+ */
+
+static void guc_init_doorbell(struct intel_guc *guc,
+			      struct i915_guc_client *client)
+{
+	struct guc_doorbell_info *doorbell;
+	void *base;
+
+	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+	doorbell = base + client->doorbell_offset;
+
+	doorbell->db_status = 1;
+	doorbell->cookie = 0;
+
+	kunmap_atomic(base);
+}
+
+static int guc_ring_doorbell(struct i915_guc_client *gc)
+{
+	struct guc_process_desc *desc;
+	union guc_doorbell_qw db_cmp, db_exc, db_ret;
+	union guc_doorbell_qw *db;
+	void *base;
+	int attempt = 2, ret = -EAGAIN;
+
+	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
+	desc = base + gc->proc_desc_offset;
+
+	/* Update the tail so it is visible to GuC */
+	desc->tail = gc->wq_tail;
+
+	/* current cookie */
+	db_cmp.db_status = GUC_DOORBELL_ENABLED;
+	db_cmp.cookie = gc->cookie;
+
+	/* cookie to be updated */
+	db_exc.db_status = GUC_DOORBELL_ENABLED;
+	db_exc.cookie = gc->cookie + 1;
+	if (db_exc.cookie == 0)
+		db_exc.cookie = 1;
+
+	/* pointer of current doorbell cacheline */
+	db = base + gc->doorbell_offset;
+
+	while (attempt--) {
+		/* lets ring the doorbell */
+		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
+			db_cmp.value_qw, db_exc.value_qw);
+
+		/* if the exchange was successfully executed */
+		if (db_ret.value_qw == db_cmp.value_qw) {
+			/* db was successfully rung */
+			gc->cookie = db_exc.cookie;
+			ret = 0;
+			break;
+		}
+
+		/* XXX: doorbell was lost and need to acquire it again */
+		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
+			break;
+
+		DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
+			  db_cmp.cookie, db_ret.cookie);
+
+		/* update the cookie to newly read cookie from GuC */
+		db_cmp.cookie = db_ret.cookie;
+		db_exc.cookie = db_ret.cookie + 1;
+		if (db_exc.cookie == 0)
+			db_exc.cookie = 1;
+	}
+
+	kunmap_atomic(base);
+	return ret;
+}
+
+static void guc_disable_doorbell(struct intel_guc *guc,
+				 struct i915_guc_client *client)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct guc_doorbell_info *doorbell;
+	void *base;
+	int drbreg = GEN8_DRBREGL(client->doorbell_id);
+	int value;
+
+	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+	doorbell = base + client->doorbell_offset;
+
+	doorbell->db_status = 0;
+
+	kunmap_atomic(base);
+
+	I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
+
+	value = I915_READ(drbreg);
+	WARN_ON((value & GEN8_DRB_VALID) != 0);
+
+	I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
+	I915_WRITE(drbreg, 0);
+
+	/* XXX: wait for any interrupts */
+	/* XXX: wait for workqueue to drain */
+}
+
+/*
+ * Select, assign and release doorbell cachelines
+ *
+ * These functions track which doorbell cachelines are in use.
+ * The data they manipulate is protected by the host2guc lock.
+ */
+
+static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+{
+	const uint32_t cacheline_size = cache_line_size();
+	uint32_t offset;
+
+	spin_lock(&guc->host2guc_lock);
+
+	/* Doorbell uses a single cache line within a page */
+	offset = offset_in_page(guc->db_cacheline);
+
+	/* Moving to next cache line to reduce contention */
+	guc->db_cacheline += cacheline_size;
+
+	spin_unlock(&guc->host2guc_lock);
+
+	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
+			offset, guc->db_cacheline, cacheline_size);
+
+	return offset;
+}
+
+static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
+{
+	/*
+	 * The bitmap is split into two halves; the first half is used for
+	 * normal priority contexts, the second half for high-priority ones.
+	 * Note that logically higher priorities are numerically less than
+	 * normal ones, so the test below means "is it high-priority?"
+	 */
+	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
+	const uint16_t half = GUC_MAX_DOORBELLS / 2;
+	const uint16_t start = hi_pri ? half : 0;
+	const uint16_t end = start + half;
+	uint16_t id;
+
+	spin_lock(&guc->host2guc_lock);
+	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
+	if (id == end)
+		id = GUC_INVALID_DOORBELL_ID;
+	else
+		bitmap_set(guc->doorbell_bitmap, id, 1);
+	spin_unlock(&guc->host2guc_lock);
+
+	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
+			hi_pri ? "high" : "normal", id);
+
+	return id;
+}
+
+static void release_doorbell(struct intel_guc *guc, uint16_t id)
+{
+	spin_lock(&guc->host2guc_lock);
+	bitmap_clear(guc->doorbell_bitmap, id, 1);
+	spin_unlock(&guc->host2guc_lock);
+}
+
+/*
+ * Initialise the process descriptor shared with the GuC firmware.
+ */
+static void guc_init_proc_desc(struct intel_guc *guc,
+			       struct i915_guc_client *client)
+{
+	struct guc_process_desc *desc;
+	void *base;
+
+	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+	desc = base + client->proc_desc_offset;
+
+	memset(desc, 0, sizeof(*desc));
+
+	/*
+	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
+	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
+	 * space for kernel clients (map on demand instead? May make debug
+	 * easier to have it mapped).
+	 */
+	desc->wq_base_addr = 0;
+	desc->db_base_addr = 0;
+
+	desc->context_id = client->ctx_index;
+	desc->wq_size_bytes = client->wq_size;
+	desc->wq_status = WQ_STATUS_ACTIVE;
+	desc->priority = client->priority;
+
+	kunmap_atomic(base);
+}
+
+/*
+ * Initialise/clear the context descriptor shared with the GuC firmware.
+ *
+ * This descriptor tells the GuC where (in GGTT space) to find the important
+ * data structures relating to this client (doorbell, process descriptor,
+ * write queue, etc).
+ */
+
+static void guc_init_ctx_desc(struct intel_guc *guc,
+			      struct i915_guc_client *client)
+{
+	struct intel_context *ctx = client->owner;
+	struct guc_context_desc desc;
+	struct sg_table *sg;
+	int i;
+
+	memset(&desc, 0, sizeof(desc));
+
+	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
+	desc.context_id = client->ctx_index;
+	desc.priority = client->priority;
+	desc.db_id = client->doorbell_id;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct guc_execlist_context *lrc = &desc.lrc[i];
+		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+		struct intel_engine_cs *ring;
+		struct drm_i915_gem_object *obj;
+		uint64_t ctx_desc;
+
+		/* TODO: We have a design issue to be solved here. We only know
+		 * which engine the user will use once we receive the first
+		 * batch, but at this point the GuC expects the lrc and ring to
+		 * be pinned. This is not an issue for the default context,
+		 * currently the only owner of a GuC client, but any future
+		 * owner must make sure the lrc is pinned before getting here.
+		 */
+		obj = ctx->engine[i].state;
+		if (!obj)
+			break;	/* XXX: continue? */
+
+		ring = ringbuf->ring;
+		ctx_desc = intel_lr_context_descriptor(ctx, ring);
+		lrc->context_desc = (u32)ctx_desc;
+
+		/* The state page is after PPHWSP */
+		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
+				LRC_STATE_PN * PAGE_SIZE;
+		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+				(ring->id << GUC_ELC_ENGINE_OFFSET);
+
+		obj = ringbuf->obj;
+
+		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
+		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
+		lrc->ring_next_free_location = lrc->ring_begin;
+		lrc->ring_current_tail_pointer_value = 0;
+
+		desc.engines_used |= (1 << ring->id);
+	}
+
+	WARN_ON(desc.engines_used == 0);
+
+	/*
+	 * The CPU address is only needed at certain points, so kmap_atomic on
+	 * demand instead of storing it in the ctx descriptor.
+	 * XXX: May make debug easier to have it mapped
+	 */
+	desc.db_trigger_cpu = 0;
+	desc.db_trigger_uk = client->doorbell_offset +
+		i915_gem_obj_ggtt_offset(client->client_obj);
+	desc.db_trigger_phy = client->doorbell_offset +
+		sg_dma_address(client->client_obj->pages->sgl);
+
+	desc.process_desc = client->proc_desc_offset +
+		i915_gem_obj_ggtt_offset(client->client_obj);
+
+	desc.wq_addr = client->wq_offset +
+		i915_gem_obj_ggtt_offset(client->client_obj);
+
+	desc.wq_size = client->wq_size;
+
+	/*
+	 * XXX: Take LRCs from an existing intel_context if this is not an
+	 * IsKMDCreatedContext client
+	 */
+	desc.desc_private = (uintptr_t)client;
+
+	/* Pool context is pinned already */
+	sg = guc->ctx_pool_obj->pages;
+	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+			     sizeof(desc) * client->ctx_index);
+}
+
+static void guc_fini_ctx_desc(struct intel_guc *guc,
+			      struct i915_guc_client *client)
+{
+	struct guc_context_desc desc;
+	struct sg_table *sg;
+
+	memset(&desc, 0, sizeof(desc));
+
+	sg = guc->ctx_pool_obj->pages;
+	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+			     sizeof(desc) * client->ctx_index);
+}
+
+/* Wait for space in the workqueue and return the offset of a free slot */
+static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+{
+	struct guc_process_desc *desc;
+	void *base;
+	u32 size = sizeof(struct guc_wq_item);
+	int ret = 0, timeout_counter = 200;
+
+	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
+	desc = base + gc->proc_desc_offset;
+
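+	/* Poll for free space in the circular workqueue, retrying for up to
+	 * ~200ms (200 iterations of a 1ms wait) before giving up.
+	 */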
+	while (timeout_counter-- > 0) {
+		ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
+				gc->wq_size) >= size, 1);
+
+		if (!ret) {
+			*offset = gc->wq_tail;
+
+			/* advance the tail for next workqueue item */
+			gc->wq_tail += size;
+			gc->wq_tail &= gc->wq_size - 1;
+
+			/* this will break the loop */
+			timeout_counter = 0;
+		}
+	}
+
+	kunmap_atomic(base);
+
+	return ret;
+}
+
+static int guc_add_workqueue_item(struct i915_guc_client *gc,
+				  struct drm_i915_gem_request *rq)
+{
+	enum intel_ring_id ring_id = rq->ring->id;
+	struct guc_wq_item *wqi;
+	void *base;
+	u32 tail, wq_len, wq_off = 0;
+	int ret;
+
+	ret = guc_get_workqueue_space(gc, &wq_off);
+	if (ret)
+		return ret;
+
+	/* For now a workqueue item is 4 DWs and the workqueue buffer is
+	 * 2 pages, so a wqi structure can neither straddle a page boundary
+	 * nor wrap around to the beginning. This simplifies the
+	 * implementation below.
+	 *
+	 * XXX: if that ever changes, we would need to build the item in a
+	 * temporary wqi and copy it into the workqueue buffer dword by dword.
+	 */
+	WARN_ON(sizeof(struct guc_wq_item) != 16);
+	WARN_ON(wq_off & 3);
+
+	/* wq starts from the page after doorbell / process_desc */
+	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
+			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+	wq_off &= PAGE_SIZE - 1;
+	wqi = (struct guc_wq_item *)((char *)base + wq_off);
+
+	/* len does not include the header */
+	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+	wqi->header = WQ_TYPE_INORDER |
+			(wq_len << WQ_LEN_SHIFT) |
+			(ring_id << WQ_TARGET_SHIFT) |
+			WQ_NO_WCFLUSH_WAIT;
+
+	/* The GuC wants only the low-order word of the context descriptor */
+	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+
+	/* The GuC firmware wants the tail index in QWords, not bytes */
+	tail = rq->ringbuf->tail >> 3;
+	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
+	wqi->fence_id = 0; /* XXX: which fence should go here? */
+
+	kunmap_atomic(base);
+
+	return 0;
+}
+
+#define CTX_RING_BUFFER_START		0x08
+
+/* Update the ringbuffer pointer in a saved context image */
+static void lr_context_update(struct drm_i915_gem_request *rq)
+{
+	enum intel_ring_id ring_id = rq->ring->id;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
+	struct page *page;
+	uint32_t *reg_state;
+
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	reg_state = kmap_atomic(page);
+
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
+
+	kunmap_atomic(reg_state);
+}
+
+/**
+ * i915_guc_submit() - Submit commands through GuC
+ * @client:	the guc client where commands will go through
+ * @rq:		the request carrying the commands to be submitted
+ *
+ * Return:	0 on success, otherwise an errno
+ */
+int i915_guc_submit(struct i915_guc_client *client,
+		    struct drm_i915_gem_request *rq)
+{
+	struct intel_guc *guc = client->guc;
+	enum intel_ring_id ring_id = rq->ring->id;
+	unsigned long flags;
+	int q_ret, b_ret;
+
+	/* Needed because the pinning of the ctx and ring is deferred.
+	 * XXX: should this move to right after the ring is pinned?
+	 */
+	lr_context_update(rq);
+
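+	/* Submission is a two-phase operation: first add a work item to the
+	 * client's workqueue, then ring the doorbell to notify the GuC.
+	 */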
+	spin_lock_irqsave(&client->wq_lock, flags);
+
+	q_ret = guc_add_workqueue_item(client, rq);
+	if (q_ret == 0)
+		b_ret = guc_ring_doorbell(client);
+
+	client->submissions[ring_id] += 1;
+	if (q_ret) {
+		client->q_fail += 1;
+		client->retcode = q_ret;
+	} else if (b_ret) {
+		client->b_fail += 1;
+		client->retcode = q_ret = b_ret;
+	} else {
+		client->retcode = 0;
+	}
+	spin_unlock_irqrestore(&client->wq_lock, flags);
+
+	spin_lock(&guc->host2guc_lock);
+	guc->submissions[ring_id] += 1;
+	guc->last_seqno[ring_id] = rq->seqno;
+	spin_unlock(&guc->host2guc_lock);
+
+	return q_ret;
+}
+
+/*
+ * Everything below here is concerned with setup & teardown, and is
+ * therefore not part of the somewhat time-critical batch-submission
+ * path of i915_guc_submit() above.
+ */
+
+/**
+ * gem_allocate_guc_obj() - Allocate gem object for GuC usage
+ * @dev:	drm device
+ * @size:	size of object
+ *
+ * This is a wrapper to create a gem obj. In order to use it inside GuC, the
+ * object needs to stay pinned for its whole lifetime. We must also pin it to
+ * GTT space outside [0, GUC_WOPCM_TOP), because that range is reserved
+ * inside the GuC.
+ *
+ * Return:	A drm_i915_gem_object if successful, otherwise NULL.
+ */
+static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
+							u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (!obj)
+		return NULL;
+
+	if (i915_gem_object_get_pages(obj)) {
+		drm_gem_object_unreference(&obj->base);
+		return NULL;
+	}
+
+	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
+		drm_gem_object_unreference(&obj->base);
+		return NULL;
+	}
+
+	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
+	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+	return obj;
+}
+
+/**
+ * gem_release_guc_obj() - Release gem object allocated for GuC usage
+ * @obj:	gem obj to be released
+  */
+static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
+{
+	if (!obj)
+		return;
+
+	if (i915_gem_obj_is_pinned(obj))
+		i915_gem_object_ggtt_unpin(obj);
+
+	drm_gem_object_unreference(&obj->base);
+}
+
+static void guc_client_free(struct drm_device *dev,
+			    struct i915_guc_client *client)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+
+	if (!client)
+		return;
+
+	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+		/*
+		 * First disable the doorbell, then tell the GuC we've
+		 * finished with it, finally deallocate it in our bitmap
+		 */
+		guc_disable_doorbell(guc, client);
+		host2guc_release_doorbell(guc, client);
+		release_doorbell(guc, client->doorbell_id);
+	}
+
+	/*
+	 * XXX: wait for any outstanding submissions before freeing memory.
+	 * Be sure to drop any locks
+	 */
+
+	gem_release_guc_obj(client->client_obj);
+
+	if (client->ctx_index != GUC_INVALID_CTX_ID) {
+		guc_fini_ctx_desc(guc, client);
+		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
+	}
+
+	kfree(client);
+}
+
+/**
+ * guc_client_alloc() - Allocate an i915_guc_client
+ * @dev:	drm device
+ * @priority:	one of the four priority levels: _CRITICAL, _HIGH, _NORMAL and
+ * 		_LOW. The kernel client replacing ExecList submission is created
+ * 		with NORMAL priority. A client for the scheduler can be HIGH,
+ * 		while a preemption context can use CRITICAL.
+ * @ctx:	the context that owns the client (we use the default render
+ * 		context)
+ *
+ * Return:	An i915_guc_client object if successful, otherwise NULL.
+ */
+static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
+						uint32_t priority,
+						struct intel_context *ctx)
+{
+	struct i915_guc_client *client;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+	struct drm_i915_gem_object *obj;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
+	client->priority = priority;
+	client->owner = ctx;
+	client->guc = guc;
+
+	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
+			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
+	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
+		client->ctx_index = GUC_INVALID_CTX_ID;
+		goto err;
+	}
+
+	/* The first page is doorbell/proc_desc. The two following pages are wq. */
+	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
+	if (!obj)
+		goto err;
+
+	client->client_obj = obj;
+	client->wq_offset = GUC_DB_SIZE;
+	client->wq_size = GUC_WQ_SIZE;
+	spin_lock_init(&client->wq_lock);
+
+	client->doorbell_offset = select_doorbell_cacheline(guc);
+
+	/*
+	 * Since the doorbell only requires a single cacheline, we can save
+	 * space by putting the application process descriptor in the same
+	 * page. Use the half of the page that doesn't include the doorbell.
+	 */
+	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
+		client->proc_desc_offset = 0;
+	else
+		client->proc_desc_offset = (GUC_DB_SIZE / 2);
+
+	client->doorbell_id = assign_doorbell(guc, client->priority);
+	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
+		/* XXX: evict a doorbell instead */
+		goto err;
+
+	guc_init_proc_desc(guc, client);
+	guc_init_ctx_desc(guc, client);
+	guc_init_doorbell(guc, client);
+
+	/* XXX: Any cache flushes needed? General domain mgmt calls? */
+
+	if (host2guc_allocate_doorbell(guc, client))
+		goto err;
+
+	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
+		priority, client, client->ctx_index, client->doorbell_id);
+
+	return client;
+
+err:
+	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
+
+	guc_client_free(dev, client);
+	return NULL;
+}
+
+static void guc_create_log(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	unsigned long offset;
+	uint32_t size, flags;
+
+	if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
+		return;
+
+	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+	/* The first page is used to save log buffer state. Allocate one
+	 * extra page for each section in case they overlap. */
+	size = (1 + GUC_LOG_DPC_PAGES + 1 +
+		GUC_LOG_ISR_PAGES + 1 +
+		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+	obj = guc->log_obj;
+	if (!obj) {
+		obj = gem_allocate_guc_obj(dev_priv->dev, size);
+		if (!obj) {
+			/* logging will be off */
+			i915.guc_log_level = -1;
+			return;
+		}
+
+		guc->log_obj = obj;
+	}
+
+	/* each allocated unit is a page */
+	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+/*
+ * Set up the memory resources to be shared with the GuC.  At this point,
+ * we require just one object that can be mapped through the GGTT.
+ */
+int i915_guc_submission_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const size_t ctxsize = sizeof(struct guc_context_desc);
+	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
+	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
+	struct intel_guc *guc = &dev_priv->guc;
+
+	if (!i915.enable_guc_submission)
+		return 0; /* not enabled */
+
+	if (guc->ctx_pool_obj)
+		return 0; /* already allocated */
+
+	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
+	if (!guc->ctx_pool_obj)
+		return -ENOMEM;
+
+	spin_lock_init(&dev_priv->guc.host2guc_lock);
+
+	ida_init(&guc->ctx_ids);
+
+	guc_create_log(guc);
+
+	return 0;
+}
+
+int i915_guc_submission_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct i915_guc_client *client;
+
+	/* client for execbuf submission */
+	client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+	if (!client) {
+		DRM_ERROR("Failed to create execbuf guc_client\n");
+		return -ENOMEM;
+	}
+
+	guc->execbuf_client = client;
+
+	host2guc_sample_forcewake(guc, client);
+
+	return 0;
+}
+
+void i915_guc_submission_disable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+
+	guc_client_free(dev, guc->execbuf_client);
+	guc->execbuf_client = NULL;
+}
+
+void i915_guc_submission_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+
+	gem_release_guc_obj(dev_priv->guc.log_obj);
+	guc->log_obj = NULL;
+
+	if (guc->ctx_pool_obj)
+		ida_destroy(&guc->ctx_ids);
+	gem_release_guc_obj(guc->ctx_pool_obj);
+	guc->ctx_pool_obj = NULL;
+}
+
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @dev:	drm device
+ */
+int intel_guc_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_context *ctx;
+	u32 data[3];
+
+	if (!i915.enable_guc_submission)
+		return 0;
+
+	ctx = dev_priv->ring[RCS].default_context;
+
+	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
+	/* any value greater than GUC_POWER_D0 */
+	data[1] = GUC_POWER_D1;
+	/* first page is shared data with GuC */
+	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+
+	return host2guc_action(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_resume() - notify GuC resuming from suspend state
+ * @dev:	drm device
+ */
+int intel_guc_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_context *ctx;
+	u32 data[3];
+
+	if (!i915.enable_guc_submission)
+		return 0;
+
+	ctx = dev_priv->ring[RCS].default_context;
+
+	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
+	data[1] = GUC_POWER_D0;
+	/* first page is shared data with GuC */
+	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+
+	return host2guc_action(guc, data, ARRAY_SIZE(data));
+}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 0000000..97f3a56
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,99 @@
+/**
+ * \file i915_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the i915 DRM.
+ *
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Alan Hourihane 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+struct drm_i915_getparam32 {
+	s32 param;
+	/*
+	 * We screwed up the generic ioctl struct here and used a variable-sized
+	 * pointer. Use u32 in the compat struct to match the 32bit pointer
+	 * userspace expects.
+	 */
+	u32 value;
+};
+
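+/*
+ * Note: on x86-64 the native drm_i915_getparam_t carries a real pointer,
+ * giving a 16-byte layout (4-byte param, padding, 8-byte pointer),
+ * whereas 32-bit userspace hands in the packed 8-byte layout above -
+ * hence the translation in compat_i915_getparam() below.
+ */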
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct drm_i915_getparam32 req32;
+	drm_i915_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+			 (unsigned long)request);
+}
+
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+	[DRM_I915_GETPARAM] = compat_i915_getparam,
+};
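+/*
+ * Only DRM_I915_GETPARAM needs fixing up here; any other driver ioctl
+ * number falls through to the regular drm_ioctl() path in
+ * i915_compat_ioctl() below.
+ */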
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
+		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 0000000..b7b0a38
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,4531 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling the
+ * interrupt handling support. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
+static const u32 hpd_ilk[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
+};
+
+static const u32 hpd_ivb[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
+};
+
+static const u32 hpd_bdw[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
+};
+
+static const u32 hpd_ibx[HPD_NUM_PINS] = {
+	[HPD_CRT] = SDE_CRT_HOTPLUG,
+	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[HPD_NUM_PINS] = {
+	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
+static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
+	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
+	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[HPD_NUM_PINS] = {
+	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+/* BXT hpd list */
+static const u32 hpd_bxt[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
+	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
+	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+};
+
+/* IIR can theoretically queue up two events. Be paranoid. */
+#define GEN8_IRQ_RESET_NDX(type, which) do { \
+	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IMR(which)); \
+	I915_WRITE(GEN8_##type##_IER(which), 0); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+} while (0)
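+/* e.g. GEN8_IRQ_RESET_NDX(GT, 0) resets GEN8_GT_IMR(0), IER(0) and IIR(0) */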
+
+#define GEN5_IRQ_RESET(type) do { \
+	I915_WRITE(type##IMR, 0xffffffff); \
+	POSTING_READ(type##IMR); \
+	I915_WRITE(type##IER, 0); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+} while (0)
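+/* e.g. GEN5_IRQ_RESET(GT) resets GTIMR, GTIER and GTIIR */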
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = I915_READ(reg);
+
+	if (val == 0)
+		return;
+
+	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
+	     reg, val);
+	I915_WRITE(reg, 0xffffffff);
+	POSTING_READ(reg);
+	I915_WRITE(reg, 0xffffffff);
+	POSTING_READ(reg);
+}
+
+#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
+	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
+	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+	POSTING_READ(GEN8_##type##_IMR(which)); \
+} while (0)
+
+#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
+	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
+	I915_WRITE(type##IER, (ier_val)); \
+	I915_WRITE(type##IMR, (imr_val)); \
+	POSTING_READ(type##IMR); \
+} while (0)
+
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+
+/* For display hotplug interrupt */
+static inline void
+i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+				     uint32_t mask,
+				     uint32_t bits)
+{
+	uint32_t val;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+	WARN_ON(bits & ~mask);
+
+	val = I915_READ(PORT_HOTPLUG_EN);
+	val &= ~mask;
+	val |= bits;
+	I915_WRITE(PORT_HOTPLUG_EN, val);
+}
+
+/**
+ * i915_hotplug_interrupt_update - update hotplug interrupt enable
+ * @dev_priv: driver private
+ * @mask: bits to update
+ * @bits: bits to enable
+ * NOTE: the HPD enable bits are modified both inside and outside
+ * of an interrupt context. To keep the read-modify-write cycles
+ * from interfering, these bits are protected by a spinlock. Since this
+ * function is usually not called from a context where the lock is
+ * already held, it acquires the lock itself. A non-locking
+ * version is also available.
+ */
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+				   uint32_t mask,
+				   uint32_t bits)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+/**
+ * ilk_update_display_irq - update DEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+				   uint32_t interrupt_mask,
+				   uint32_t enabled_irq_mask)
+{
+	uint32_t new_val;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
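+	/*
+	 * DEIMR bits mask an interrupt *off* when set: within
+	 * interrupt_mask a bit ends up unmasked (0) iff it is also set
+	 * in enabled_irq_mask. E.g. interrupt_mask=0x3 with
+	 * enabled_irq_mask=0x1 unmasks bit 0 and masks bit 1.
+	 */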
+	new_val = dev_priv->irq_mask;
+	new_val &= ~interrupt_mask;
+	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+	if (new_val != dev_priv->irq_mask) {
+		dev_priv->irq_mask = new_val;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
+	}
+}
+
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+	ilk_update_display_irq(dev_priv, mask, mask);
+}
+
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+	ilk_update_display_irq(dev_priv, mask, 0);
+}
+
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+			      uint32_t interrupt_mask,
+			      uint32_t enabled_irq_mask)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	dev_priv->gt_irq_mask &= ~interrupt_mask;
+	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
+static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+}
+
+static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+}
+
+static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+}
+
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+			      uint32_t interrupt_mask,
+			      uint32_t enabled_irq_mask)
+{
+	uint32_t new_val;
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	new_val = dev_priv->pm_irq_mask;
+	new_val &= ~interrupt_mask;
+	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+	if (new_val != dev_priv->pm_irq_mask) {
+		dev_priv->pm_irq_mask = new_val;
+		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+		POSTING_READ(gen6_pm_imr(dev_priv));
+	}
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+				  uint32_t mask)
+{
+	snb_update_pm_irq(dev_priv, mask, 0);
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	__gen6_disable_pm_irq(dev_priv, mask);
+}
+
+void gen6_reset_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t reg = gen6_pm_iir(dev_priv);
+
+	spin_lock_irq(&dev_priv->irq_lock);
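+	/* IIR can theoretically queue up two events, hence the double
+	 * clear (mirroring the IRQ reset macros above) */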
+	I915_WRITE(reg, dev_priv->pm_rps_events);
+	I915_WRITE(reg, dev_priv->pm_rps_events);
+	POSTING_READ(reg);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	WARN_ON(dev_priv->rps.pm_iir);
+	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+	dev_priv->rps.interrupts_enabled = true;
+	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+				dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+	/*
+	 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
+	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+	 *
+	 * TODO: verify if this can be reproduced on VLV,CHV.
+	 */
+	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+	if (INTEL_INFO(dev_priv)->gen >= 8)
+		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+	return mask;
+}
+
+void gen6_disable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.interrupts_enabled = false;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->rps.work);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+
+	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
+				~dev_priv->pm_rps_events);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	synchronize_irq(dev->irq);
+}
+
+/**
+ * bdw_update_port_irq - update DE port interrupt
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+				uint32_t interrupt_mask,
+				uint32_t enabled_irq_mask)
+{
+	uint32_t new_val;
+	uint32_t old_val;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	old_val = I915_READ(GEN8_DE_PORT_IMR);
+
+	new_val = old_val;
+	new_val &= ~interrupt_mask;
+	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+	if (new_val != old_val) {
+		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
+		POSTING_READ(GEN8_DE_PORT_IMR);
+	}
+}
+
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+				  uint32_t interrupt_mask,
+				  uint32_t enabled_irq_mask)
+{
+	uint32_t sdeimr = I915_READ(SDEIMR);
+	sdeimr &= ~interrupt_mask;
+	sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	I915_WRITE(SDEIMR, sdeimr);
+	POSTING_READ(SDEIMR);
+}
+
+static void
+__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		       u32 enable_mask, u32 status_mask)
+{
+	u32 reg = PIPESTAT(pipe);
+	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+	WARN_ON(!intel_irqs_enabled(dev_priv));
+
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
+		return;
+
+	if ((pipestat & enable_mask) == enable_mask)
+		return;
+
+	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+
+	/* Enable the interrupt, clear any pending status */
+	pipestat |= enable_mask | status_mask;
+	I915_WRITE(reg, pipestat);
+	POSTING_READ(reg);
+}
+
+static void
+__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		        u32 enable_mask, u32 status_mask)
+{
+	u32 reg = PIPESTAT(pipe);
+	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+	WARN_ON(!intel_irqs_enabled(dev_priv));
+
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
+		return;
+
+	if ((pipestat & enable_mask) == 0)
+		return;
+
+	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+
+	pipestat &= ~enable_mask;
+	I915_WRITE(reg, pipestat);
+	POSTING_READ(reg);
+}
+
+static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
+{
+	u32 enable_mask = status_mask << 16;
+
+	/*
+	 * On pipe A we don't support the PSR interrupt yet,
+	 * on pipe B and C the same bit MBZ (must be zero).
+	 */
+	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+		return 0;
+	/*
+	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
+	 * A the same bit is for perf counters which we don't use either.
+	 */
+	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+		return 0;
+
+	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
+			 SPRITE0_FLIP_DONE_INT_EN_VLV |
+			 SPRITE1_FLIP_DONE_INT_EN_VLV);
+	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
+		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
+	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
+		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+
+	return enable_mask;
+}
+
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		     u32 status_mask)
+{
+	u32 enable_mask;
+
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+							   status_mask);
+	else
+		enable_mask = status_mask << 16;
+	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		      u32 status_mask)
+{
+	u32 enable_mask;
+
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+							   status_mask);
+	else
+		enable_mask = status_mask << 16;
+	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+/**
+ * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
+ * @dev: drm device
+ */
+static void i915_enable_asle_pipestat(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+		return;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+	if (INTEL_INFO(dev)->gen >= 4)
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_STATUS);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ *  vblank_start >= 3
+ *  vsync_start = vblank_start + 1
+ *  vsync_end = vblank_start + 2
+ *  vtotal = vblank_start + 3
+ *
+ *           start of vblank:
+ *           latch double buffered registers
+ *           increment frame counter (ctg+)
+ *           generate start of vblank interrupt (gen4+)
+ *           |
+ *           |          frame start:
+ *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
+ *           |          may be shifted forward 1-3 extra lines via PIPECONF
+ *           |          |
+ *           |          |  start of vsync:
+ *           |          |  generate vsync interrupt
+ *           |          |  |
+ * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
+ *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ *       |          |       <----vs----->                     |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ *       |          |                                         |
+ *       last visible pixel                                   first visible pixel
+ *                  |                                         increment frame counter (gen3/4)
+ *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
+ *
+ * x  = horizontal active
+ * _  = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ *   (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ *   of horizontal active on the first line of vertical active
+ */
+
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
+
+	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
+	vbl_start = mode->crtc_vblank_start;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);
+
+	/* Convert to pixel count */
+	vbl_start *= htotal;
+
+	/* Start of vblank event occurs at start of hsync */
+	vbl_start -= htotal - hsync_start;
+
+	high_frame = PIPEFRAME(pipe);
+	low_frame = PIPEFRAMEPIXEL(pipe);
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = I915_READ(low_frame);
+		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+	} while (high1 != high2);
+
+	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
+	low >>= PIPE_FRAME_LOW_SHIFT;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
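+	/*
+	 * Worked example (hypothetical values): high1=0x1, low=0x23 and
+	 * a pixel counter at or past vbl_start gives
+	 * ((0x1 << 8) | 0x23) + 1 = 0x124, i.e. the cooked counter ticks
+	 * over as soon as vblank starts instead of at the start of
+	 * active.
+	 */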
+	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
+}
+
+static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+}
+
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_display_mode *mode = &crtc->base.hwmode;
+	enum pipe pipe = crtc->pipe;
+	int position, vtotal;
+
+	vtotal = mode->crtc_vtotal;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vtotal /= 2;
+
+	if (IS_GEN2(dev))
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+	else
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+
+	/*
+	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
+	 * read it just before the start of vblank.  So retry the read
+	 * so we don't accidentally end up spanning a vblank frame
+	 * increment, causing the pipe_update_end() code to squawk at us.
+	 *
+	 * The nature of this problem means we can't simply check the ISR
+	 * bit and return the vblank start value; nor can we use the scanline
+	 * debug register in the transcoder as it appears to have the same
+	 * problem.  We may need to extend this to include other platforms,
+	 * but so far testing only shows the problem on HSW.
+	 */
+	if (HAS_DDI(dev) && !position) {
+		int i, temp;
+
+		for (i = 0; i < 100; i++) {
+			udelay(1);
+			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
+				DSL_LINEMASK_GEN3;
+			if (temp != position) {
+				position = temp;
+				break;
+			}
+		}
+	}
+
+	/*
+	 * See update_scanline_offset() for the details on the
+	 * scanline_offset adjustment.
+	 */
+	return (position + crtc->scanline_offset) % vtotal;
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+				    unsigned int flags, int *vpos, int *hpos,
+				    ktime_t *stime, ktime_t *etime,
+				    const struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int position;
+	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+	unsigned long irqflags;
+
+	if (WARN_ON(!mode->crtc_clock)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+				 "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);
+		vbl_end /= 2;
+		vtotal /= 2;
+	}
+
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = __intel_get_crtc_scanline(intel_crtc);
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
+
+		/*
+		 * In interlaced modes, the pixel counter counts all pixels,
+		 * so one field will have htotal more pixels. In order to avoid
+		 * the reported position from jumping backwards when the pixel
+		 * counter is beyond the length of the shorter field, just
+		 * clamp the position to the length of the shorter field. This
+		 * matches how the scanline counter based position works since
+		 * the scanline counter doesn't count the two half lines.
+		 */
+		if (position >= vtotal)
+			position = vtotal - 1;
+
+		/*
+		 * Start of vblank interrupt is triggered at start of hsync,
+		 * just prior to the first active line of vblank. However we
+		 * consider lines to start at the leading edge of horizontal
+		 * active. So, should we get here before we've crossed into
+		 * the horizontal active of the first line in vblank, we would
+		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
+		 * always add htotal-hsync_start to the current pixel position.
+		 */
+		position = (position + htotal - hsync_start) % vtotal;
+	}
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	in_vbl = position >= vbl_start && position < vbl_end;
+
+	/*
+	 * While in vblank, position will be negative,
+	 * counting up towards 0 at vbl_end. Outside
+	 * vblank, position will be positive, counting
+	 * up from vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
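+	/*
+	 * Worked example (hypothetical mode): with vbl_start=768 and
+	 * vbl_end=vtotal=806, scanline 770 becomes 770-806=-36 (36 lines
+	 * before vblank ends), while scanline 100 stays at 100 (100
+	 * lines past vbl_end).
+	 */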
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		*vpos = position;
+		*hpos = 0;
+	} else {
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	return ret;
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	unsigned long irqflags;
+	int position;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	position = __intel_get_crtc_scanline(crtc);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	return position;
+}
+
+static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags)
+{
+	struct drm_crtc *crtc;
+
+	if (pipe >= INTEL_INFO(dev)->num_pipes) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->hwmode.crtc_clock) {
+		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
+		return -EBUSY;
+	}
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     &crtc->hwmode);
+}
+
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u8 new_delay;
+
+	spin_lock(&mchdev_lock);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->ips.cur_delay;
+
+	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+	busy_up = I915_READ(RCPREVBSYTUPAVG);
+	busy_down = I915_READ(RCPREVBSYTDNAVG);
+	max_avg = I915_READ(RCBMAXAVG);
+	min_avg = I915_READ(RCBMINAVG);
+
+	/* Handle RCS change request from hw */
+	if (busy_up > max_avg) {
+		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.cur_delay - 1;
+		if (new_delay < dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.max_delay;
+	} else if (busy_down < min_avg) {
+		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.cur_delay + 1;
+		if (new_delay > dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.min_delay;
+	}
+
+	if (ironlake_set_drps(dev, new_delay))
+		dev_priv->ips.cur_delay = new_delay;
+
+	spin_unlock(&mchdev_lock);
+}
+
+static void notify_ring(struct intel_engine_cs *ring)
+{
+	if (!intel_ring_initialized(ring))
+		return;
+
+	trace_i915_gem_request_notify(ring);
+
+	wake_up_all(&ring->irq_queue);
+}
+
+static void vlv_c0_read(struct drm_i915_private *dev_priv,
+			struct intel_rps_ei *ei)
+{
+	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
+	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+}
+
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
+{
+	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+}
+
+static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+	struct intel_rps_ei now;
+	u32 events = 0;
+
+	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+		return 0;
+
+	vlv_c0_read(dev_priv, &now);
+	if (now.cz_clock == 0)
+		return 0;
+
+	if (prev->cz_clock) {
+		u64 time, c0;
+		unsigned int mul;
+
+		mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+			mul <<= 8;
+
+		time = now.cz_clock - prev->cz_clock;
+		time *= dev_priv->czclk_freq;
+
+		/* Workload can be split between render + media,
+		 * e.g. SwapBuffers being blitted in X after being rendered in
+		 * mesa. To account for this we need to combine both engines
+		 * into our activity counter.
+		 */
+		c0 = now.render_c0 - prev->render_c0;
+		c0 += now.media_c0 - prev->media_c0;
+		c0 *= mul;
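+		/*
+		 * Both sides are now on a common scale (see the mul
+		 * comment above), so these checks effectively compare
+		 * the busy percentage over the EI window against the
+		 * up/down thresholds.
+		 */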
+
+		if (c0 > time * dev_priv->rps.up_threshold)
+			events = GEN6_PM_RP_UP_THRESHOLD;
+		else if (c0 < time * dev_priv->rps.down_threshold)
+			events = GEN6_PM_RP_DOWN_THRESHOLD;
+	}
+
+	dev_priv->rps.ei = now;
+	return events;
+}
+
+static bool any_waiters(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		if (ring->irq_refcount)
+			return true;
+
+	return false;
+}
+
+static void gen6_pm_rps_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, rps.work);
+	bool client_boost;
+	int new_delay, adj, min, max;
+	u32 pm_iir;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	/* Speed up work cancellation during disabling rps interrupts. */
+	if (!dev_priv->rps.interrupts_enabled) {
+		spin_unlock_irq(&dev_priv->irq_lock);
+		return;
+	}
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
+	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	client_boost = dev_priv->rps.client_boost;
+	dev_priv->rps.client_boost = false;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Make sure we didn't queue anything we're not going to process. */
+	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
+
+	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+
+	adj = dev_priv->rps.last_adj;
+	new_delay = dev_priv->rps.cur_freq;
+	min = dev_priv->rps.min_freq_softlimit;
+	max = dev_priv->rps.max_freq_softlimit;
+
+	if (client_boost) {
+		new_delay = dev_priv->rps.max_freq_softlimit;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+		if (adj > 0)
+			adj *= 2;
+		else /* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+		/*
+		 * For better performance, jump directly
+		 * to RPe if we're below it.
+		 */
+		if (new_delay < dev_priv->rps.efficient_freq - adj) {
+			new_delay = dev_priv->rps.efficient_freq;
+			adj = 0;
+		}
+	} else if (any_waiters(dev_priv)) {
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
+		else
+			new_delay = dev_priv->rps.min_freq_softlimit;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else /* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+	} else { /* unknown event */
+		adj = 0;
+	}
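+	/*
+	 * Thus the step doubles while the same event repeats (+/-1, 2,
+	 * 4, ... - even values on CHV) and restarts whenever the
+	 * direction changes or a boost/timeout/unknown event zeroed it.
+	 */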
+
+	dev_priv->rps.last_adj = adj;
+
+	/* sysfs frequency interfaces may have snuck in while servicing the
+	 * interrupt
+	 */
+	new_delay += adj;
+	new_delay = clamp_t(int, new_delay, min, max);
+
+	intel_set_rps(dev_priv->dev, new_delay);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since statistically
+ * the same row is more likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, l3_parity.error_work);
+	u32 error_status, row, bank, subbank;
+	char *parity_event[6];
+	uint32_t misccpctl;
+	uint8_t slice = 0;
+
+	/* We must turn off DOP level clock gating to access the L3 registers.
+	 * In order to prevent a get/put style interface, acquire struct mutex
+	 * any time we access those registers.
+	 */
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
+	misccpctl = I915_READ(GEN7_MISCCPCTL);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	POSTING_READ(GEN7_MISCCPCTL);
+
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
+
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
+
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
+
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
+
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
+
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
+	spin_lock_irq(&dev_priv->irq_lock);
+	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_L3_DPF(dev))
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+	spin_unlock(&dev_priv->irq_lock);
+
+	iir &= GT_PARITY_ERROR(dev);
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		dev_priv->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		dev_priv->l3_parity.which_slice |= 1 << 0;
+
+	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+}
+
+static void ilk_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+		notify_ring(&dev_priv->ring[RCS]);
+	if (gt_iir & ILK_BSD_USER_INTERRUPT)
+		notify_ring(&dev_priv->ring[VCS]);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+		notify_ring(&dev_priv->ring[RCS]);
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		notify_ring(&dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(&dev_priv->ring[BCS]);
+
+	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+		      GT_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	irqreturn_t ret = IRQ_NONE;
+
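+	/*
+	 * GT interrupts are spread over four IIR banks: IIR(0) covers
+	 * RCS/BCS, IIR(1) VCS1/VCS2, IIR(2) the PM events and IIR(3)
+	 * VECS; each is read, acked and dispatched in turn below.
+	 */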
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
+		if (tmp) {
+			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
+			ret = IRQ_HANDLED;
+
+			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
+			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+				notify_ring(&dev_priv->ring[RCS]);
+
+			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
+			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+				notify_ring(&dev_priv->ring[BCS]);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
+		if (tmp) {
+			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
+			ret = IRQ_HANDLED;
+
+			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
+			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+				notify_ring(&dev_priv->ring[VCS]);
+
+			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
+			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+				notify_ring(&dev_priv->ring[VCS2]);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
+		if (tmp) {
+			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
+			ret = IRQ_HANDLED;
+
+			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
+			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+				notify_ring(&dev_priv->ring[VECS]);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_PM_IRQ) {
+		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
+		if (tmp & dev_priv->pm_rps_events) {
+			I915_WRITE_FW(GEN8_GT_IIR(2),
+				      tmp & dev_priv->pm_rps_events);
+			ret = IRQ_HANDLED;
+			gen6_rps_irq_handler(dev_priv, tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (PM)!\n");
+	}
+
+	return ret;
+}
+
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_A:
+		return val & PORTA_HOTPLUG_LONG_DETECT;
+	case PORT_B:
+		return val & PORTB_HOTPLUG_LONG_DETECT;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
+	}
+}
+
+static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_E:
+		return val & PORTE_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
+	}
+}
+
+static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_A:
+		return val & PORTA_HOTPLUG_LONG_DETECT;
+	case PORT_B:
+		return val & PORTB_HOTPLUG_LONG_DETECT;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_LONG_DETECT;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
+	}
+}
+
+static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_A:
+		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
+	}
+}
+
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_B:
+		return val & PORTB_HOTPLUG_LONG_DETECT;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_LONG_DETECT;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
+	}
+}
+
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+{
+	switch (port) {
+	case PORT_B:
+		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Get a bit mask of pins that have triggered, and which ones may be long.
+ * This can be called multiple times with the same masks to accumulate
+ * hotplug detection results from several registers.
+ *
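+ * For example, i9xx_hpd_irq_handler() below feeds this hpd_status_g4x
+ * together with i9xx_port_hotplug_long_detect.
+ *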
+ * Note that the caller is expected to zero out the masks initially.
+ */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+			     u32 hotplug_trigger, u32 dig_hotplug_reg,
+			     const u32 hpd[HPD_NUM_PINS],
+			     bool long_pulse_detect(enum port port, u32 val))
+{
+	enum port port;
+	int i;
+
+	for_each_hpd_pin(i) {
+		if ((hpd[i] & hotplug_trigger) == 0)
+			continue;
+
+		*pin_mask |= BIT(i);
+
+		if (!intel_hpd_pin_to_port(i, &port))
+			continue;
+
+		if (long_pulse_detect(port, dig_hotplug_reg))
+			*long_mask |= BIT(i);
+	}
+
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
+}
+
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+					 uint32_t crc0, uint32_t crc1,
+					 uint32_t crc2, uint32_t crc3,
+					 uint32_t crc4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_pipe_crc_entry *entry;
+	int head, tail;
+
+	spin_lock(&pipe_crc->lock);
+
+	if (!pipe_crc->entries) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_DEBUG_KMS("spurious interrupt\n");
+		return;
+	}
+
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+
+	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("CRC buffer overflowing\n");
+		return;
+	}
+
+	entry = &pipe_crc->entries[head];
+
+	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->crc[0] = crc0;
+	entry->crc[1] = crc1;
+	entry->crc[2] = crc2;
+	entry->crc[3] = crc3;
+	entry->crc[4] = crc4;
+
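+	/* the AND relies on INTEL_PIPE_CRC_ENTRIES_NR being a power of
+	 * two (as CIRC_SPACE above also requires) */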
+	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+	pipe_crc->head = head;
+
+	spin_unlock(&pipe_crc->lock);
+
+	wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+			     uint32_t crc0, uint32_t crc1,
+			     uint32_t crc2, uint32_t crc3,
+			     uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t res1, res2;
+
+	if (INTEL_INFO(dev)->gen >= 3)
+		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+	else
+		res1 = 0;
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+	else
+		res2 = 0;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_RED(pipe)),
+				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     res1, res2);
+}
+
+/*
+ * The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue.
+ */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+	if (pm_iir & dev_priv->pm_rps_events) {
+		spin_lock(&dev_priv->irq_lock);
+		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+		if (dev_priv->rps.interrupts_enabled) {
+			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+			queue_work(dev_priv->wq, &dev_priv->rps.work);
+		}
+		spin_unlock(&dev_priv->irq_lock);
+	}
+
+	if (INTEL_INFO(dev_priv)->gen >= 8)
+		return;
+
+	if (HAS_VEBOX(dev_priv->dev)) {
+		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+			notify_ring(&dev_priv->ring[VECS]);
+
+		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+	}
+}
+
+static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+{
+	return drm_handle_vblank(dev, pipe);
+}
+
+static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pipe_stats[I915_MAX_PIPES] = { };
+	int pipe;
+
+	spin_lock(&dev_priv->irq_lock);
+	for_each_pipe(dev_priv, pipe) {
+		int reg;
+		u32 mask, iir_bit = 0;
+
+		/*
+		 * PIPESTAT bits get signalled even when the interrupt is
+		 * disabled with the mask bits, and some of the status bits do
+		 * not generate interrupts at all (like the underrun bit). Hence
+		 * we need to be careful that we only handle what we want to
+		 * handle.
+		 */
+
+		/* fifo underruns are filtered in the underrun handler. */
+		mask = PIPE_FIFO_UNDERRUN_STATUS;
+
+		switch (pipe) {
+		case PIPE_A:
+			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+			break;
+		case PIPE_B:
+			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+			break;
+		case PIPE_C:
+			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+			break;
+		}
+		if (iir & iir_bit)
+			mask |= dev_priv->pipestat_irq_mask[pipe];
+
+		if (!mask)
+			continue;
+
+		reg = PIPESTAT(pipe);
+		mask |= PIPESTAT_INT_ENABLE_MASK;
+		pipe_stats[pipe] = I915_READ(reg) & mask;
+
+		/*
+		 * Clear the PIPE*STAT regs before the IIR
+		 */
+		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
+					PIPESTAT_INT_STATUS_MASK))
+			I915_WRITE(reg, pipe_stats[pipe]);
+	}
+	spin_unlock(&dev_priv->irq_lock);
+
+	for_each_pipe(dev_priv, pipe) {
+		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+		    intel_pipe_handle_vblank(dev, pipe))
+			intel_check_page_flip(dev, pipe);
+
+		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip(dev, pipe);
+		}
+
+		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+			i9xx_pipe_crc_irq_handler(dev, pipe);
+
+		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+	}
+
+	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+		gmbus_irq_handler(dev);
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 pin_mask = 0, long_mask = 0;
+
+	if (!hotplug_status)
+		return;
+
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
+
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+
+		if (hotplug_trigger) {
+			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+					   hotplug_trigger, hpd_status_g4x,
+					   i9xx_port_hotplug_long_detect);
+
+			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+		}
+
+		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+			dp_aux_irq_handler(dev);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		if (hotplug_trigger) {
+			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+					   hotplug_trigger, hpd_status_i915,
+					   i9xx_port_hotplug_long_detect);
+			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+		}
+	}
+}
+
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 iir, gt_iir, pm_iir;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	while (true) {
+		/* Find, clear, then process each source of interrupt */
+
+		gt_iir = I915_READ(GTIIR);
+		if (gt_iir)
+			I915_WRITE(GTIIR, gt_iir);
+
+		pm_iir = I915_READ(GEN6_PMIIR);
+		if (pm_iir)
+			I915_WRITE(GEN6_PMIIR, pm_iir);
+
+		iir = I915_READ(VLV_IIR);
+		if (iir) {
+			/* Consume port before clearing IIR or we'll miss events */
+			if (iir & I915_DISPLAY_PORT_INTERRUPT)
+				i9xx_hpd_irq_handler(dev);
+			I915_WRITE(VLV_IIR, iir);
+		}
+
+		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+			goto out;
+
+		ret = IRQ_HANDLED;
+
+		if (gt_iir)
+			snb_gt_irq_handler(dev, dev_priv, gt_iir);
+		if (pm_iir)
+			gen6_rps_irq_handler(dev_priv, pm_iir);
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
+		valleyview_pipestat_irq_handler(dev, iir);
+	}
+
+out:
+	return ret;
+}
+
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl, iir;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	for (;;) {
+		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+		iir = I915_READ(VLV_IIR);
+
+		if (master_ctl == 0 && iir == 0)
+			break;
+
+		ret = IRQ_HANDLED;
+
+		I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+		/* Find, clear, then process each source of interrupt */
+
+		if (iir) {
+			/* Consume port before clearing IIR or we'll miss events */
+			if (iir & I915_DISPLAY_PORT_INTERRUPT)
+				i9xx_hpd_irq_handler(dev);
+			I915_WRITE(VLV_IIR, iir);
+		}
+
+		gen8_gt_irq_handler(dev_priv, master_ctl);
+
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
+		valleyview_pipestat_irq_handler(dev, iir);
+
+		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+		POSTING_READ(GEN8_MASTER_IRQ);
+	}
+
+	return ret;
+}
+
+static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+				const u32 hpd[HPD_NUM_PINS])
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+
+	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+			   dig_hotplug_reg, hpd,
+			   pch_port_hotplug_long_detect);
+
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+}
+
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+
+	if (hotplug_trigger)
+		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK) {
+		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
+			       SDE_AUDIO_POWER_SHIFT);
+		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+				 port_name(port));
+	}
+
+	if (pch_iir & SDE_AUX_MASK)
+		dp_aux_irq_handler(dev);
+
+	if (pch_iir & SDE_GMBUS)
+		gmbus_irq_handler(dev);
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK)
+		for_each_pipe(dev_priv, pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
+}
+
+static void ivb_err_int_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 err_int = I915_READ(GEN7_ERR_INT);
+	enum pipe pipe;
+
+	if (err_int & ERR_INT_POISON)
+		DRM_ERROR("Poison interrupt\n");
+
+	for_each_pipe(dev_priv, pipe) {
+		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+
+		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+			if (IS_IVYBRIDGE(dev))
+				ivb_pipe_crc_irq_handler(dev, pipe);
+			else
+				hsw_pipe_crc_irq_handler(dev, pipe);
+		}
+	}
+
+	I915_WRITE(GEN7_ERR_INT, err_int);
+}
+
+static void cpt_serr_int_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 serr_int = I915_READ(SERR_INT);
+
+	if (serr_int & SERR_INT_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
+
+	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
+
+	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
+
+	I915_WRITE(SERR_INT, serr_int);
+}
+
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+
+	if (hotplug_trigger)
+		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
+		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+			       SDE_AUDIO_POWER_SHIFT_CPT);
+		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+				 port_name(port));
+	}
+
+	if (pch_iir & SDE_AUX_MASK_CPT)
+		dp_aux_irq_handler(dev);
+
+	if (pch_iir & SDE_GMBUS_CPT)
+		gmbus_irq_handler(dev);
+
+	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK_CPT)
+		for_each_pipe(dev_priv, pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
+
+	if (pch_iir & SDE_ERROR_CPT)
+		cpt_serr_int_handler(dev);
+}
+
+static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
+		~SDE_PORTE_HOTPLUG_SPT;
+	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
+	u32 pin_mask = 0, long_mask = 0;
+
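+	/*
+	 * On SPT port E reports hotplug through the second hotplug register
+	 * (PCH_PORT_HOTPLUG2), so the two trigger sets are decoded separately
+	 * and merged into a single pin/long mask before being handled.
+	 */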
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg;
+
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   dig_hotplug_reg, hpd_spt,
+				   spt_port_hotplug_long_detect);
+	}
+
+	if (hotplug2_trigger) {
+		u32 dig_hotplug_reg;
+
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
+				   dig_hotplug_reg, hpd_spt,
+				   spt_port_hotplug2_long_detect);
+	}
+
+	if (pin_mask)
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+
+	if (pch_iir & SDE_GMBUS_CPT)
+		gmbus_irq_handler(dev);
+}
+
+static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+				const u32 hpd[HPD_NUM_PINS])
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+
+	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+			   dig_hotplug_reg, hpd,
+			   ilk_port_hotplug_long_detect);
+
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+}
+
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
+
+	if (hotplug_trigger)
+		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+
+	if (de_iir & DE_AUX_CHANNEL_A)
+		dp_aux_irq_handler(dev);
+
+	if (de_iir & DE_GSE)
+		intel_opregion_asle_intr(dev);
+
+	if (de_iir & DE_POISON)
+		DRM_ERROR("Poison interrupt\n");
+
+	for_each_pipe(dev_priv, pipe) {
+		if (de_iir & DE_PIPE_VBLANK(pipe) &&
+		    intel_pipe_handle_vblank(dev, pipe))
+			intel_check_page_flip(dev, pipe);
+
+		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+
+		if (de_iir & DE_PIPE_CRC_DONE(pipe))
+			i9xx_pipe_crc_irq_handler(dev, pipe);
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+	}
+
+	/* check event from PCH */
+	if (de_iir & DE_PCH_EVENT) {
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		if (HAS_PCH_CPT(dev))
+			cpt_irq_handler(dev, pch_iir);
+		else
+			ibx_irq_handler(dev, pch_iir);
+
+		/* should clear PCH hotplug event before clear CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+	}
+
+	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+		ironlake_rps_change_irq_handler(dev);
+}
+
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
+
+	if (hotplug_trigger)
+		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+
+	if (de_iir & DE_ERR_INT_IVB)
+		ivb_err_int_handler(dev);
+
+	if (de_iir & DE_AUX_CHANNEL_A_IVB)
+		dp_aux_irq_handler(dev);
+
+	if (de_iir & DE_GSE_IVB)
+		intel_opregion_asle_intr(dev);
+
+	for_each_pipe(dev_priv, pipe) {
+		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
+		    intel_pipe_handle_vblank(dev, pipe))
+			intel_check_page_flip(dev, pipe);
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+	}
+
+	/* check event from PCH */
+	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		cpt_irq_handler(dev, pch_iir);
+
+		/* clear PCH hotplug event before clear CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+	}
+}
+
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	/* We get interrupts on unclaimed registers, so check for this before we
+	 * do any I915_{READ,WRITE}. */
+	intel_uncore_check_errors(dev);
+
+	/* disable master interrupt before clearing iir  */
+	de_ier = I915_READ(DEIER);
+	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+	POSTING_READ(DEIER);
+
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	if (!HAS_PCH_NOP(dev)) {
+		sde_ier = I915_READ(SDEIER);
+		I915_WRITE(SDEIER, 0);
+		POSTING_READ(SDEIER);
+	}
+
+	/* Find, clear, then process each source of interrupt */
+
+	gt_iir = I915_READ(GTIIR);
+	if (gt_iir) {
+		I915_WRITE(GTIIR, gt_iir);
+		ret = IRQ_HANDLED;
+		if (INTEL_INFO(dev)->gen >= 6)
+			snb_gt_irq_handler(dev, dev_priv, gt_iir);
+		else
+			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+	}
+
+	de_iir = I915_READ(DEIIR);
+	if (de_iir) {
+		I915_WRITE(DEIIR, de_iir);
+		ret = IRQ_HANDLED;
+		if (INTEL_INFO(dev)->gen >= 7)
+			ivb_display_irq_handler(dev, de_iir);
+		else
+			ilk_display_irq_handler(dev, de_iir);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 pm_iir = I915_READ(GEN6_PMIIR);
+		if (pm_iir) {
+			I915_WRITE(GEN6_PMIIR, pm_iir);
+			ret = IRQ_HANDLED;
+			gen6_rps_irq_handler(dev_priv, pm_iir);
+		}
+	}
+
+	I915_WRITE(DEIER, de_ier);
+	POSTING_READ(DEIER);
+	if (!HAS_PCH_NOP(dev)) {
+		I915_WRITE(SDEIER, sde_ier);
+		POSTING_READ(SDEIER);
+	}
+
+	return ret;
+}
+
+static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+				const u32 hpd[HPD_NUM_PINS])
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+
+	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+			   dig_hotplug_reg, hpd,
+			   bxt_port_hotplug_long_detect);
+
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret = IRQ_NONE;
+	uint32_t tmp = 0;
+	enum pipe pipe;
+	u32 aux_mask = GEN8_AUX_CHANNEL_A;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+			GEN9_AUX_CHANNEL_D;
+
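+	/*
+	 * Raw (_FW) register accessors are used here to keep the hot irq
+	 * path cheap; the master IRQ control register does not need the
+	 * usual forcewake dance.
+	 */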
+	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+	/* Find, clear, then process each source of interrupt */
+
+	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+
+	if (master_ctl & GEN8_DE_MISC_IRQ) {
+		tmp = I915_READ(GEN8_DE_MISC_IIR);
+		if (tmp) {
+			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+			ret = IRQ_HANDLED;
+			if (tmp & GEN8_DE_MISC_GSE)
+				intel_opregion_asle_intr(dev);
+			else
+				DRM_ERROR("Unexpected DE Misc interrupt\n");
+		} else {
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+		}
+	}
+
+	if (master_ctl & GEN8_DE_PORT_IRQ) {
+		tmp = I915_READ(GEN8_DE_PORT_IIR);
+		if (tmp) {
+			bool found = false;
+			u32 hotplug_trigger = 0;
+
+			if (IS_BROXTON(dev_priv))
+				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
+			else if (IS_BROADWELL(dev_priv))
+				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
+
+			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			ret = IRQ_HANDLED;
+
+			if (tmp & aux_mask) {
+				dp_aux_irq_handler(dev);
+				found = true;
+			}
+
+			if (hotplug_trigger) {
+				if (IS_BROXTON(dev))
+					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
+				else
+					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
+				found = true;
+			}
+
+			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+				gmbus_irq_handler(dev);
+				found = true;
+			}
+
+			if (!found)
+				DRM_ERROR("Unexpected DE Port interrupt\n");
+		} else {
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+		}
+	}
+
+	for_each_pipe(dev_priv, pipe) {
+		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+
+		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+			continue;
+
+		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (pipe_iir) {
+			ret = IRQ_HANDLED;
+			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
+			if (pipe_iir & GEN8_PIPE_VBLANK &&
+			    intel_pipe_handle_vblank(dev, pipe))
+				intel_check_page_flip(dev, pipe);
+
+			if (INTEL_INFO(dev_priv)->gen >= 9)
+				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+			else
+				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+			if (flip_done) {
+				intel_prepare_page_flip(dev, pipe);
+				intel_finish_page_flip_plane(dev, pipe);
+			}
+
+			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+				hsw_pipe_crc_irq_handler(dev, pipe);
+
+			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
+				intel_cpu_fifo_underrun_irq_handler(dev_priv,
+								    pipe);
+
+			if (INTEL_INFO(dev_priv)->gen >= 9)
+				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+			else
+				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+			if (fault_errors)
+				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+					  pipe_name(pipe),
+					  fault_errors);
+		} else {
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+		}
+	}
+
+	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+	    master_ctl & GEN8_DE_PCH_IRQ) {
+		/*
+		 * FIXME(BDW): Assume for now that the new interrupt handling
+		 * scheme also closed the SDE interrupt handling race we've seen
+		 * on older pch-split platforms. But this needs testing.
+		 */
+		u32 pch_iir = I915_READ(SDEIIR);
+		if (pch_iir) {
+			I915_WRITE(SDEIIR, pch_iir);
+			ret = IRQ_HANDLED;
+
+			if (HAS_PCH_SPT(dev_priv))
+				spt_irq_handler(dev, pch_iir);
+			else
+				cpt_irq_handler(dev, pch_iir);
+		} else {
+			/*
+			 * Like on previous PCH there seems to be something
+			 * fishy going on with forwarding PCH interrupts.
+			 */
+			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+		}
+	}
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ_FW(GEN8_MASTER_IRQ);
+
+	return ret;
+}
+
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+			       bool reset_completed)
+{
+	struct intel_engine_cs *ring;
+	int i;
+
+	/*
+	 * Notify all waiters for GPU completion events that reset state has
+	 * been changed, and that they need to restart their wait after
+	 * checking for potential errors (and bail out to drop locks if there is
+	 * a gpu reset pending so that i915_error_work_func can acquire them).
+	 */
+
+	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+	for_each_ring(ring, dev_priv, i)
+		wake_up_all(&ring->irq_queue);
+
+	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	/*
+	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
+	 * reset state is cleared.
+	 */
+	if (reset_completed)
+		wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
+/**
+ * i915_reset_and_wakeup - do process context error handling work
+ * @dev: drm device
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
+ */
+static void i915_reset_and_wakeup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
+	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+	int ret;
+
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+
+	/*
+	 * Note that there's only one work item which does gpu resets, so we
+	 * need not worry about concurrent gpu resets potentially incrementing
+	 * error->reset_counter twice. We only need to take care of another
+	 * racing irq/hangcheck declaring the gpu dead for a second time. A
+	 * quick check for that is good enough: schedule_work ensures the
+	 * correct ordering between hang detection and this work item, and since
+	 * the reset in-progress bit is only ever set by code outside of this
+	 * work we don't need to worry about any other races.
+	 */
+	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+		DRM_DEBUG_DRIVER("resetting chip\n");
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+				   reset_event);
+
+		/*
+		 * In most cases it's guaranteed that we get here with an RPM
+		 * reference held, for example because there is a pending GPU
+		 * request that won't finish until the reset is done. This
+		 * isn't the case at least when we get here by doing a
+		 * simulated reset via debugfs, so get an RPM reference.
+		 */
+		intel_runtime_pm_get(dev_priv);
+
+		intel_prepare_reset(dev);
+
+		/*
+		 * All state reset _must_ be completed before we update the
+		 * reset counter, for otherwise waiters might miss the reset
+		 * pending state and not properly drop locks, resulting in
+		 * deadlocks with the reset work.
+		 */
+		ret = i915_reset(dev);
+
+		intel_finish_reset(dev);
+
+		intel_runtime_pm_put(dev_priv);
+
+		if (ret == 0) {
+			/*
+			 * After all the gem state is reset, increment the reset
+			 * counter and wake up everyone waiting for the reset to
+			 * complete.
+			 *
+			 * Since unlock operations are a one-sided barrier only,
+			 * we need to insert a barrier here to order any seqno
+			 * updates before the counter increment.
+			 */
+			smp_mb__before_atomic();
+			atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+			kobject_uevent_env(&dev->primary->kdev->kobj,
+					   KOBJ_CHANGE, reset_done_event);
+		} else {
+			atomic_or(I915_WEDGED, &error->reset_counter);
+		}
+
+		/*
+		 * Note: The wake_up also serves as a memory barrier so that
+	 * waiters see the updated value of the reset counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, true);
+	}
+}
+
+static void i915_report_and_clear_eir(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t instdone[I915_NUM_INSTDONE_REG];
+	u32 eir = I915_READ(EIR);
+	int pipe, i;
+
+	if (!eir)
+		return;
+
+	pr_err("render error detected, EIR: 0x%08x\n", eir);
+
+	i915_get_extra_instdone(dev, instdone);
+
+	if (IS_G4X(dev)) {
+		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+			u32 ipeir = I915_READ(IPEIR_I965);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			for (i = 0; i < ARRAY_SIZE(instdone); i++)
+				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+			I915_WRITE(IPEIR_I965, ipeir);
+			POSTING_READ(IPEIR_I965);
+		}
+		if (eir & GM45_ERROR_PAGE_TABLE) {
+			u32 pgtbl_err = I915_READ(PGTBL_ER);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
+			I915_WRITE(PGTBL_ER, pgtbl_err);
+			POSTING_READ(PGTBL_ER);
+		}
+	}
+
+	if (!IS_GEN2(dev)) {
+		if (eir & I915_ERROR_PAGE_TABLE) {
+			u32 pgtbl_err = I915_READ(PGTBL_ER);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
+			I915_WRITE(PGTBL_ER, pgtbl_err);
+			POSTING_READ(PGTBL_ER);
+		}
+	}
+
+	if (eir & I915_ERROR_MEMORY_REFRESH) {
+		pr_err("memory refresh error:\n");
+		for_each_pipe(dev_priv, pipe)
+			pr_err("pipe %c stat: 0x%08x\n",
+			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+		/* pipestat has already been acked */
+	}
+	if (eir & I915_ERROR_INSTRUCTION) {
+		pr_err("instruction error\n");
+		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
+		for (i = 0; i < ARRAY_SIZE(instdone); i++)
+			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+		if (INTEL_INFO(dev)->gen < 4) {
+			u32 ipeir = I915_READ(IPEIR);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
+			I915_WRITE(IPEIR, ipeir);
+			POSTING_READ(IPEIR);
+		} else {
+			u32 ipeir = I915_READ(IPEIR_I965);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+			I915_WRITE(IPEIR_I965, ipeir);
+			POSTING_READ(IPEIR_I965);
+		}
+	}
+
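+	/* Writing the logged bits back to EIR acks them; anything still set
+	 * afterwards is treated as stuck and gets masked via EMR below. */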
+	I915_WRITE(EIR, eir);
+	POSTING_READ(EIR);
+	eir = I915_READ(EIR);
+	if (eir) {
+		/*
+		 * Some errors might have become stuck; mask them.
+		 */
+		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+		I915_WRITE(EMR, I915_READ(EMR) | eir);
+		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	}
+}
+
+/**
+ * i915_handle_error - handle a gpu error
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev, bool wedged,
+		       const char *fmt, ...)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	va_list args;
+	char error_msg[80];
+
+	va_start(args, fmt);
+	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+	va_end(args);
+
+	i915_capture_error_state(dev, wedged, error_msg);
+	i915_report_and_clear_eir(dev);
+
+	if (wedged) {
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
+				&dev_priv->gpu_error.reset_counter);
+
+		/*
+		 * Wakeup waiting processes so that the reset function
+		 * i915_reset_and_wakeup doesn't deadlock trying to grab
+		 * various locks. By bumping the reset counter first, the woken
+		 * processes will see a reset in progress and back off,
+		 * releasing their locks and then wait for the reset completion.
+		 * We must do this for _all_ gpu waiters that might hold locks
+		 * that the reset work needs to acquire.
+		 *
+		 * Note: The wake_up serves as the required memory barrier to
+		 * ensure that the waiters see the updated value of the reset
+		 * counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, false);
+	}
+
+	i915_reset_and_wakeup(dev);
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	if (INTEL_INFO(dev)->gen >= 4)
+		i915_enable_pipestat(dev_priv, pipe,
+				     PIPE_START_VBLANK_INTERRUPT_STATUS);
+	else
+		i915_enable_pipestat(dev_priv, pipe,
+				     PIPE_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+						     DE_PIPE_VBLANK(pipe);
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_enable_display_irq(dev_priv, bit);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, pipe,
+			     PIPE_START_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, pipe,
+			      PIPE_VBLANK_INTERRUPT_STATUS |
+			      PIPE_START_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+						     DE_PIPE_VBLANK(pipe);
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_disable_display_irq(dev_priv, bit);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, pipe,
+			      PIPE_START_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static bool
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
+{
+	return (list_empty(&ring->request_list) ||
+		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+}
+
+static bool
+ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+{
+	if (INTEL_INFO(dev)->gen >= 8) {
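+		/* Shifting out the low 23 bits leaves the MI command type and
+		 * opcode; 0x1c matches MI_SEMAPHORE_WAIT on gen8+. */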
+		return (ipehr >> 23) == 0x1c;
+	} else {
+		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+				 MI_SEMAPHORE_REGISTER);
+	}
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *signaller;
+	int i;
+
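+	/*
+	 * gen8+ identifies the signalling ring by the GGTT offset of the
+	 * semaphore; older gens encode it in the MBOX sync bits of the
+	 * IPEHR command itself.
+	 */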
+	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
+		for_each_ring(signaller, dev_priv, i) {
+			if (ring == signaller)
+				continue;
+
+			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+				return signaller;
+		}
+	} else {
+		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+		for_each_ring(signaller, dev_priv, i) {
+			if (ring == signaller)
+				continue;
+
+			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+				return signaller;
+		}
+	}
+
+	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+		  ring->id, ipehr, offset);
+
+	return NULL;
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 cmd, ipehr, head;
+	u64 offset = 0;
+	int i, backwards;
+
+	/*
+	 * This function does not support execlist mode - any attempt to
+	 * proceed further into this function will result in a kernel panic
+	 * when dereferencing ring->buffer, which is not set up in execlist
+	 * mode.
+	 *
+	 * The correct way of doing it would be to derive the currently
+	 * executing ring buffer from the current context, which is derived
+	 * from the currently running request. Unfortunately, to get the
+	 * current request we would have to grab the struct_mutex before doing
+	 * anything else, which would be ill-advised since some other thread
+	 * might have grabbed it already and managed to hang itself, causing
+	 * the hang checker to deadlock.
+	 *
+	 * Therefore, this function does not support execlist mode in its
+	 * current form. Just return NULL and move on.
+	 */
+	if (ring->buffer == NULL)
+		return NULL;
+
+	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+		return NULL;
+
+	/*
+	 * HEAD is likely pointing to the dword after the actual command,
+	 * so scan backwards until we find the MBOX. But limit it to just 3
+	 * or 4 dwords depending on the semaphore wait command size.
+	 * Note that we don't care about ACTHD here since that might
+	 * point at a batch, and semaphores are always emitted into the
+	 * ringbuffer itself.
+	 */
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+
+	for (i = backwards; i; --i) {
+		/*
+		 * Be paranoid and presume the hw has gone off into the wild -
+		 * our ring is smaller than what the hardware (and hence
+		 * HEAD_ADDR) allows. Also handles wrap-around.
+		 */
+		head &= ring->buffer->size - 1;
+
+		/* This here seems to blow up */
+		cmd = ioread32(ring->buffer->virtual_start + head);
+		if (cmd == ipehr)
+			break;
+
+		head -= 4;
+	}
+
+	if (!i)
+		return NULL;
+
+	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		offset = ioread32(ring->buffer->virtual_start + head + 12);
+		offset <<= 32;
+		offset |= ioread32(ring->buffer->virtual_start + head + 8);
+	}
+	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+}
+
+static int semaphore_passed(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *signaller;
+	u32 seqno;
+
+	ring->hangcheck.deadlock++;
+
+	signaller = semaphore_waits_for(ring, &seqno);
+	if (signaller == NULL)
+		return -1;
+
+	/* Prevent pathological recursion due to driver bugs */
+	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+		return -1;
+
+	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+		return 1;
+
+	/* cursory check for an unkickable deadlock */
+	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(signaller) < 0)
+		return -1;
+
+	return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		ring->hangcheck.deadlock = 0;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	if (acthd != ring->hangcheck.acthd) {
+		if (acthd > ring->hangcheck.max_acthd) {
+			ring->hangcheck.max_acthd = acthd;
+			return HANGCHECK_ACTIVE;
+		}
+
+		return HANGCHECK_ACTIVE_LOOP;
+	}
+
+	if (IS_GEN2(dev))
+		return HANGCHECK_HUNG;
+
+	/* Is the chip hanging on a WAIT_FOR_EVENT?
+	 * If so we can simply poke the RB_WAIT bit
+	 * and break the hang. This should work on
+	 * all but the second generation chipsets.
+	 */
+	tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		i915_handle_error(dev, false,
+				  "Kicking stuck wait on %s",
+				  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return HANGCHECK_KICK;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+		switch (semaphore_passed(ring)) {
+		default:
+			return HANGCHECK_HUNG;
+		case 1:
+			i915_handle_error(dev, false,
+					  "Kicking stuck semaphore on %s",
+					  ring->name);
+			I915_WRITE_CTL(ring, tmp);
+			return HANGCHECK_KICK;
+		case 0:
+			return HANGCHECK_WAIT;
+		}
+	}
+
+	return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress
+ * and if there is no progress, the hangcheck score for that ring is
+ * increased. Further, acthd is inspected to see if the ring is stuck; if
+ * it is, we kick the ring. If we see no progress on three subsequent
+ * calls we assume the chip is wedged and try to fix it by resetting it.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     gpu_error.hangcheck_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_engine_cs *ring;
+	int i;
+	int busy_count = 0, rings_hung = 0;
+	bool stuck[I915_NUM_RINGS] = { 0 };
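+/* Score increments applied per hangcheck tick; a ring is reported hung once
+ * its score reaches HANGCHECK_SCORE_RING_HUNG (checked at the bottom). */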
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+
+	if (!i915.enable_hangcheck)
+		return;
+
+	for_each_ring(ring, dev_priv, i) {
+		u64 acthd;
+		u32 seqno;
+		bool busy = true;
+
+		semaphore_clear_deadlocks(dev_priv);
+
+		seqno = ring->get_seqno(ring, false);
+		acthd = intel_ring_get_active_head(ring);
+
+		if (ring->hangcheck.seqno == seqno) {
+			if (ring_idle(ring, seqno)) {
+				ring->hangcheck.action = HANGCHECK_IDLE;
+
+				if (waitqueue_active(&ring->irq_queue)) {
+					/* Issue a wake-up to catch stuck h/w. */
+					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+								  ring->name);
+						else
+							DRM_INFO("Fake missed irq on %s\n",
+								 ring->name);
+						wake_up_all(&ring->irq_queue);
+					}
+					/* Safeguard against driver failure */
+					ring->hangcheck.score += BUSY;
+				} else
+					busy = false;
+			} else {
+				/* We always increment the hangcheck score
+				 * if the ring is busy and still processing
+				 * the same request, so that no single request
+				 * can run indefinitely (such as a chain of
+				 * batches). The only time we do not increment
+				 * the hangcheck score on this ring is if it
+				 * is in a legitimate wait for another ring.
+				 * In that case the waiting ring is a
+				 * victim and we want to be sure we catch the
+				 * right culprit. Then every time we kick
+				 * the ring, we add a small increment to the
+				 * score so that we can catch a batch that is
+				 * being repeatedly kicked and so responsible
+				 * for stalling the machine.
+				 */
+				ring->hangcheck.action = ring_stuck(ring,
+								    acthd);
+
+				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
+				case HANGCHECK_WAIT:
+				case HANGCHECK_ACTIVE:
+					break;
+				case HANGCHECK_ACTIVE_LOOP:
+					ring->hangcheck.score += BUSY;
+					break;
+				case HANGCHECK_KICK:
+					ring->hangcheck.score += KICK;
+					break;
+				case HANGCHECK_HUNG:
+					ring->hangcheck.score += HUNG;
+					stuck[i] = true;
+					break;
+				}
+			}
+		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
+			/* Gradually reduce the count so that we catch DoS
+			 * attempts across multiple batches.
+			 */
+			if (ring->hangcheck.score > 0)
+				ring->hangcheck.score--;
+
+			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+		}
+
+		ring->hangcheck.seqno = seqno;
+		ring->hangcheck.acthd = acthd;
+		busy_count += busy;
+	}
+
+	for_each_ring(ring, dev_priv, i) {
+		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+			DRM_INFO("%s on %s\n",
+				 stuck[i] ? "stuck" : "no progress",
+				 ring->name);
+			rings_hung++;
+		}
+	}
+
+	if (rings_hung)
+		return i915_handle_error(dev, true, "Ring hung");
+
+	if (busy_count)
+		/* Reset the timer in case the chip hangs without another
+		 * request being added */
+		i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+
+	if (!i915.enable_hangcheck)
+		return;
+
+	/* Don't continually defer the hangcheck so that it is always run at
+	 * least once after work has been scheduled on any ring. Otherwise,
+	 * we will ignore a hung ring if a second ring is kept busy.
+	 */
+
+	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
+}
+
+static void ibx_irq_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	GEN5_IRQ_RESET(SDE);
+
+	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+		I915_WRITE(SERR_INT, 0xffffffff);
+}
+
+/*
+ * SDEIER is also touched by the interrupt handler to work around missed PCH
+ * interrupts. Hence we can't update it after the interrupt handler is enabled -
+ * instead we unconditionally enable all PCH interrupt sources here, but then
+ * only unmask them as needed with SDEIMR.
+ *
+ * This function needs to be called before interrupts are enabled.
+ */
+static void ibx_irq_pre_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	WARN_ON(I915_READ(SDEIER) != 0);
+	I915_WRITE(SDEIER, 0xffffffff);
+	POSTING_READ(SDEIER);
+}
+
+static void gen5_gt_irq_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	GEN5_IRQ_RESET(GT);
+	if (INTEL_INFO(dev)->gen >= 6)
+		GEN5_IRQ_RESET(GEN6_PM);
+}
+
+/* drm_dma.h hooks */
+static void ironlake_irq_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+
+	GEN5_IRQ_RESET(DE);
+	if (IS_GEN7(dev))
+		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+
+	gen5_gt_irq_reset(dev);
+
+	ibx_irq_reset(dev);
+}
+
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+
+	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+	GEN5_IRQ_RESET(VLV_);
+}
+
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* VLV magic */
+	I915_WRITE(VLV_IMR, 0);
+	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+	gen5_gt_irq_reset(dev);
+
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+
+	vlv_display_irq_reset(dev_priv);
+}
+
+static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+	GEN8_IRQ_RESET_NDX(GT, 0);
+	GEN8_IRQ_RESET_NDX(GT, 1);
+	GEN8_IRQ_RESET_NDX(GT, 2);
+	GEN8_IRQ_RESET_NDX(GT, 3);
+}
+
+static void gen8_irq_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	gen8_gt_irq_reset(dev_priv);
+
+	for_each_pipe(dev_priv, pipe)
+		if (intel_display_power_is_enabled(dev_priv,
+						   POWER_DOMAIN_PIPE(pipe)))
+			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+	GEN5_IRQ_RESET(GEN8_DE_PORT_);
+	GEN5_IRQ_RESET(GEN8_DE_MISC_);
+	GEN5_IRQ_RESET(GEN8_PCU_);
+
+	if (HAS_PCH_SPLIT(dev))
+		ibx_irq_reset(dev);
+}
+
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask)
+{
+	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+
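+	/*
+	 * The DE pipe interrupt registers sit in the power well that was just
+	 * enabled, so reprogram them from the cached de_irq_mask[] before any
+	 * interrupt can be delivered.
+	 */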
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (pipe_mask & 1 << PIPE_A)
+		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
+				  dev_priv->de_irq_mask[PIPE_A],
+				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
+	if (pipe_mask & 1 << PIPE_B)
+		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
+				  dev_priv->de_irq_mask[PIPE_B],
+				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+	if (pipe_mask & 1 << PIPE_C)
+		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
+				  dev_priv->de_irq_mask[PIPE_C],
+				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void cherryview_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	gen8_gt_irq_reset(dev_priv);
+
+	GEN5_IRQ_RESET(GEN8_PCU_);
+
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+
+	vlv_display_irq_reset(dev_priv);
+}
+
+static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+				  const u32 hpd[HPD_NUM_PINS])
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_encoder *encoder;
+	u32 enabled_irqs = 0;
+
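+	/* Collect the HPD irq bits of every encoder whose pin is enabled. */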
+	for_each_intel_encoder(dev, encoder)
+		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+			enabled_irqs |= hpd[encoder->hpd_pin];
+
+	return enabled_irqs;
+}
+
+static void ibx_hpd_irq_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_irqs, hotplug, enabled_irqs;
+
+	if (HAS_PCH_IBX(dev)) {
+		hotplug_irqs = SDE_HOTPLUG_MASK;
+		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+	} else {
+		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
+		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+	}
+
+	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+	/*
+	 * Enable digital hotplug on the PCH, and configure the DP short pulse
+	 * duration to 2ms (which is the minimum in the Display Port spec).
+	 * The pulse duration bits are reserved on LPT+.
+	 */
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+	/*
+	 * When CPU and PCH are on the same package, port A
+	 * HPD must be enabled in both north and south.
+	 */
+	if (HAS_PCH_LPT_LP(dev))
+		hotplug |= PORTA_HOTPLUG_ENABLE;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static void spt_hpd_irq_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_irqs, hotplug, enabled_irqs;
+
+	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+
+	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+	/* Enable digital hotplug on the PCH */
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
+		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+	hotplug |= PORTE_HOTPLUG_ENABLE;
+	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+}
+
+static void ilk_hpd_irq_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_irqs, hotplug, enabled_irqs;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
+		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+
+		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
+		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+
+		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+	} else {
+		hotplug_irqs = DE_DP_A_HOTPLUG;
+		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+
+		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+	}
+
+	/*
+	 * Enable digital hotplug on the CPU, and configure the DP short pulse
+	 * duration to 2ms (which is the minimum in the Display Port spec).
+	 * The pulse duration bits are reserved on HSW+.
+	 */
+	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
+	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
+	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+
+	ibx_hpd_irq_setup(dev);
+}
+
+static void bxt_hpd_irq_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_irqs, hotplug, enabled_irqs;
+
+	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+
+	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
+		PORTA_HOTPLUG_ENABLE;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask;
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	if (HAS_PCH_IBX(dev))
+		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
+	else
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+
+	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
+	I915_WRITE(SDEIMR, ~mask);
+}
+
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pm_irqs, gt_irqs;
+
+	pm_irqs = gt_irqs = 0;
+
+	dev_priv->gt_irq_mask = ~0;
+	if (HAS_L3_DPF(dev)) {
+		/* L3 parity interrupt is always unmasked. */
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+		gt_irqs |= GT_PARITY_ERROR(dev);
+	}
+
+	gt_irqs |= GT_RENDER_USER_INTERRUPT;
+	if (IS_GEN5(dev)) {
+		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+			   ILK_BSD_USER_INTERRUPT;
+	} else {
+		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+	}
+
+	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		/*
+		 * RPS interrupts will get enabled/disabled on demand when RPS
+		 * itself is enabled/disabled.
+		 */
+		if (HAS_VEBOX(dev))
+			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+
+		dev_priv->pm_irq_mask = 0xffffffff;
+		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+	}
+}
+
+static int ironlake_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 display_mask, extra_mask;
+
+	if (INTEL_INFO(dev)->gen >= 7) {
+		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+				DE_PLANEB_FLIP_DONE_IVB |
+				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
+		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+			      DE_DP_A_HOTPLUG_IVB);
+	} else {
+		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+				DE_AUX_CHANNEL_A |
+				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+				DE_POISON);
+		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+			      DE_DP_A_HOTPLUG);
+	}
+
+	dev_priv->irq_mask = ~display_mask;
+
+	I915_WRITE(HWSTAM, 0xeffe);
+
+	ibx_irq_pre_postinstall(dev);
+
+	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+
+	gen5_gt_irq_postinstall(dev);
+
+	ibx_irq_postinstall(dev);
+
+	if (IS_IRONLAKE_M(dev)) {
+		/* Enable PCU event interrupts
+		 *
+		 * spinlocking not required here for correctness since interrupt
+		 * setup is guaranteed to run in single-threaded context. But we
+		 * need it to make the assert_spin_locked happy. */
+		spin_lock_irq(&dev_priv->irq_lock);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+		spin_unlock_irq(&dev_priv->irq_lock);
+	}
+
+	return 0;
+}
+
+static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
+{
+	u32 pipestat_mask;
+	u32 iir_mask;
+	enum pipe pipe;
+
+	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+			PIPE_FIFO_UNDERRUN_STATUS;
+
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
+	POSTING_READ(PIPESTAT(PIPE_A));
+
+	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+			PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	for_each_pipe(dev_priv, pipe)
+		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+
+	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+	if (IS_CHERRYVIEW(dev_priv))
+		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+	dev_priv->irq_mask &= ~iir_mask;
+
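+	/*
+	 * Clear VLV_IIR twice: IIR can latch a second event behind the one
+	 * being cleared, so a single write may leave a bit set.
+	 */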
+	I915_WRITE(VLV_IIR, iir_mask);
+	I915_WRITE(VLV_IIR, iir_mask);
+	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	POSTING_READ(VLV_IMR);
+}
+
+static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
+{
+	u32 pipestat_mask;
+	u32 iir_mask;
+	enum pipe pipe;
+
+	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+	if (IS_CHERRYVIEW(dev_priv))
+		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+
+	dev_priv->irq_mask |= iir_mask;
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+	I915_WRITE(VLV_IIR, iir_mask);
+	I915_WRITE(VLV_IIR, iir_mask);
+	POSTING_READ(VLV_IIR);
+
+	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+			PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	for_each_pipe(dev_priv, pipe)
+		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
+
+	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+			PIPE_FIFO_UNDERRUN_STATUS;
+
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
+	POSTING_READ(PIPESTAT(PIPE_A));
+}
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (dev_priv->display_irqs_enabled)
+		return;
+
+	dev_priv->display_irqs_enabled = true;
+
+	if (intel_irqs_enabled(dev_priv))
+		valleyview_display_irqs_install(dev_priv);
+}
+
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (!dev_priv->display_irqs_enabled)
+		return;
+
+	dev_priv->display_irqs_enabled = false;
+
+	if (intel_irqs_enabled(dev_priv))
+		valleyview_display_irqs_uninstall(dev_priv);
+}
+
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	dev_priv->irq_mask = ~0;
+
+	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
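+	/* IIR is cleared twice since it can queue a second event. */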
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	POSTING_READ(VLV_IMR);
+
+	/* Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		valleyview_display_irqs_install(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	vlv_display_irq_postinstall(dev_priv);
+
+	gen5_gt_irq_postinstall(dev);
+
+	/* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+	return 0;
+}
+
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	/* These are interrupts we'll toggle with the ring mask register */
+	uint32_t gt_interrupts[] = {
+		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
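+		/* GT2 carries the PM/RPS interrupts; initialized separately below */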
+		0,
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+		};
+
+	dev_priv->pm_irq_mask = 0xffffffff;
+	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+	/*
+	 * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled.
+	 */
+	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+	uint32_t de_pipe_enables;
+	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
+	u32 de_port_enables;
+	enum pipe pipe;
+
+	if (INTEL_INFO(dev_priv)->gen >= 9) {
+		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+				  GEN9_AUX_CHANNEL_D;
+		if (IS_BROXTON(dev_priv))
+			de_port_masked |= BXT_DE_PORT_GMBUS;
+	} else {
+		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+	}
+
+	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+					   GEN8_PIPE_FIFO_UNDERRUN;
+
+	de_port_enables = de_port_masked;
+	if (IS_BROXTON(dev_priv))
+		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
+	else if (IS_BROADWELL(dev_priv))
+		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
+
+	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+
+	for_each_pipe(dev_priv, pipe)
+		if (intel_display_power_is_enabled(dev_priv,
+				POWER_DOMAIN_PIPE(pipe)))
+			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+					  dev_priv->de_irq_mask[pipe],
+					  de_pipe_enables);
+
+	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_SPLIT(dev))
+		ibx_irq_pre_postinstall(dev);
+
+	gen8_gt_irq_postinstall(dev_priv);
+	gen8_de_irq_postinstall(dev_priv);
+
+	if (HAS_PCH_SPLIT(dev))
+		ibx_irq_postinstall(dev);
+
+	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return 0;
+}
+
+static int cherryview_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	vlv_display_irq_postinstall(dev_priv);
+
+	gen8_gt_irq_postinstall(dev_priv);
+
+	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return 0;
+}
+
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	gen8_irq_reset(dev);
+}
+
+static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+	/* Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		valleyview_display_irqs_uninstall(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	vlv_display_irq_reset(dev_priv);
+
+	dev_priv->irq_mask = ~0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE(VLV_MASTER_IER, 0);
+
+	gen5_gt_irq_reset(dev);
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+
+	vlv_display_irq_uninstall(dev_priv);
+}
+
+static void cherryview_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	gen8_gt_irq_reset(dev_priv);
+
+	GEN5_IRQ_RESET(GEN8_PCU_);
+
+	vlv_display_irq_uninstall(dev_priv);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	ironlake_irq_reset(dev);
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE16(EMR,
+		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+
+	I915_WRITE16(IER,
+		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		     I915_USER_INTERRUPT);
+	POSTING_READ16(IER);
+
+	/* Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irq(&dev_priv->irq_lock);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+			       int plane, int pipe, u32 iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+	if (!intel_pipe_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		goto check_page_flip;
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ16(ISR) & flip_pending)
+		goto check_page_flip;
+
+	intel_prepare_page_flip(dev, plane);
+	intel_finish_page_flip(dev, pipe);
+	return true;
+
+check_page_flip:
+	intel_check_page_flip(dev, pipe);
+	return false;
+}
+
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 iir, new_iir;
+	u32 pipe_stats[2];
+	int pipe;
+	u16 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	iir = I915_READ16(IIR);
+	if (iir == 0)
+		return IRQ_NONE;
+
+	while (iir & ~flip_mask) {
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+
+		for_each_pipe(dev_priv, pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff)
+				I915_WRITE(reg, pipe_stats[pipe]);
+		}
+		spin_unlock(&dev_priv->irq_lock);
+
+		I915_WRITE16(IIR, iir & ~flip_mask);
+		new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(&dev_priv->ring[RCS]);
+
+		for_each_pipe(dev_priv, pipe) {
+			int plane = pipe;
+			if (HAS_FBC(dev))
+				plane = !plane;
+
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i8xx_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+
+			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+				intel_cpu_fifo_underrun_irq_handler(dev_priv,
+								    pipe);
+		}
+
+		iir = new_iir;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	for_each_pipe(dev_priv, pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xeffe);
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enable_mask;
+
+	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_ASLE_INTERRUPT |
+		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+
+	enable_mask =
+		I915_ASLE_INTERRUPT |
+		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_USER_INTERRUPT;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+		POSTING_READ(PORT_HOTPLUG_EN);
+
+		/* Enable in IER... */
+		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+		/* and unmask in IMR */
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+	}
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	i915_enable_asle_pipestat(dev);
+
+	/* Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irq(&dev_priv->irq_lock);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+			       int plane, int pipe, u32 iir)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+	if (!intel_pipe_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		goto check_page_flip;
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ(ISR) & flip_pending)
+		goto check_page_flip;
+
+	intel_prepare_page_flip(dev, plane);
+	intel_finish_page_flip(dev, pipe);
+	return true;
+
+check_page_flip:
+	intel_check_page_flip(dev, pipe);
+	return false;
+}
+
+static irqreturn_t i915_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+	int pipe, ret = IRQ_NONE;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	iir = I915_READ(IIR);
+	do {
+		bool irq_received = (iir & ~flip_mask) != 0;
+		bool blc_event = false;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+
+		for_each_pipe(dev_priv, pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = true;
+			}
+		}
+		spin_unlock(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (I915_HAS_HOTPLUG(dev) &&
+		    iir & I915_DISPLAY_PORT_INTERRUPT)
+			i9xx_hpd_irq_handler(dev);
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(&dev_priv->ring[RCS]);
+
+		for_each_pipe(dev_priv, pipe) {
+			int plane = pipe;
+			if (HAS_FBC(dev))
+				plane = !plane;
+
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i915_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+
+			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+				intel_cpu_fifo_underrun_irq_handler(dev_priv,
+								    pipe);
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		ret = IRQ_HANDLED;
+		iir = new_iir;
+	} while (iir & ~flip_mask);
+
+	return ret;
+}
+
+static void i915_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xffff);
+	for_each_pipe(dev_priv, pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xeffe);
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
+
+static int i965_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enable_mask;
+	u32 error_mask;
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+			       I915_DISPLAY_PORT_INTERRUPT |
+			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask = ~dev_priv->irq_mask;
+	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+	enable_mask |= I915_USER_INTERRUPT;
+
+	if (IS_G4X(dev))
+		enable_mask |= I915_BSD_USER_INTERRUPT;
+
+	/* Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irq(&dev_priv->irq_lock);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/*
+	 * Enable some error detection, note the instruction error mask
+	 * bit is reserved, so we leave it masked.
+	 */
+	if (IS_G4X(dev)) {
+		error_mask = ~(GM45_ERROR_PAGE_TABLE |
+			       GM45_ERROR_MEM_PRIV |
+			       GM45_ERROR_CP_PRIV |
+			       I915_ERROR_MEMORY_REFRESH);
+	} else {
+		error_mask = ~(I915_ERROR_PAGE_TABLE |
+			       I915_ERROR_MEMORY_REFRESH);
+	}
+	I915_WRITE(EMR, error_mask);
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
+	i915_enable_asle_pipestat(dev);
+
+	return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_en;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	/* Note HDMI and DP share hotplug bits */
+	/* enable bits are the same for all generations */
+	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+	/*
+	 * Programming the CRT detection parameters tends to generate a
+	 * spurious hotplug event about three seconds later. So just do it
+	 * once.
+	 */
+	if (IS_G4X(dev))
+		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+	/* Ignore TV since it's buggy */
+	i915_hotplug_interrupt_update_locked(dev_priv,
+					     HOTPLUG_INT_EN_MASK |
+					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
+					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
+					     hotplug_en);
+}
+
+static irqreturn_t i965_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 iir, new_iir;
+	u32 pipe_stats[I915_MAX_PIPES];
+	int ret = IRQ_NONE, pipe;
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	iir = I915_READ(IIR);
+
+	for (;;) {
+		bool irq_received = (iir & ~flip_mask) != 0;
+		bool blc_event = false;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock(&dev_priv->irq_lock);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+
+		for_each_pipe(dev_priv, pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = true;
+			}
+		}
+		spin_unlock(&dev_priv->irq_lock);
+
+		if (!irq_received)
+			break;
+
+		ret = IRQ_HANDLED;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (iir & I915_DISPLAY_PORT_INTERRUPT)
+			i9xx_hpd_irq_handler(dev);
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(&dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(&dev_priv->ring[VCS]);
+
+		for_each_pipe(dev_priv, pipe) {
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+			    i915_handle_vblank(dev, pipe, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+
+			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+			gmbus_irq_handler(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	}
+
+	return ret;
+}
+
+static void i965_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	for_each_pipe(dev_priv, pipe)
+		I915_WRITE(PIPESTAT(pipe),
+			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support including work items, timers
+ * and all the vtables. It does not setup the interrupt itself though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_hpd_init_work(dev_priv);
+
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+
+	/* Let's track the enabled rps events */
+	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		/* WaGsvRC0ResidencyMethod:vlv */
+		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+	else
+		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+
+	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+			  i915_hangcheck_elapsed);
+
+	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	if (IS_GEN2(dev_priv)) {
+		dev->max_vblank_count = 0;
+		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
+	} else {
+		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+	}
+
+	/*
+	 * Opt out of the vblank disable timer on everything except gen2.
+	 * Gen2 doesn't have a hardware frame counter and so depends on
+	 * vblank interrupts to produce sane vblank sequence numbers.
+	 */
+	if (!IS_GEN2(dev_priv))
+		dev->vblank_disable_immediate = true;
+
+	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		dev->driver->irq_handler = cherryview_irq_handler;
+		dev->driver->irq_preinstall = cherryview_irq_preinstall;
+		dev->driver->irq_postinstall = cherryview_irq_postinstall;
+		dev->driver->irq_uninstall = cherryview_irq_uninstall;
+		dev->driver->enable_vblank = valleyview_enable_vblank;
+		dev->driver->disable_vblank = valleyview_disable_vblank;
+		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		dev->driver->irq_handler = valleyview_irq_handler;
+		dev->driver->irq_preinstall = valleyview_irq_preinstall;
+		dev->driver->irq_postinstall = valleyview_irq_postinstall;
+		dev->driver->irq_uninstall = valleyview_irq_uninstall;
+		dev->driver->enable_vblank = valleyview_enable_vblank;
+		dev->driver->disable_vblank = valleyview_disable_vblank;
+		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
+		dev->driver->irq_handler = gen8_irq_handler;
+		dev->driver->irq_preinstall = gen8_irq_reset;
+		dev->driver->irq_postinstall = gen8_irq_postinstall;
+		dev->driver->irq_uninstall = gen8_irq_uninstall;
+		dev->driver->enable_vblank = gen8_enable_vblank;
+		dev->driver->disable_vblank = gen8_disable_vblank;
+		if (IS_BROXTON(dev))
+			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+		else if (HAS_PCH_SPT(dev))
+			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+		else
+			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev->driver->irq_handler = ironlake_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_reset;
+		dev->driver->irq_postinstall = ironlake_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ironlake_enable_vblank;
+		dev->driver->disable_vblank = ironlake_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+	} else {
+		if (INTEL_INFO(dev_priv)->gen == 2) {
+			dev->driver->irq_preinstall = i8xx_irq_preinstall;
+			dev->driver->irq_postinstall = i8xx_irq_postinstall;
+			dev->driver->irq_handler = i8xx_irq_handler;
+			dev->driver->irq_uninstall = i8xx_irq_uninstall;
+		} else if (INTEL_INFO(dev_priv)->gen == 3) {
+			dev->driver->irq_preinstall = i915_irq_preinstall;
+			dev->driver->irq_postinstall = i915_irq_postinstall;
+			dev->driver->irq_uninstall = i915_irq_uninstall;
+			dev->driver->irq_handler = i915_irq_handler;
+		} else {
+			dev->driver->irq_preinstall = i965_irq_preinstall;
+			dev->driver->irq_postinstall = i965_irq_postinstall;
+			dev->driver->irq_uninstall = i965_irq_uninstall;
+			dev->driver->irq_handler = i965_irq_handler;
+		}
+		if (I915_HAS_HOTPLUG(dev_priv))
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+		dev->driver->enable_vblank = i915_enable_vblank;
+		dev->driver->disable_vblank = i915_disable_vblank;
+	}
+}
+
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves the
+ * hotplug handling disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm.irqs_enabled = true;
+
+	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+}
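+
+/*
+ * A minimal sketch of the two-stage bring-up described above (assumed
+ * call order, not copied verbatim from the driver's probe path):
+ *
+ *	intel_irq_init(dev_priv);	- vtables, work items, timers
+ *	intel_irq_install(dev_priv);	- actually request the interrupt
+ */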
+
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+	drm_irq_uninstall(dev_priv->dev);
+	intel_hpd_cancel_work(dev_priv);
+	dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+	dev_priv->pm.irqs_enabled = false;
+	synchronize_irq(dev_priv->dev->irq);
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+	dev_priv->pm.irqs_enabled = true;
+	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 0000000..4be13a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+
+struct i915_params i915 __read_mostly = {
+	.modeset = -1,
+	.panel_ignore_lid = 1,
+	.semaphores = -1,
+	.lvds_channel_mode = 0,
+	.panel_use_ssc = -1,
+	.vbt_sdvo_panel_type = -1,
+	.enable_rc6 = -1,
+	.enable_fbc = -1,
+	.enable_execlists = -1,
+	.enable_hangcheck = true,
+	.enable_ppgtt = -1,
+	.enable_psr = 0,
+	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+	.disable_power_well = -1,
+	.enable_ips = 1,
+	.fastboot = 0,
+	.prefault_disable = 0,
+	.load_detect_test = 0,
+	.reset = true,
+	.invert_brightness = 0,
+	.disable_display = 0,
+	.enable_cmd_parser = 1,
+	.disable_vtd_wa = 0,
+	.use_mmio_flip = 0,
+	.mmio_debug = 0,
+	.verbose_state_checks = 1,
+	.nuclear_pageflip = 0,
+	.edp_vswing = 0,
+	.enable_guc_submission = false,
+	.guc_log_level = -1,
+};
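+
+/*
+ * Every default above can be overridden at load time via the standard
+ * module parameter mechanism, e.g. "i915.enable_ppgtt=2" on the kernel
+ * command line or "modprobe i915 enable_ppgtt=2"; the module_param_named*()
+ * calls below bind the parameter names, types and permissions.
+ */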
+
+module_param_named(modeset, i915.modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+	"Use kernel modesetting [KMS] (0=disable, "
+	"1=on, -1=force vga console preference [default])");
+
+module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+	"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+	"-1=force lid closed, -2=force lid open)");
+
+module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
+MODULE_PARM_DESC(semaphores,
+	"Use semaphores for inter-ring sync "
+	"(default: -1 (use per-chip defaults))");
+
+module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
+MODULE_PARM_DESC(enable_rc6,
+	"Enable power-saving render C-state 6. "
+	"Different stages can be selected via bitmask values "
+	"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+	"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+	"default: -1 (use per-chip default)");
+
+module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
+MODULE_PARM_DESC(enable_fbc,
+	"Enable frame buffer compression for power savings "
+	"(default: -1 (use per-chip default))");
+
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+	 "Specify LVDS channel mode "
+	 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+	"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+	"(default: auto from VBT)");
+
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+	"Override/Ignore selection of SDVO panel mode in the VBT "
+	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+	"Periodically check GPU activity for detecting hangs. "
+	"WARNING: Disabling this can cause system wide hangs. "
+	"(default: true)");
+
+module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+MODULE_PARM_DESC(enable_ppgtt,
+	"Override PPGTT usage. "
+	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+
+module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
+MODULE_PARM_DESC(enable_execlists,
+	"Override execlists usage. "
+	"(-1=auto [default], 0=disabled, 1=enabled)");
+
+module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+	"Enable preliminary hardware support.");
+
+module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+	"Disable display power wells when possible "
+	"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
+
+module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+	"Try to skip unnecessary mode sets at boot time (default: false)");
+
+module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+	"Disable page prefaulting for pread/pwrite/reloc (default: false). "
+	"For developers only.");
+
+module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
+MODULE_PARM_DESC(load_detect_test,
+	"Force-enable the VGA load detect code for testing (default: false). "
+	"For developers only.");
+
+module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
+MODULE_PARM_DESC(invert_brightness,
+	"Invert backlight brightness "
+	"(-1 force normal, 0 machine defaults, 1 force inversion). If your "
+	"machine needs it, please report the PCI device ID, subsystem vendor "
+	"and subsystem device ID to dri-devel@lists.freedesktop.org; it will "
+	"then be included in an upcoming module version.");
+
+module_param_named(disable_display, i915.disable_display, bool, 0600);
+MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+
+module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
+MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
+
+module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+MODULE_PARM_DESC(enable_cmd_parser,
+		 "Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+		 "Use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
+MODULE_PARM_DESC(mmio_debug,
+	"Enable the MMIO debug code for the first N failures (default: off). "
+	"This may negatively affect performance.");
+
+module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
+MODULE_PARM_DESC(verbose_state_checks,
+	"Enable verbose logs (i.e. WARN_ON()) in case of unexpected hw state conditions.");
+
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+MODULE_PARM_DESC(nuclear_pageflip,
+		 "Force atomic modeset functionality; asynchronous mode is not yet supported (default: false).");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed. */
+module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
+MODULE_PARM_DESC(edp_vswing,
+		 "Ignore/Override vswing pre-emph table selection from VBT "
+		 "(0=use value from vbt [default], 1=low power swing (200mV), "
+		 "2=default swing (400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default: false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
new file mode 100644
index 0000000..cace154
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -0,0 +1,8161 @@
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+			       (pipe) == PIPE_B ? (b) : (c))
+#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+			       (port) == PORT_B ? (b) : (c))
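+
+/*
+ * The helpers above compute per-instance register offsets from the
+ * instance 0 and instance 1 addresses, assuming a uniform stride
+ * (addresses below are made up for illustration):
+ *
+ *	_PIPE(0, 0xA000, 0xB000) == 0xA000
+ *	_PIPE(1, 0xA000, 0xB000) == 0xB000
+ *	_PIPE(2, 0xA000, 0xB000) == 0xC000
+ */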
+
+#define _MASKED_FIELD(mask, value) ({					   \
+	if (__builtin_constant_p(mask))					   \
+		BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
+	if (__builtin_constant_p(value))				   \
+		BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
+	if (__builtin_constant_p(mask) && __builtin_constant_p(value))	   \
+		BUILD_BUG_ON_MSG((value) & ~(mask),			   \
+				 "Incorrect value for mask");		   \
+	(mask) << 16 | (value); })
+#define _MASKED_BIT_ENABLE(a)	({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
+#define _MASKED_BIT_DISABLE(a)	(_MASKED_FIELD((a), 0))
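+
+/*
+ * Worked example of the masked-write encoding above: the high 16 bits
+ * select which bits the hardware may change, so
+ *
+ *	_MASKED_BIT_ENABLE(1 << 5)  == 0x00200020
+ *	_MASKED_BIT_DISABLE(1 << 5) == 0x00200000
+ *
+ * update only bit 5 and leave the rest of the register untouched.
+ */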
+
+
+
+/* PCI config space */
+
+#define HPLLCC	0xc0 /* 85x only */
+#define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
+#define   GC_CLOCK_133_200		(0 << 0)
+#define   GC_CLOCK_100_200		(1 << 0)
+#define   GC_CLOCK_100_133		(2 << 0)
+#define   GC_CLOCK_133_266		(3 << 0)
+#define   GC_CLOCK_133_200_2		(4 << 0)
+#define   GC_CLOCK_133_266_2		(5 << 0)
+#define   GC_CLOCK_166_266		(6 << 0)
+#define   GC_CLOCK_166_250		(7 << 0)
+
+#define GCFGC2	0xda
+#define GCFGC	0xf0 /* 915+ only */
+#define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
+#define   GC_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+#define   GC_DISPLAY_CLOCK_267_MHZ_PNV	(0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ_PNV	(1 << 4)
+#define   GC_DISPLAY_CLOCK_444_MHZ_PNV	(2 << 4)
+#define   GC_DISPLAY_CLOCK_200_MHZ_PNV	(5 << 4)
+#define   GC_DISPLAY_CLOCK_133_MHZ_PNV	(6 << 4)
+#define   GC_DISPLAY_CLOCK_167_MHZ_PNV	(7 << 4)
+#define   GC_DISPLAY_CLOCK_MASK		(7 << 4)
+#define   GM45_GC_RENDER_CLOCK_MASK	(0xf << 0)
+#define   GM45_GC_RENDER_CLOCK_266_MHZ	(8 << 0)
+#define   GM45_GC_RENDER_CLOCK_320_MHZ	(9 << 0)
+#define   GM45_GC_RENDER_CLOCK_400_MHZ	(0xb << 0)
+#define   GM45_GC_RENDER_CLOCK_533_MHZ	(0xc << 0)
+#define   I965_GC_RENDER_CLOCK_MASK	(0xf << 0)
+#define   I965_GC_RENDER_CLOCK_267_MHZ	(2 << 0)
+#define   I965_GC_RENDER_CLOCK_333_MHZ	(3 << 0)
+#define   I965_GC_RENDER_CLOCK_444_MHZ	(4 << 0)
+#define   I965_GC_RENDER_CLOCK_533_MHZ	(5 << 0)
+#define   I945_GC_RENDER_CLOCK_MASK	(7 << 0)
+#define   I945_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
+#define   I945_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+#define   I945_GC_RENDER_CLOCK_250_MHZ	(3 << 0)
+#define   I945_GC_RENDER_CLOCK_400_MHZ	(5 << 0)
+#define   I915_GC_RENDER_CLOCK_MASK	(7 << 0)
+#define   I915_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
+#define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+#define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
+#define GCDGMBUS 0xcc
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
+
+/* Graphics reset regs */
+#define I915_GDRST 0xc0 /* PCI config register */
+#define  GRDOM_FULL	(0<<2)
+#define  GRDOM_RENDER	(1<<2)
+#define  GRDOM_MEDIA	(3<<2)
+#define  GRDOM_MASK	(3<<2)
+#define  GRDOM_RESET_STATUS (1<<1)
+#define  GRDOM_RESET_ENABLE (1<<0)
+
+#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4)
+#define  ILK_GRDOM_FULL		(0<<1)
+#define  ILK_GRDOM_RENDER	(1<<1)
+#define  ILK_GRDOM_MEDIA	(3<<1)
+#define  ILK_GRDOM_MASK		(3<<1)
+#define  ILK_GRDOM_RESET_ENABLE (1<<0)
+
+#define GEN6_MBCUNIT_SNPCR	0x900c /* for LLC config */
+#define   GEN6_MBC_SNPCR_SHIFT	21
+#define   GEN6_MBC_SNPCR_MASK	(3<<21)
+#define   GEN6_MBC_SNPCR_MAX	(0<<21)
+#define   GEN6_MBC_SNPCR_MED	(1<<21)
+#define   GEN6_MBC_SNPCR_LOW	(2<<21)
+#define   GEN6_MBC_SNPCR_MIN	(3<<21) /* only 1/16th of the cache is shared */
+
+#define VLV_G3DCTL		0x9024
+#define VLV_GSCKGCTL		0x9028
+
+#define GEN6_MBCTL		0x0907c
+#define   GEN6_MBCTL_ENABLE_BOOT_FETCH	(1 << 4)
+#define   GEN6_MBCTL_CTX_FETCH_NEEDED	(1 << 3)
+#define   GEN6_MBCTL_BME_UPDATE_ENABLE	(1 << 2)
+#define   GEN6_MBCTL_MAE_UPDATE_ENABLE	(1 << 1)
+#define   GEN6_MBCTL_BOOT_FETCH_MECH	(1 << 0)
+
+#define GEN6_GDRST	0x941c
+#define  GEN6_GRDOM_FULL		(1 << 0)
+#define  GEN6_GRDOM_RENDER		(1 << 1)
+#define  GEN6_GRDOM_MEDIA		(1 << 2)
+#define  GEN6_GRDOM_BLT			(1 << 3)
+
+#define RING_PP_DIR_BASE(ring)		((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring)	((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
+#define   PP_DIR_DCLV_2G		0xffffffff
+
+#define GEN8_RING_PDP_UDW(ring, n)	((ring)->mmio_base+0x270 + ((n) * 8 + 4))
+#define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)
+
+#define GEN8_R_PWR_CLK_STATE		0x20C8
+#define   GEN8_RPCS_ENABLE		(1 << 31)
+#define   GEN8_RPCS_S_CNT_ENABLE	(1 << 18)
+#define   GEN8_RPCS_S_CNT_SHIFT		15
+#define   GEN8_RPCS_S_CNT_MASK		(0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define   GEN8_RPCS_SS_CNT_ENABLE	(1 << 11)
+#define   GEN8_RPCS_SS_CNT_SHIFT	8
+#define   GEN8_RPCS_SS_CNT_MASK		(0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define   GEN8_RPCS_EU_MAX_SHIFT	4
+#define   GEN8_RPCS_EU_MAX_MASK		(0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define   GEN8_RPCS_EU_MIN_SHIFT	0
+#define   GEN8_RPCS_EU_MIN_MASK		(0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
+#define GAM_ECOCHK			0x4090
+#define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
+#define   ECOCHK_SNB_BIT		(1<<10)
+#define   ECOCHK_DIS_TLB		(1<<8)
+#define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
+#define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
+#define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
+#define   ECOCHK_PPGTT_GFDT_IVB		(0x1<<4)
+#define   ECOCHK_PPGTT_LLC_IVB		(0x1<<3)
+#define   ECOCHK_PPGTT_UC_HSW		(0x1<<3)
+#define   ECOCHK_PPGTT_WT_HSW		(0x2<<3)
+#define   ECOCHK_PPGTT_WB_HSW		(0x3<<3)
+
+#define GAC_ECO_BITS			0x14090
+#define   ECOBITS_SNB_BIT		(1<<13)
+#define   ECOBITS_PPGTT_CACHE64B	(3<<8)
+#define   ECOBITS_PPGTT_CACHE4B		(0<<8)
+
+#define GAB_CTL				0x24000
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
+
+#define GEN6_STOLEN_RESERVED		0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK	(0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK	(0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK	(3 << 4)
+#define GEN6_STOLEN_RESERVED_1M		(0 << 4)
+#define GEN6_STOLEN_RESERVED_512K	(1 << 4)
+#define GEN6_STOLEN_RESERVED_256K	(2 << 4)
+#define GEN6_STOLEN_RESERVED_128K	(3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK	(1 << 5)
+#define GEN7_STOLEN_RESERVED_1M		(0 << 5)
+#define GEN7_STOLEN_RESERVED_256K	(1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK	(3 << 7)
+#define GEN8_STOLEN_RESERVED_1M		(0 << 7)
+#define GEN8_STOLEN_RESERVED_2M		(1 << 7)
+#define GEN8_STOLEN_RESERVED_4M		(2 << 7)
+#define GEN8_STOLEN_RESERVED_8M		(3 << 7)
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define SR01			1
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT      29
+#define INSTR_CLIENT_MASK       0xE0000000
+#define   INSTR_MI_CLIENT       0x0
+#define   INSTR_BC_CLIENT       0x2
+#define   INSTR_RC_CLIENT       0x3
+#define INSTR_SUBCLIENT_SHIFT   27
+#define INSTR_SUBCLIENT_MASK    0x18000000
+#define   INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK	0x7000000
+#define   INSTR_26_TO_24_SHIFT	24
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define  MI_GLOBAL_GTT    (1<<22)
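+
+/*
+ * Example encoding: MI_USER_INTERRUPT below is MI_INSTR(0x02, 0), i.e.
+ * opcode 0x02 placed in bits 28:23 of the header DWord, which evaluates
+ * to 0x01000000.
+ */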
+
+#define MI_NOOP			MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
+#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH		MI_INSTR(0x04, 0)
+#define   MI_READ_FLUSH		(1 << 0)
+#define   MI_EXE_FLUSH		(1 << 1)
+#define   MI_NO_WRITE_FLUSH	(1 << 2)
+#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
+#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE			(1<<0)
+#define   MI_ARB_DISABLE		(0<<0)
+#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
+#define MI_SET_APPID		MI_INSTR(0x0e, 0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE	(0x0<<21)
+#define   MI_OVERLAY_ON		(0x1<<21)
+#define   MI_OVERLAY_OFF	(0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_A	(0 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_B	(1 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_C	(2 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_A	(4 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_B	(5 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_C	(6 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_A	(7 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_B	(8 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_C	(9 << 8)
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
+#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define   MI_SEMAPHORE_UPDATE	    (1<<21)
+#define   MI_SEMAPHORE_COMPARE	    (1<<20)
+#define   MI_SEMAPHORE_REGISTER	    (1<<18)
+#define   MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
+#define   MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
+#define   MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
+#define   MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
+#define   MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
+#define   MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
+#define   MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
+#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
+#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
+#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define   MI_SEMAPHORE_SYNC_MASK    (3<<16)
+#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
+#define   MI_MM_SPACE_GTT		(1<<8)
+#define   MI_MM_SPACE_PHYSICAL		(0<<8)
+#define   MI_SAVE_EXT_STATE_EN		(1<<3)
+#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
+#define   MI_RESTORE_INHIBIT		(1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
+#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
+#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
+#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
+#define   MI_SEMAPHORE_POLL		(1<<15)
+#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
+#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN4	MI_INSTR(0x20, 2)
+#define   MI_MEM_VIRTUAL	(1 << 22) /* 945,g33,965 */
+#define   MI_USE_GGTT		(1 << 22) /* g4x+ */
+#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
+/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many registers: simply issue x
+ *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
+#define   MI_LRI_FORCE_POSTED		(1<<12)
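+/*
+ * A sketch of a single-register load honouring the MI_NOOP rule above
+ * (offset/value are placeholders):
+ *
+ *	MI_NOOP
+ *	MI_LOAD_REGISTER_IMM(1)
+ *	<register offset>
+ *	<value to write>
+ */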
+#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
+#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
+#define   MI_INVALIDATE_TLB		(1<<18)
+#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
+#define   MI_FLUSH_DW_OP_MASK		(3<<14)
+#define   MI_FLUSH_DW_NOTIFY		(1<<8)
+#define   MI_INVALIDATE_BSD		(1<<7)
+#define   MI_FLUSH_DW_USE_GTT		(1<<2)
+#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_LOAD_REGISTER_MEM	   MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
+#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+#define   MI_BATCH_NON_SECURE		(1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965	(1<<8)
+#define   MI_BATCH_PPGTT_HSW		(1<<8)
+#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
+#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
+
+#define MI_PREDICATE_SRC0	(0x2400)
+#define MI_PREDICATE_SRC1	(0x2408)
+
+#define MI_PREDICATE_RESULT_2	(0x2214)
+#define  LOWER_SLICE_ENABLED	(1<<0)
+#define  LOWER_SLICE_DISABLED	(0<<0)
+
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define   SC_UPDATE_SCISSOR       (0x1<<1)
+#define   SC_ENABLE_MASK          (0x1<<0)
+#define   SC_ENABLE               (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define   SCI_YMIN_MASK      (0xffff<<16)
+#define   SCI_XMIN_MASK      (0xffff<<0)
+#define   SCI_YMAX_MASK      (0xffff<<16)
+#define   SCI_XMAX_MASK      (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+#define   BLT_WRITE_A			(2<<20)
+#define   BLT_WRITE_RGB			(1<<20)
+#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
+#define   BLT_DEPTH_8			(0<<24)
+#define   BLT_DEPTH_16_565		(1<<24)
+#define   BLT_DEPTH_16_1555		(2<<24)
+#define   BLT_DEPTH_32			(3<<24)
+#define   BLT_ROP_SRC_COPY		(0xcc<<16)
+#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define   ASYNC_FLIP                (1<<22)
+#define   DISPLAY_PLANE_A           (0<<20)
+#define   DISPLAY_PLANE_B           (1<<20)
+#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
+#define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
+#define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
+#define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
+#define   PIPE_CONTROL_CS_STALL				(1<<20)
+#define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
+#define   PIPE_CONTROL_MEDIA_STATE_CLEAR		(1<<16)
+#define   PIPE_CONTROL_QW_WRITE				(1<<14)
+#define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
+#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
+#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
+#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on Ironlake */
+#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
+#define   PIPE_CONTROL_NOTIFY				(1<<8)
+#define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
+#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
+#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
+#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
+#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
+#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
+#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
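+
+/*
+ * Illustrative 4-DWord PIPE_CONTROL packet in the gen6-style layout
+ * (assumed layout for this sketch):
+ *
+ *	GFX_OP_PIPE_CONTROL(4)	- header, length field encodes 4-2
+ *	flags			- e.g. PIPE_CONTROL_CS_STALL
+ *	address			- post-sync operation write target
+ *	0			- immediate data
+ */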
+
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE        MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK            MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL           MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC     MI_INSTR(0x09, 0)
+#define MI_PREDICATE            MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT           MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER      MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR            MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT           MI_INSTR(0x23, 0)
+#define MI_CLFLUSH              MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
+#define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM        MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT                ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS   ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE                ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define  MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT  ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT     ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT  ((0x2<<29)|(0x43<<22))
+
+/*
+ * Registers used only by the command parser
+ */
+#define BCS_SWCTRL 0x22200
+
+#define GPGPU_THREADS_DISPATCHED        0x2290
+#define HS_INVOCATION_COUNT             0x2300
+#define DS_INVOCATION_COUNT             0x2308
+#define IA_VERTICES_COUNT               0x2310
+#define IA_PRIMITIVES_COUNT             0x2318
+#define VS_INVOCATION_COUNT             0x2320
+#define GS_INVOCATION_COUNT             0x2328
+#define GS_PRIMITIVES_COUNT             0x2330
+#define CL_INVOCATION_COUNT             0x2338
+#define CL_PRIMITIVES_COUNT             0x2340
+#define PS_INVOCATION_COUNT             0x2348
+#define PS_DEPTH_COUNT                  0x2350
+
+/* There are 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
+
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n)  (0x5240 + (n) * 8)
+
+#define GEN7_3DPRIM_END_OFFSET          0x2420
+#define GEN7_3DPRIM_START_VERTEX        0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
+#define GEN7_3DPRIM_START_INSTANCE      0x243C
+#define GEN7_3DPRIM_BASE_VERTEX         0x2440
+
+#define GEN7_GPGPU_DISPATCHDIMX         0x2500
+#define GEN7_GPGPU_DISPATCHDIMY         0x2504
+#define GEN7_GPGPU_DISPATCHDIMZ         0x2508
+
+#define OACONTROL 0x2360
+
+#define _GEN7_PIPEA_DE_LOAD_SL	0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL	0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
+					 _GEN7_PIPEA_DE_LOAD_SL, \
+					 _GEN7_PIPEB_DE_LOAD_SL)
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830		0x6070
+#define  DEBUG_RESET_FULL		(1<<7)
+#define  DEBUG_RESET_RENDER		(1<<8)
+#define  DEBUG_RESET_DISPLAY		(1<<9)
+
+/*
+ * IOSF sideband
+ */
+#define VLV_IOSF_DOORBELL_REQ			(VLV_DISPLAY_BASE + 0x2100)
+#define   IOSF_DEVFN_SHIFT			24
+#define   IOSF_OPCODE_SHIFT			16
+#define   IOSF_PORT_SHIFT			8
+#define   IOSF_BYTE_ENABLES_SHIFT		4
+#define   IOSF_BAR_SHIFT			1
+#define   IOSF_SB_BUSY				(1<<0)
+#define   IOSF_PORT_BUNIT			0x3
+#define   IOSF_PORT_PUNIT			0x4
+#define   IOSF_PORT_NC				0x11
+#define   IOSF_PORT_DPIO			0x12
+#define   IOSF_PORT_DPIO_2			0x1a
+#define   IOSF_PORT_GPIO_NC			0x13
+#define   IOSF_PORT_CCK				0x14
+#define   IOSF_PORT_CCU				0xA9
+#define   IOSF_PORT_GPS_CORE			0x48
+#define   IOSF_PORT_FLISDSI			0x1B
+#define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
+#define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
+
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC				0x11
+
+#define PUNIT_REG_DSPFREQ			0x36
+#define   DSPFREQSTAT_SHIFT_CHV			24
+#define   DSPFREQSTAT_MASK_CHV			(0x1f << DSPFREQSTAT_SHIFT_CHV)
+#define   DSPFREQGUAR_SHIFT_CHV			8
+#define   DSPFREQGUAR_MASK_CHV			(0x1f << DSPFREQGUAR_SHIFT_CHV)
+#define   DSPFREQSTAT_SHIFT			30
+#define   DSPFREQSTAT_MASK			(0x3 << DSPFREQSTAT_SHIFT)
+#define   DSPFREQGUAR_SHIFT			14
+#define   DSPFREQGUAR_MASK			(0x3 << DSPFREQGUAR_SHIFT)
+#define   DSP_MAXFIFO_PM5_STATUS		(1 << 22) /* chv */
+#define   DSP_AUTO_CDCLK_GATE_DISABLE		(1 << 7) /* chv */
+#define   DSP_MAXFIFO_PM5_ENABLE		(1 << 6) /* chv */
+#define   _DP_SSC(val, pipe)			((val) << (2 * (pipe)))
+#define   DP_SSC_MASK(pipe)			_DP_SSC(0x3, (pipe))
+#define   DP_SSC_PWR_ON(pipe)			_DP_SSC(0x0, (pipe))
+#define   DP_SSC_CLK_GATE(pipe)			_DP_SSC(0x1, (pipe))
+#define   DP_SSC_RESET(pipe)			_DP_SSC(0x2, (pipe))
+#define   DP_SSC_PWR_GATE(pipe)			_DP_SSC(0x3, (pipe))
+#define   _DP_SSS(val, pipe)			((val) << (2 * (pipe) + 16))
+#define   DP_SSS_MASK(pipe)			_DP_SSS(0x3, (pipe))
+#define   DP_SSS_PWR_ON(pipe)			_DP_SSS(0x0, (pipe))
+#define   DP_SSS_CLK_GATE(pipe)			_DP_SSS(0x1, (pipe))
+#define   DP_SSS_RESET(pipe)			_DP_SSS(0x2, (pipe))
+#define   DP_SSS_PWR_GATE(pipe)			_DP_SSS(0x3, (pipe))
+
+/* See the PUNIT HAS v0.8 for the bits below */
+enum punit_power_well {
+	PUNIT_POWER_WELL_RENDER			= 0,
+	PUNIT_POWER_WELL_MEDIA			= 1,
+	PUNIT_POWER_WELL_DISP2D			= 3,
+	PUNIT_POWER_WELL_DPIO_CMN_BC		= 5,
+	PUNIT_POWER_WELL_DPIO_TX_B_LANES_01	= 6,
+	PUNIT_POWER_WELL_DPIO_TX_B_LANES_23	= 7,
+	PUNIT_POWER_WELL_DPIO_TX_C_LANES_01	= 8,
+	PUNIT_POWER_WELL_DPIO_TX_C_LANES_23	= 9,
+	PUNIT_POWER_WELL_DPIO_RX0		= 10,
+	PUNIT_POWER_WELL_DPIO_RX1		= 11,
+	PUNIT_POWER_WELL_DPIO_CMN_D		= 12,
+
+	PUNIT_POWER_WELL_NUM,
+};
+
+enum skl_disp_power_wells {
+	SKL_DISP_PW_MISC_IO,
+	SKL_DISP_PW_DDI_A_E,
+	SKL_DISP_PW_DDI_B,
+	SKL_DISP_PW_DDI_C,
+	SKL_DISP_PW_DDI_D,
+	SKL_DISP_PW_1 = 14,
+	SKL_DISP_PW_2,
+};
+
+#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
+#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
+
+#define PUNIT_REG_PWRGT_CTRL			0x60
+#define PUNIT_REG_PWRGT_STATUS			0x61
+#define   PUNIT_PWRGT_MASK(power_well)		(3 << ((power_well) * 2))
+#define   PUNIT_PWRGT_PWR_ON(power_well)	(0 << ((power_well) * 2))
+#define   PUNIT_PWRGT_CLK_GATE(power_well)	(1 << ((power_well) * 2))
+#define   PUNIT_PWRGT_RESET(power_well)		(2 << ((power_well) * 2))
+#define   PUNIT_PWRGT_PWR_GATE(power_well)	(3 << ((power_well) * 2))
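+
+/*
+ * Each power well above thus owns a two-bit field in PWRGT_CTRL/STATUS;
+ * e.g. PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D) expands to
+ * 3 << (3 * 2) == 0xc0, i.e. "power gated" written to bits 7:6.
+ */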
+
+#define PUNIT_REG_GPU_LFM			0xd3
+#define PUNIT_REG_GPU_FREQ_REQ			0xd4
+#define PUNIT_REG_GPU_FREQ_STS			0xd8
+#define   GPLLENABLE				(1<<4)
+#define   GENFREQSTATUS				(1<<0)
+#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc
+#define PUNIT_REG_CZ_TIMESTAMP			0xce
+
+#define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */
+#define PUNIT_FUSE_BUS1				0xf5 /* bits 55:48 */
+
+#define FB_GFX_FMAX_AT_VMAX_FUSE		0x136
+#define FB_GFX_FREQ_FUSE_MASK			0xff
+#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT	24
+#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT	16
+#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT	8
+
+#define FB_GFX_FMIN_AT_VMIN_FUSE		0x137
+#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT		8
+
+#define PUNIT_REG_DDR_SETUP2			0x139
+#define   FORCE_DDR_FREQ_REQ_ACK		(1 << 8)
+#define   FORCE_DDR_LOW_FREQ			(1 << 1)
+#define   FORCE_DDR_HIGH_FREQ			(1 << 0)
+
+#define PUNIT_GPU_STATUS_REG			0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT	16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK		0xff
+#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT	8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK	0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG		0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT	8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK	0xff
+
+#define IOSF_NC_FB_GFX_FREQ_FUSE		0x1c
+#define   FB_GFX_MAX_FREQ_FUSE_SHIFT		3
+#define   FB_GFX_MAX_FREQ_FUSE_MASK		0x000007f8
+#define   FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT	11
+#define   FB_GFX_FGUARANTEED_FREQ_FUSE_MASK	0x0007f800
+#define IOSF_NC_FB_GFX_FMAX_FUSE_HI		0x34
+#define   FB_FMAX_VMIN_FREQ_HI_MASK		0x00000007
+#define IOSF_NC_FB_GFX_FMAX_FUSE_LO		0x30
+#define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
+#define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
+
+#define VLV_TURBO_SOC_OVERRIDE	0x04
+#define 	VLV_OVERRIDE_EN	1
+#define 	VLV_SOC_TDP_EN	(1 << 1)
+#define 	VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define 	CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+
+#define VLV_CZ_CLOCK_TO_MILLI_SEC		100000
+
+/* vlv2 north clock (CCK) registers */
+#define CCK_FUSE_REG				0x8
+#define  CCK_FUSE_HPLL_FREQ_MASK		0x3
+#define CCK_REG_DSI_PLL_FUSE			0x44
+#define CCK_REG_DSI_PLL_CONTROL			0x48
+#define  DSI_PLL_VCO_EN				(1 << 31)
+#define  DSI_PLL_LDO_GATE			(1 << 30)
+#define  DSI_PLL_P1_POST_DIV_SHIFT		17
+#define  DSI_PLL_P1_POST_DIV_MASK		(0x1ff << 17)
+#define  DSI_PLL_P2_MUX_DSI0_DIV2		(1 << 13)
+#define  DSI_PLL_P3_MUX_DSI1_DIV2		(1 << 12)
+#define  DSI_PLL_MUX_MASK			(3 << 9)
+#define  DSI_PLL_MUX_DSI0_DSIPLL		(0 << 10)
+#define  DSI_PLL_MUX_DSI0_CCK			(1 << 10)
+#define  DSI_PLL_MUX_DSI1_DSIPLL		(0 << 9)
+#define  DSI_PLL_MUX_DSI1_CCK			(1 << 9)
+#define  DSI_PLL_CLK_GATE_MASK			(0xf << 5)
+#define  DSI_PLL_CLK_GATE_DSI0_DSIPLL		(1 << 8)
+#define  DSI_PLL_CLK_GATE_DSI1_DSIPLL		(1 << 7)
+#define  DSI_PLL_CLK_GATE_DSI0_CCK		(1 << 6)
+#define  DSI_PLL_CLK_GATE_DSI1_CCK		(1 << 5)
+#define  DSI_PLL_LOCK				(1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER			0x4c
+#define  DSI_PLL_LFSR				(1 << 31)
+#define  DSI_PLL_FRACTION_EN			(1 << 30)
+#define  DSI_PLL_FRAC_COUNTER_SHIFT		27
+#define  DSI_PLL_FRAC_COUNTER_MASK		(7 << 27)
+#define  DSI_PLL_USYNC_CNT_SHIFT		18
+#define  DSI_PLL_USYNC_CNT_MASK			(0x1ff << 18)
+#define  DSI_PLL_N1_DIV_SHIFT			16
+#define  DSI_PLL_N1_DIV_MASK			(3 << 16)
+#define  DSI_PLL_M1_DIV_SHIFT			0
+#define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+#define CCK_CZ_CLOCK_CONTROL			0x62
+#define CCK_DISPLAY_CLOCK_CONTROL		0x6b
+#define  CCK_TRUNK_FORCE_ON			(1 << 17)
+#define  CCK_TRUNK_FORCE_OFF			(1 << 16)
+#define  CCK_FREQUENCY_STATUS			(0x1f << 8)
+#define  CCK_FREQUENCY_STATUS_SHIFT		8
+#define  CCK_FREQUENCY_VALUES			(0x1f << 0)
+
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a
+ * specific IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ *  pipe A == CMN/PLL/REF CH0
+ *
+ *  pipe B == CMN/PLL/REF CH1
+ *
+ *  port B == PCS/TX CH0
+ *
+ *  port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams,
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ *  pipe C == CMN/PLL/REF CH0
+ *
+ *  port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT).
+ */
+/*
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * |      CH0      |      CH1      |
+ * |  CMN/PLL/REF  |  CMN/PLL/REF  |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * |     DDI0      |     DDI1      | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * |      CH0      |
+ * |  CMN/PLL/REF  |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * |     DDI2      | DP/HDMI port
+ * -----------------
+ */
+#define DPIO_DEVFN			0
+
+#define DPIO_CTL			(VLV_DISPLAY_BASE + 0x2110)
+#define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 MHz */
+#define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 MHz */
+#define  DPIO_SFR_BYPASS		(1<<1)
+#define  DPIO_CMNRST			(1<<0)
+
+#define DPIO_PHY(pipe)			((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
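+
+/*
+ * With the mapping above, pipes A and B (0 and 1) resolve to PHY 0 (the
+ * dual channel PHY) and pipe C (2) to PHY 1 (the CHV-only single channel
+ * PHY), matching the pipe/channel assignment in the DOC: DPIO comment.
+ */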
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define _VLV_PLL_DW3_CH0		0x800c
+#define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
+#define   DPIO_POST_DIV_DAC		0
+#define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
+#define   DPIO_POST_DIV_LVDS1		2
+#define   DPIO_POST_DIV_LVDS2		3
+#define   DPIO_K_SHIFT			(24) /* 4 bits */
+#define   DPIO_P1_SHIFT			(21) /* 3 bits */
+#define   DPIO_P2_SHIFT			(16) /* 5 bits */
+#define   DPIO_N_SHIFT			(12) /* 4 bits */
+#define   DPIO_ENABLE_CALIBRATION	(1<<11)
+#define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
+#define   DPIO_M2DIV_MASK		0xff
+#define _VLV_PLL_DW3_CH1		0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
+
+#define _VLV_PLL_DW5_CH0		0x8014
+#define   DPIO_REFSEL_OVERRIDE		27
+#define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
+#define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
+#define   DPIO_PLL_REFCLK_SEL_SHIFT	16 /* 2 bits */
+#define   DPIO_PLL_REFCLK_SEL_MASK	3
+#define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
+#define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
+#define _VLV_PLL_DW5_CH1		0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
+
+#define _VLV_PLL_DW7_CH0		0x801c
+#define _VLV_PLL_DW7_CH1		0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
+
+#define _VLV_PLL_DW8_CH0		0x8040
+#define _VLV_PLL_DW8_CH1		0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
+
+#define VLV_PLL_DW9_BCAST		0xc044
+#define _VLV_PLL_DW9_CH0		0x8044
+#define _VLV_PLL_DW9_CH1		0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
+
+#define _VLV_PLL_DW10_CH0		0x8048
+#define _VLV_PLL_DW10_CH1		0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
+
+#define _VLV_PLL_DW11_CH0		0x804c
+#define _VLV_PLL_DW11_CH1		0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
+
+/* The spec's numbering for the ref block starts at DW10 */
+#define VLV_REF_DW13			0x80ac
+
+#define VLV_CMN_DW0			0x8100
+
+/*
+ * Per DDI channel DPIO regs
+ */
+
+#define _VLV_PCS_DW0_CH0		0x8200
+#define _VLV_PCS_DW0_CH1		0x8400
+#define   DPIO_PCS_TX_LANE2_RESET	(1<<16)
+#define   DPIO_PCS_TX_LANE1_RESET	(1<<7)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER2	(1<<4)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER2	(1<<3)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
+
+#define _VLV_PCS01_DW0_CH0		0x200
+#define _VLV_PCS23_DW0_CH0		0x400
+#define _VLV_PCS01_DW0_CH1		0x2600
+#define _VLV_PCS23_DW0_CH1		0x2800
+#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
+#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
+
+#define _VLV_PCS_DW1_CH0		0x8204
+#define _VLV_PCS_DW1_CH1		0x8404
+#define   CHV_PCS_REQ_SOFTRESET_EN	(1<<23)
+#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN	(1<<22)
+#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define   DPIO_PCS_CLK_DATAWIDTH_SHIFT	(6)
+#define   DPIO_PCS_CLK_SOFT_RESET	(1<<5)
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS01_DW1_CH0		0x204
+#define _VLV_PCS23_DW1_CH0		0x404
+#define _VLV_PCS01_DW1_CH1		0x2604
+#define _VLV_PCS23_DW1_CH1		0x2804
+#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
+#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0		0x8220
+#define _VLV_PCS_DW8_CH1		0x8420
+#define   CHV_PCS_USEDCLKCHANNEL_OVRRIDE	(1 << 20)
+#define   CHV_PCS_USEDCLKCHANNEL		(1 << 21)
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0		0x0220
+#define _VLV_PCS23_DW8_CH0		0x0420
+#define _VLV_PCS01_DW8_CH1		0x2620
+#define _VLV_PCS23_DW8_CH1		0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0		0x8224
+#define _VLV_PCS_DW9_CH1		0x8424
+#define   DPIO_PCS_TX2MARGIN_MASK	(0x7<<13)
+#define   DPIO_PCS_TX2MARGIN_000	(0<<13)
+#define   DPIO_PCS_TX2MARGIN_101	(1<<13)
+#define   DPIO_PCS_TX1MARGIN_MASK	(0x7<<10)
+#define   DPIO_PCS_TX1MARGIN_000	(0<<10)
+#define   DPIO_PCS_TX1MARGIN_101	(1<<10)
+#define	VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _VLV_PCS01_DW9_CH0		0x224
+#define _VLV_PCS23_DW9_CH0		0x424
+#define _VLV_PCS01_DW9_CH1		0x2624
+#define _VLV_PCS23_DW9_CH1		0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
+#define _CHV_PCS_DW10_CH0		0x8228
+#define _CHV_PCS_DW10_CH1		0x8428
+#define   DPIO_PCS_SWING_CALC_TX0_TX2	(1<<30)
+#define   DPIO_PCS_SWING_CALC_TX1_TX3	(1<<31)
+#define   DPIO_PCS_TX2DEEMP_MASK	(0xf<<24)
+#define   DPIO_PCS_TX2DEEMP_9P5		(0<<24)
+#define   DPIO_PCS_TX2DEEMP_6P0		(2<<24)
+#define   DPIO_PCS_TX1DEEMP_MASK	(0xf<<16)
+#define   DPIO_PCS_TX1DEEMP_9P5		(0<<16)
+#define   DPIO_PCS_TX1DEEMP_6P0		(2<<16)
+#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
+
+#define _VLV_PCS01_DW10_CH0		0x0228
+#define _VLV_PCS23_DW10_CH0		0x0428
+#define _VLV_PCS01_DW10_CH1		0x2628
+#define _VLV_PCS23_DW10_CH1		0x2828
+#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
+#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
+
+#define _VLV_PCS_DW11_CH0		0x822c
+#define _VLV_PCS_DW11_CH1		0x842c
+#define   DPIO_TX2_STAGGER_MASK(x)	((x)<<24)
+#define   DPIO_LANEDESKEW_STRAP_OVRD	(1<<3)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER	(1<<1)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER	(1<<0)
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS01_DW11_CH0		0x022c
+#define _VLV_PCS23_DW11_CH0		0x042c
+#define _VLV_PCS01_DW11_CH1		0x262c
+#define _VLV_PCS23_DW11_CH1		0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
+#define _VLV_PCS01_DW12_CH0		0x0230
+#define _VLV_PCS23_DW12_CH0		0x0430
+#define _VLV_PCS01_DW12_CH1		0x2630
+#define _VLV_PCS23_DW12_CH1		0x2830
+#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
+#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
+
+#define _VLV_PCS_DW12_CH0		0x8230
+#define _VLV_PCS_DW12_CH1		0x8430
+#define   DPIO_TX2_STAGGER_MULT(x)	((x)<<20)
+#define   DPIO_TX1_STAGGER_MULT(x)	((x)<<16)
+#define   DPIO_TX1_STAGGER_MASK(x)	((x)<<8)
+#define   DPIO_LANESTAGGER_STRAP_OVRD	(1<<6)
+#define   DPIO_LANESTAGGER_STRAP(x)	((x)<<0)
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0		0x8238
+#define _VLV_PCS_DW14_CH1		0x8438
+#define	VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0		0x825c
+#define _VLV_PCS_DW23_CH1		0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0			0x8288
+#define _VLV_TX_DW2_CH1			0x8488
+#define   DPIO_SWING_MARGIN000_SHIFT	16
+#define   DPIO_SWING_MARGIN000_MASK	(0xff << DPIO_SWING_MARGIN000_SHIFT)
+#define   DPIO_UNIQ_TRANS_SCALE_SHIFT	8
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0			0x828c
+#define _VLV_TX_DW3_CH1			0x848c
+/* The following bit is for the CHV PHY */
+#define   DPIO_TX_UNIQ_TRANS_SCALE_EN	(1<<27)
+#define   DPIO_SWING_MARGIN101_SHIFT	16
+#define   DPIO_SWING_MARGIN101_MASK	(0xff << DPIO_SWING_MARGIN101_SHIFT)
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0			0x8290
+#define _VLV_TX_DW4_CH1			0x8490
+#define   DPIO_SWING_DEEMPH9P5_SHIFT	24
+#define   DPIO_SWING_DEEMPH9P5_MASK	(0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
+#define   DPIO_SWING_DEEMPH6P0_SHIFT	16
+#define   DPIO_SWING_DEEMPH6P0_MASK	(0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0		0x690
+#define _VLV_TX3_DW4_CH1		0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0			0x8294
+#define _VLV_TX_DW5_CH1			0x8494
+#define   DPIO_TX_OCALINIT_EN		(1<<31)
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0		0x82ac
+#define _VLV_TX_DW11_CH1		0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0		0x82b8
+#define _VLV_TX_DW14_CH1		0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
+
+/* CHV dpPhy registers */
+#define _CHV_PLL_DW0_CH0		0x8000
+#define _CHV_PLL_DW0_CH1		0x8180
+#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
+
+#define _CHV_PLL_DW1_CH0		0x8004
+#define _CHV_PLL_DW1_CH1		0x8184
+#define   DPIO_CHV_N_DIV_SHIFT		8
+#define   DPIO_CHV_M1_DIV_BY_2		(0 << 0)
+#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
+
+#define _CHV_PLL_DW2_CH0		0x8008
+#define _CHV_PLL_DW2_CH1		0x8188
+#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
+
+#define _CHV_PLL_DW3_CH0		0x800c
+#define _CHV_PLL_DW3_CH1		0x818c
+#define  DPIO_CHV_FRAC_DIV_EN		(1 << 16)
+#define  DPIO_CHV_FIRST_MOD		(0 << 8)
+#define  DPIO_CHV_SECOND_MOD		(1 << 8)
+#define  DPIO_CHV_FEEDFWD_GAIN_SHIFT	0
+#define  DPIO_CHV_FEEDFWD_GAIN_MASK		(0xF << 0)
+#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
+
+#define _CHV_PLL_DW6_CH0		0x8018
+#define _CHV_PLL_DW6_CH1		0x8198
+#define   DPIO_CHV_GAIN_CTRL_SHIFT	16
+#define	  DPIO_CHV_INT_COEFF_SHIFT	8
+#define   DPIO_CHV_PROP_COEFF_SHIFT	0
+#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+
+#define _CHV_PLL_DW8_CH0		0x8020
+#define _CHV_PLL_DW8_CH1		0x81A0
+#define   DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
+#define   DPIO_CHV_TDC_TARGET_CNT_MASK  (0x3FF << 0)
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0		0x8024
+#define _CHV_PLL_DW9_CH1		0x81A4
+#define  DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT		1 /* 3 bits */
+#define  DPIO_CHV_INT_LOCK_THRESHOLD_MASK		(7 << 1)
+#define  DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE	1 /* 1: coarse, 0: fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
+#define _CHV_CMN_DW0_CH0               0x8100
+#define   DPIO_ALLDL_POWERDOWN_SHIFT_CH0	19
+#define   DPIO_ANYDL_POWERDOWN_SHIFT_CH0	18
+#define   DPIO_ALLDL_POWERDOWN			(1 << 1)
+#define   DPIO_ANYDL_POWERDOWN			(1 << 0)
+
+#define _CHV_CMN_DW5_CH0               0x8114
+#define   CHV_BUFRIGHTENA1_DISABLE	(0 << 20)
+#define   CHV_BUFRIGHTENA1_NORMAL	(1 << 20)
+#define   CHV_BUFRIGHTENA1_FORCE	(3 << 20)
+#define   CHV_BUFRIGHTENA1_MASK		(3 << 20)
+#define   CHV_BUFLEFTENA1_DISABLE	(0 << 22)
+#define   CHV_BUFLEFTENA1_NORMAL	(1 << 22)
+#define   CHV_BUFLEFTENA1_FORCE		(3 << 22)
+#define   CHV_BUFLEFTENA1_MASK		(3 << 22)
+
+#define _CHV_CMN_DW13_CH0		0x8134
+#define _CHV_CMN_DW0_CH1		0x8080
+#define   DPIO_CHV_S1_DIV_SHIFT		21
+#define   DPIO_CHV_P1_DIV_SHIFT		13 /* 3 bits */
+#define   DPIO_CHV_P2_DIV_SHIFT		8  /* 5 bits */
+#define   DPIO_CHV_K_DIV_SHIFT		4
+#define   DPIO_PLL_FREQLOCK		(1 << 1)
+#define   DPIO_PLL_LOCK			(1 << 0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
+
+#define _CHV_CMN_DW14_CH0		0x8138
+#define _CHV_CMN_DW1_CH1		0x8084
+#define   DPIO_AFC_RECAL		(1 << 14)
+#define   DPIO_DCLKP_EN			(1 << 13)
+#define   CHV_BUFLEFTENA2_DISABLE	(0 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_NORMAL	(1 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_FORCE		(3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_MASK		(3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_DISABLE	(0 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_NORMAL	(1 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_FORCE	(3 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_MASK		(3 << 19) /* CL2 DW1 only */
+#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+
+#define _CHV_CMN_DW19_CH0		0x814c
+#define _CHV_CMN_DW6_CH1		0x8098
+#define   DPIO_ALLDL_POWERDOWN_SHIFT_CH1	30 /* CL2 DW6 only */
+#define   DPIO_ANYDL_POWERDOWN_SHIFT_CH1	29 /* CL2 DW6 only */
+#define   DPIO_DYNPWRDOWNEN_CH1		(1 << 28) /* CL2 DW6 only */
+#define   CHV_CMN_USEDCLKCHANNEL	(1 << 13)
+
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
+#define CHV_CMN_DW28			0x8170
+#define   DPIO_CL1POWERDOWNEN		(1 << 23)
+#define   DPIO_DYNPWRDOWNEN_CH0		(1 << 22)
+#define   DPIO_SUS_CLK_CONFIG_ON		(0 << 0)
+#define   DPIO_SUS_CLK_CONFIG_CLKREQ		(1 << 0)
+#define   DPIO_SUS_CLK_CONFIG_GATE		(2 << 0)
+#define   DPIO_SUS_CLK_CONFIG_GATE_CLKREQ	(3 << 0)
+
+#define CHV_CMN_DW30			0x8178
+#define   DPIO_CL2_LDOFUSE_PWRENB	(1 << 6)
+#define   DPIO_LRC_BYPASS		(1 << 3)
+
+#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
+					(lane) * 0x200 + (offset))
+
+#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
+#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
+#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
+#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
+#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
+#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
+#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
+#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
+#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
+#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
+#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
+#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
+#define   DPIO_FRC_LATENCY_SHFIT	8
+#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
+#define   DPIO_UPAR_SHIFT		30
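+
+/*
+ * Worked example of the _TXLANE() layout above: lanes sit 0x200 apart
+ * and CH1 starts 0x2400 above CH0, so e.g. CHV_TX_DW0(0, 2) is
+ * 2 * 0x200 + 0x80 == 0x480 and CHV_TX_DW0(1, 0) is 0x2400 + 0x80 == 0x2480.
+ */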
+
+/* BXT PHY registers */
+#define _BXT_PHY(phy, a, b)		_PIPE((phy), (a), (b))
+
+#define BXT_P_CR_GT_DISP_PWRON		0x138090
+#define   GT_DISPLAY_POWER_ON(phy)	(1 << (phy))
+
+#define _PHY_CTL_FAMILY_EDP		0x64C80
+#define _PHY_CTL_FAMILY_DDI		0x64C90
+#define   COMMON_RESET_DIS		(1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy)		_BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
+							_PHY_CTL_FAMILY_EDP)
+
+/* BXT PHY PLL registers */
+#define _PORT_PLL_A			0x46074
+#define _PORT_PLL_B			0x46078
+#define _PORT_PLL_C			0x4607c
+#define   PORT_PLL_ENABLE		(1 << 31)
+#define   PORT_PLL_LOCK			(1 << 30)
+#define   PORT_PLL_REF_SEL		(1 << 27)
+#define BXT_PORT_PLL_ENABLE(port)	_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+
+#define _PORT_PLL_EBB_0_A		0x162034
+#define _PORT_PLL_EBB_0_B		0x6C034
+#define _PORT_PLL_EBB_0_C		0x6C340
+#define   PORT_PLL_P1_SHIFT		13
+#define   PORT_PLL_P1_MASK		(0x07 << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P1(x)		((x)  << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P2_SHIFT		8
+#define   PORT_PLL_P2_MASK		(0x1f << PORT_PLL_P2_SHIFT)
+#define   PORT_PLL_P2(x)		((x)  << PORT_PLL_P2_SHIFT)
+#define BXT_PORT_PLL_EBB_0(port)	_PORT3(port, _PORT_PLL_EBB_0_A, \
+						_PORT_PLL_EBB_0_B,	\
+						_PORT_PLL_EBB_0_C)
+
+#define _PORT_PLL_EBB_4_A		0x162038
+#define _PORT_PLL_EBB_4_B		0x6C038
+#define _PORT_PLL_EBB_4_C		0x6C344
+#define   PORT_PLL_10BIT_CLK_ENABLE	(1 << 13)
+#define   PORT_PLL_RECALIBRATE		(1 << 14)
+#define BXT_PORT_PLL_EBB_4(port)	_PORT3(port, _PORT_PLL_EBB_4_A, \
+						_PORT_PLL_EBB_4_B,	\
+						_PORT_PLL_EBB_4_C)
+
+#define _PORT_PLL_0_A			0x162100
+#define _PORT_PLL_0_B			0x6C100
+#define _PORT_PLL_0_C			0x6C380
+/* PORT_PLL_0_A */
+#define   PORT_PLL_M2_MASK		0xFF
+/* PORT_PLL_1_A */
+#define   PORT_PLL_N_SHIFT		8
+#define   PORT_PLL_N_MASK		(0x0F << PORT_PLL_N_SHIFT)
+#define   PORT_PLL_N(x)			((x) << PORT_PLL_N_SHIFT)
+/* PORT_PLL_2_A */
+#define   PORT_PLL_M2_FRAC_MASK		0x3FFFFF
+/* PORT_PLL_3_A */
+#define   PORT_PLL_M2_FRAC_ENABLE	(1 << 16)
+/* PORT_PLL_6_A */
+#define   PORT_PLL_PROP_COEFF_MASK	0xF
+#define   PORT_PLL_INT_COEFF_MASK	(0x1F << 8)
+#define   PORT_PLL_INT_COEFF(x)		((x)  << 8)
+#define   PORT_PLL_GAIN_CTL_MASK	(0x07 << 16)
+#define   PORT_PLL_GAIN_CTL(x)		((x)  << 16)
+/* PORT_PLL_8_A */
+#define   PORT_PLL_TARGET_CNT_MASK	0x3FF
+/* PORT_PLL_9_A */
+#define  PORT_PLL_LOCK_THRESHOLD_SHIFT	1
+#define  PORT_PLL_LOCK_THRESHOLD_MASK	(0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
+/* PORT_PLL_10_A */
+#define  PORT_PLL_DCO_AMP_OVR_EN_H	(1<<27)
+#define  PORT_PLL_DCO_AMP_DEFAULT	15
+#define  PORT_PLL_DCO_AMP_MASK		0x3c00
+#define  PORT_PLL_DCO_AMP(x)		((x)<<10)
+#define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A,	\
+						_PORT_PLL_0_B,		\
+						_PORT_PLL_0_C)
+#define BXT_PORT_PLL(port, idx)		(_PORT_PLL_BASE(port) + (idx) * 4)
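+
+/*
+ * The PLL dwords are packed contiguously, so e.g. BXT_PORT_PLL(port, 8)
+ * (the target-count dword per the PORT_PLL_8_A note above) is simply
+ * _PORT_PLL_BASE(port) + 0x20.
+ */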
+
+/* BXT PHY common lane registers */
+#define _PORT_CL1CM_DW0_A		0x162000
+#define _PORT_CL1CM_DW0_BC		0x6C000
+#define   PHY_POWER_GOOD		(1 << 16)
+#define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
+							_PORT_CL1CM_DW0_A)
+
+#define _PORT_CL1CM_DW9_A		0x162024
+#define _PORT_CL1CM_DW9_BC		0x6C024
+#define   IREF0RC_OFFSET_SHIFT		8
+#define   IREF0RC_OFFSET_MASK		(0xFF << IREF0RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW9(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
+							_PORT_CL1CM_DW9_A)
+
+#define _PORT_CL1CM_DW10_A		0x162028
+#define _PORT_CL1CM_DW10_BC		0x6C028
+#define   IREF1RC_OFFSET_SHIFT		8
+#define   IREF1RC_OFFSET_MASK		(0xFF << IREF1RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
+							_PORT_CL1CM_DW10_A)
+
+#define _PORT_CL1CM_DW28_A		0x162070
+#define _PORT_CL1CM_DW28_BC		0x6C070
+#define   OCL1_POWER_DOWN_EN		(1 << 23)
+#define   DW28_OLDO_DYN_PWR_DOWN_EN	(1 << 22)
+#define   SUS_CLK_CONFIG		0x3
+#define BXT_PORT_CL1CM_DW28(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
+							_PORT_CL1CM_DW28_A)
+
+#define _PORT_CL1CM_DW30_A		0x162078
+#define _PORT_CL1CM_DW30_BC		0x6C078
+#define   OCL2_LDOFUSE_PWR_DIS		(1 << 6)
+#define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
+							_PORT_CL1CM_DW30_A)
+
+/* Defined for PHY0 only */
+#define BXT_PORT_CL2CM_DW6_BC		0x6C358
+#define   DW6_OLDO_DYN_PWR_DOWN_EN	(1 << 28)
+
+/* BXT PHY Ref registers */
+#define _PORT_REF_DW3_A			0x16218C
+#define _PORT_REF_DW3_BC		0x6C18C
+#define   GRC_DONE			(1 << 22)
+#define BXT_PORT_REF_DW3(phy)		_BXT_PHY((phy), _PORT_REF_DW3_BC, \
+							_PORT_REF_DW3_A)
+
+#define _PORT_REF_DW6_A			0x162198
+#define _PORT_REF_DW6_BC		0x6C198
+/*
+ * FIXME: BSpec and the CHV ConfigDB disagree on the following two fields;
+ * fix them after testing.
+ */
+#define   GRC_CODE_SHIFT		23
+#define   GRC_CODE_MASK			(0x1FF << GRC_CODE_SHIFT)
+#define   GRC_CODE_FAST_SHIFT		16
+#define   GRC_CODE_FAST_MASK		(0x7F << GRC_CODE_FAST_SHIFT)
+#define   GRC_CODE_SLOW_SHIFT		8
+#define   GRC_CODE_SLOW_MASK		(0xFF << GRC_CODE_SLOW_SHIFT)
+#define   GRC_CODE_NOM_MASK		0xFF
+#define BXT_PORT_REF_DW6(phy)		_BXT_PHY((phy), _PORT_REF_DW6_BC,	\
+						      _PORT_REF_DW6_A)
+
+#define _PORT_REF_DW8_A			0x1621A0
+#define _PORT_REF_DW8_BC		0x6C1A0
+#define   GRC_DIS			(1 << 15)
+#define   GRC_RDY_OVRD			(1 << 1)
+#define BXT_PORT_REF_DW8(phy)		_BXT_PHY((phy), _PORT_REF_DW8_BC,	\
+						      _PORT_REF_DW8_A)
+
+/* BXT PHY PCS registers */
+#define _PORT_PCS_DW10_LN01_A		0x162428
+#define _PORT_PCS_DW10_LN01_B		0x6C428
+#define _PORT_PCS_DW10_LN01_C		0x6C828
+#define _PORT_PCS_DW10_GRP_A		0x162C28
+#define _PORT_PCS_DW10_GRP_B		0x6CC28
+#define _PORT_PCS_DW10_GRP_C		0x6CE28
+#define BXT_PORT_PCS_DW10_LN01(port)	_PORT3(port, _PORT_PCS_DW10_LN01_A, \
+						     _PORT_PCS_DW10_LN01_B, \
+						     _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(port)	_PORT3(port, _PORT_PCS_DW10_GRP_A,  \
+						     _PORT_PCS_DW10_GRP_B,  \
+						     _PORT_PCS_DW10_GRP_C)
+#define   TX2_SWING_CALC_INIT		(1 << 31)
+#define   TX1_SWING_CALC_INIT		(1 << 30)
+
+#define _PORT_PCS_DW12_LN01_A		0x162430
+#define _PORT_PCS_DW12_LN01_B		0x6C430
+#define _PORT_PCS_DW12_LN01_C		0x6C830
+#define _PORT_PCS_DW12_LN23_A		0x162630
+#define _PORT_PCS_DW12_LN23_B		0x6C630
+#define _PORT_PCS_DW12_LN23_C		0x6CA30
+#define _PORT_PCS_DW12_GRP_A		0x162c30
+#define _PORT_PCS_DW12_GRP_B		0x6CC30
+#define _PORT_PCS_DW12_GRP_C		0x6CE30
+#define   LANESTAGGER_STRAP_OVRD	(1 << 6)
+#define   LANE_STAGGER_MASK		0x1F
+#define BXT_PORT_PCS_DW12_LN01(port)	_PORT3(port, _PORT_PCS_DW12_LN01_A, \
+						     _PORT_PCS_DW12_LN01_B, \
+						     _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(port)	_PORT3(port, _PORT_PCS_DW12_LN23_A, \
+						     _PORT_PCS_DW12_LN23_B, \
+						     _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(port)	_PORT3(port, _PORT_PCS_DW12_GRP_A, \
+						     _PORT_PCS_DW12_GRP_B, \
+						     _PORT_PCS_DW12_GRP_C)
+
+/* BXT PHY TX registers */
+#define _BXT_LANE_OFFSET(lane)           (((lane) >> 1) * 0x200 +	\
+					  ((lane) & 1) * 0x80)
+
+#define _PORT_TX_DW2_LN0_A		0x162508
+#define _PORT_TX_DW2_LN0_B		0x6C508
+#define _PORT_TX_DW2_LN0_C		0x6C908
+#define _PORT_TX_DW2_GRP_A		0x162D08
+#define _PORT_TX_DW2_GRP_B		0x6CD08
+#define _PORT_TX_DW2_GRP_C		0x6CF08
+#define BXT_PORT_TX_DW2_GRP(port)	_PORT3(port, _PORT_TX_DW2_GRP_A,  \
+						     _PORT_TX_DW2_GRP_B,  \
+						     _PORT_TX_DW2_GRP_C)
+#define BXT_PORT_TX_DW2_LN0(port)	_PORT3(port, _PORT_TX_DW2_LN0_A,  \
+						     _PORT_TX_DW2_LN0_B,  \
+						     _PORT_TX_DW2_LN0_C)
+#define   MARGIN_000_SHIFT		16
+#define   MARGIN_000			(0xFF << MARGIN_000_SHIFT)
+#define   UNIQ_TRANS_SCALE_SHIFT	8
+#define   UNIQ_TRANS_SCALE		(0xFF << UNIQ_TRANS_SCALE_SHIFT)
+
+#define _PORT_TX_DW3_LN0_A		0x16250C
+#define _PORT_TX_DW3_LN0_B		0x6C50C
+#define _PORT_TX_DW3_LN0_C		0x6C90C
+#define _PORT_TX_DW3_GRP_A		0x162D0C
+#define _PORT_TX_DW3_GRP_B		0x6CD0C
+#define _PORT_TX_DW3_GRP_C		0x6CF0C
+#define BXT_PORT_TX_DW3_GRP(port)	_PORT3(port, _PORT_TX_DW3_GRP_A,  \
+						     _PORT_TX_DW3_GRP_B,  \
+						     _PORT_TX_DW3_GRP_C)
+#define BXT_PORT_TX_DW3_LN0(port)	_PORT3(port, _PORT_TX_DW3_LN0_A,  \
+						     _PORT_TX_DW3_LN0_B,  \
+						     _PORT_TX_DW3_LN0_C)
+#define   SCALE_DCOMP_METHOD		(1 << 26)
+#define   UNIQUE_TRANGE_EN_METHOD	(1 << 27)
+
+#define _PORT_TX_DW4_LN0_A		0x162510
+#define _PORT_TX_DW4_LN0_B		0x6C510
+#define _PORT_TX_DW4_LN0_C		0x6C910
+#define _PORT_TX_DW4_GRP_A		0x162D10
+#define _PORT_TX_DW4_GRP_B		0x6CD10
+#define _PORT_TX_DW4_GRP_C		0x6CF10
+#define BXT_PORT_TX_DW4_LN0(port)	_PORT3(port, _PORT_TX_DW4_LN0_A,  \
+						     _PORT_TX_DW4_LN0_B,  \
+						     _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(port)	_PORT3(port, _PORT_TX_DW4_GRP_A,  \
+						     _PORT_TX_DW4_GRP_B,  \
+						     _PORT_TX_DW4_GRP_C)
+#define   DEEMPH_SHIFT			24
+#define   DE_EMPHASIS			(0xFF << DEEMPH_SHIFT)
+
+#define _PORT_TX_DW14_LN0_A		0x162538
+#define _PORT_TX_DW14_LN0_B		0x6C538
+#define _PORT_TX_DW14_LN0_C		0x6C938
+#define   LATENCY_OPTIM_SHIFT		30
+#define   LATENCY_OPTIM			(1 << LATENCY_OPTIM_SHIFT)
+#define BXT_PORT_TX_DW14_LN(port, lane)	(_PORT3((port), _PORT_TX_DW14_LN0_A,   \
+							_PORT_TX_DW14_LN0_B,   \
+							_PORT_TX_DW14_LN0_C) + \
+					 _BXT_LANE_OFFSET(lane))
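+
+/*
+ * _BXT_LANE_OFFSET() above yields 0x0, 0x80, 0x200 and 0x280 for lanes
+ * 0-3: each lane pair is 0x200 apart and the odd lane of a pair sits
+ * 0x80 above the even one, mirroring the LN01/LN23 split of the PCS
+ * registers.
+ */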
+
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1			0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK			0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0		0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port)		(8+3*(port))
+#define BALANCE_LEG_MASK(port)		(7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT	23
+
+/*
+ * Fence registers
+ * [0-7]  @ 0x2000 gen2,gen3
+ * [8-15] @ 0x3000 945,g33,pnv
+ *
+ * [0-15] @ 0x3000 gen4,gen5
+ *
+ * [0-15] @ 0x100000 gen6,vlv,chv
+ * [0-31] @ 0x100000 gen7+
+ */
+#define FENCE_REG(i)			(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
+#define   I830_FENCE_START_MASK		0x07f80000
+#define   I830_FENCE_TILING_Y_SHIFT	12
+#define   I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)
+#define   I830_FENCE_PITCH_SHIFT	4
+#define   I830_FENCE_REG_VALID		(1<<0)
+#define   I915_FENCE_MAX_PITCH_VAL	4
+#define   I830_FENCE_MAX_PITCH_VAL	6
+#define   I830_FENCE_MAX_SIZE_VAL	(1<<8)
+
+#define   I915_FENCE_START_MASK		0x0ff00000
+#define   I915_FENCE_SIZE_BITS(size)	((ffs((size) >> 20) - 1) << 8)
+
+#define FENCE_REG_965_LO(i)		(0x03000 + (i) * 8)
+#define FENCE_REG_965_HI(i)		(0x03000 + (i) * 8 + 4)
+#define   I965_FENCE_PITCH_SHIFT	2
+#define   I965_FENCE_TILING_Y_SHIFT	1
+#define   I965_FENCE_REG_VALID		(1<<0)
+#define   I965_FENCE_MAX_PITCH_VAL	0x0400
+
+#define FENCE_REG_GEN6_LO(i)	(0x100000 + (i) * 8)
+#define FENCE_REG_GEN6_HI(i)	(0x100000 + (i) * 8 + 4)
+#define   GEN6_FENCE_PITCH_SHIFT	32
+#define   GEN7_FENCE_MAX_PITCH_VAL	0x0800
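+
+/*
+ * FENCE_REG() above folds the gen2/3 split into a single expression:
+ * the (i & 8) << 9 term moves fences 8-15 from the 0x2000 block up to
+ * 0x3000, so e.g. FENCE_REG(3) == 0x200c while FENCE_REG(11) == 0x300c.
+ */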
+
+
+/* control register for cpu gtt access */
+#define TILECTL				0x101000
+#define   TILECTL_SWZCTL			(1 << 0)
+#define   TILECTL_TLBPF			(1 << 1)
+#define   TILECTL_TLB_PREFETCH_DIS	(1 << 2)
+#define   TILECTL_BACKSNOOP_DIS		(1 << 3)
+
+/*
+ * Instruction and interrupt control regs
+ */
+#define PGTBL_CTL	0x02020
+#define   PGTBL_ADDRESS_LO_MASK	0xfffff000 /* bits [31:12] */
+#define   PGTBL_ADDRESS_HI_MASK	0x000000f0 /* bits [35:32] (gen4) */
+#define PGTBL_ER	0x02024
+#define PRB0_BASE (0x2030-0x30)
+#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
+#define PRB2_BASE (0x2050-0x30) /* gen3 */
+#define SRB0_BASE (0x2100-0x30) /* gen2 */
+#define SRB1_BASE (0x2110-0x30) /* gen2 */
+#define SRB2_BASE (0x2120-0x30) /* 830 */
+#define SRB3_BASE (0x2130-0x30) /* 830 */
+#define RENDER_RING_BASE	0x02000
+#define BSD_RING_BASE		0x04000
+#define GEN6_BSD_RING_BASE	0x12000
+#define GEN8_BSD2_RING_BASE	0x1c000
+#define VEBOX_RING_BASE		0x1a000
+#define BLT_RING_BASE		0x22000
+#define RING_TAIL(base)		((base)+0x30)
+#define RING_HEAD(base)		((base)+0x34)
+#define RING_START(base)	((base)+0x38)
+#define RING_CTL(base)		((base)+0x3c)
+#define RING_SYNC_0(base)	((base)+0x40)
+#define RING_SYNC_1(base)	((base)+0x44)
+#define RING_SYNC_2(base)	((base)+0x48)
+#define GEN6_RVSYNC	(RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC	(RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_RVESYNC	(RING_SYNC_2(RENDER_RING_BASE))
+#define GEN6_VBSYNC	(RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_VRSYNC	(RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VVESYNC	(RING_SYNC_2(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC	(RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC	(RING_SYNC_1(BLT_RING_BASE))
+#define GEN6_BVESYNC	(RING_SYNC_2(BLT_RING_BASE))
+#define GEN6_VEBSYNC	(RING_SYNC_0(VEBOX_RING_BASE))
+#define GEN6_VERSYNC	(RING_SYNC_1(VEBOX_RING_BASE))
+#define GEN6_VEVSYNC	(RING_SYNC_2(VEBOX_RING_BASE))
+#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base)	((base)+0x50)
+#define RING_MAX_IDLE(base)	((base)+0x54)
+#define RING_HWS_PGA(base)	((base)+0x80)
+#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define RING_RESET_CTL(base)	((base)+0xd0)
+#define   RESET_CTL_REQUEST_RESET  (1 << 0)
+#define   RESET_CTL_READY_TO_RESET (1 << 1)
+
+#define HSW_GTT_CACHE_EN	0x4024
+#define   GTT_CACHE_EN_ALL	0xF0007FFF
+#define GEN7_WR_WATERMARK	0x4028
+#define GEN7_GFX_PRIO_CTRL	0x402C
+#define ARB_MODE		0x4030
+#define   ARB_MODE_SWIZZLE_SNB	(1<<4)
+#define   ARB_MODE_SWIZZLE_IVB	(1<<5)
+#define GEN7_GFX_PEND_TLB0	0x4034
+#define GEN7_GFX_PEND_TLB1	0x4038
+/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
+#define GEN7_LRA_LIMITS(i)	(0x403C + (i) * 4)
+#define GEN7_LRA_LIMITS_REG_NUM	13
+#define GEN7_MEDIA_MAX_REQ_COUNT	0x4070
+#define GEN7_GFX_MAX_REQ_COUNT		0x4074
+
+#define GAMTARBMODE		0x04a08
+#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
+#define   ARB_MODE_SWIZZLE_BDW	(1<<1)
+#define RENDER_HWS_PGA_GEN7	(0x04080)
+#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
+#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_SRCID(x)	(((x) >> 3) & 0xff)
+#define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
+#define   RING_FAULT_VALID	(1<<0)
+#define DONE_REG		0x40b0
+#define GEN8_PRIVATE_PAT_LO	0x40e0
+#define GEN8_PRIVATE_PAT_HI	(0x40e0 + 4)
+#define BSD_HWS_PGA_GEN7	(0x04180)
+#define BLT_HWS_PGA_GEN7	(0x04280)
+#define VEBOX_HWS_PGA_GEN7	(0x04380)
+#define RING_ACTHD(base)	((base)+0x74)
+#define RING_ACTHD_UDW(base)	((base)+0x5c)
+#define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
+#define RING_HWSTAM(base)	((base)+0x98)
+#define RING_TIMESTAMP(base)	((base)+0x358)
+#define   TAIL_ADDR		0x001FFFF8
+#define   HEAD_WRAP_COUNT	0xFFE00000
+#define   HEAD_WRAP_ONE		0x00200000
+#define   HEAD_ADDR		0x001FFFFC
+#define   RING_NR_PAGES		0x001FF000
+#define   RING_REPORT_MASK	0x00000006
+#define   RING_REPORT_64K	0x00000002
+#define   RING_REPORT_128K	0x00000004
+#define   RING_NO_REPORT	0x00000000
+#define   RING_VALID_MASK	0x00000001
+#define   RING_VALID		0x00000001
+#define   RING_INVALID		0x00000000
+#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
+
+#define GEN7_TLB_RD_ADDR	0x4700
+
+#if 0
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
+#define PRB1_TAIL	0x02040 /* 915+ only */
+#define PRB1_HEAD	0x02044 /* 915+ only */
+#define PRB1_START	0x02048 /* 915+ only */
+#define PRB1_CTL	0x0204c /* 915+ only */
+#endif
+#define IPEIR_I965	0x02064
+#define IPEHR_I965	0x02068
+#define GEN7_SC_INSTDONE	0x07100
+#define GEN7_SAMPLER_INSTDONE	0x0e160
+#define GEN7_ROW_INSTDONE	0x0e164
+#define I915_NUM_INSTDONE_REG	4
+#define RING_IPEIR(base)	((base)+0x64)
+#define RING_IPEHR(base)	((base)+0x68)
+/*
+ * On GEN4, only the render ring INSTDONE exists and has a different
+ * layout than the GEN7+ version.
+ * The GEN2 counterpart of this register is GEN2_INSTDONE.
+ */
+#define RING_INSTDONE(base)	((base)+0x6c)
+#define RING_INSTPS(base)	((base)+0x70)
+#define RING_DMA_FADD(base)	((base)+0x78)
+#define RING_DMA_FADD_UDW(base)	((base)+0x60) /* gen8+ */
+#define RING_INSTPM(base)	((base)+0xc0)
+#define RING_MI_MODE(base)	((base)+0x9c)
+#define INSTPS		0x02070 /* 965+ only */
+#define GEN4_INSTDONE1	0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
+#define ACTHD_I965	0x02074
+#define HWS_PGA		0x02080
+#define HWS_ADDRESS_MASK	0xfffff000
+#define HWS_START_ADDRESS_SHIFT	4
+#define PWRCTXA		0x2088 /* 965GM+ only */
+#define   PWRCTX_EN	(1<<0)
+#define IPEIR		0x02088
+#define IPEHR		0x0208c
+#define GEN2_INSTDONE	0x02090
+#define NOPID		0x02094
+#define HWSTAM		0x02098
+#define DMA_FADD_I8XX	0x020d0
+#define RING_BBSTATE(base)	((base)+0x110)
+#define RING_BBADDR(base)	((base)+0x140)
+#define RING_BBADDR_UDW(base)	((base)+0x168) /* gen8+ */
+
+#define ERROR_GEN6	0x040a0
+#define GEN7_ERR_INT	0x44040
+#define   ERR_INT_POISON		(1<<31)
+#define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
+#define   ERR_INT_PIPE_CRC_DONE_C	(1<<8)
+#define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
+#define   ERR_INT_PIPE_CRC_DONE_B	(1<<5)
+#define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
+#define   ERR_INT_PIPE_CRC_DONE_A	(1<<2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)	(1<<(2 + (pipe)*3))
+#define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
+#define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<((pipe)*3))
+
+#define GEN8_FAULT_TLB_DATA0		0x04b10
+#define GEN8_FAULT_TLB_DATA1		0x04b14
+
+#define FPGA_DBG		0x42300
+#define   FPGA_DBG_RM_NOCLAIM	(1<<31)
+
+#define DERRMR		0x44050
+/* Note that HBLANK events are reserved on bdw+ */
+#define   DERRMR_PIPEA_SCANLINE		(1<<0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
+#define   DERRMR_PIPEA_VBLANK		(1<<3)
+#define   DERRMR_PIPEA_HBLANK		(1<<5)
+#define   DERRMR_PIPEB_SCANLINE 	(1<<8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
+#define   DERRMR_PIPEB_VBLANK		(1<<11)
+#define   DERRMR_PIPEB_HBLANK		(1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define   DERRMR_PIPEC_SCANLINE		(1<<14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
+#define   DERRMR_PIPEC_VBLANK		(1<<21)
+#define   DERRMR_PIPEC_HBLANK		(1<<22)
+
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior.  The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN	0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
+#define _3D_CHICKEN2	0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
+#define _3D_CHICKEN3	0x02090
+#define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
+#define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)	((x)<<1) /* gen8+ */
+#define  _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH	(1 << 1) /* gen6 */
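+
+/*
+ * Because of the write-enable scheme described above, chicken bits are
+ * programmed with masked writes; a sketch using the driver's
+ * _MASKED_BIT_ENABLE() helper (the particular workaround shown is just
+ * an illustration):
+ *
+ *	I915_WRITE(_3D_CHICKEN3,
+ *		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+ *	// writes (1 << 21) | (1 << 5): enable bit 21 unlocks data bit 5
+ */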
+
+#define MI_MODE		0x0209c
+# define VS_TIMER_DISPATCH				(1 << 6)
+# define MI_FLUSH_ENABLE				(1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE			(1 << 14)
+# define MODE_IDLE					(1 << 9)
+# define STOP_RING					(1 << 8)
+
+#define GEN6_GT_MODE	0x20d0
+#define GEN7_GT_MODE	0x7008
+#define   GEN6_WIZ_HASHING(hi, lo)			(((hi) << 9) | ((lo) << 7))
+#define   GEN6_WIZ_HASHING_8x8				GEN6_WIZ_HASHING(0, 0)
+#define   GEN6_WIZ_HASHING_8x4				GEN6_WIZ_HASHING(0, 1)
+#define   GEN6_WIZ_HASHING_16x4				GEN6_WIZ_HASHING(1, 0)
+#define   GEN6_WIZ_HASHING_MASK				GEN6_WIZ_HASHING(1, 1)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE		(1 << 5)
+#define   GEN9_IZ_HASHING_MASK(slice)			(0x3 << ((slice) * 2))
+#define   GEN9_IZ_HASHING(slice, val)			((val) << ((slice) * 2))
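+
+/*
+ * GEN6_WIZ_HASHING() packs the two hashing select bits at 9 and 7, so
+ * e.g. GEN6_WIZ_HASHING_16x4 evaluates to (1 << 9) | (0 << 7) == 0x200,
+ * and GEN6_WIZ_HASHING_MASK covers both bits.
+ */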
+
+#define GFX_MODE	0x02520
+#define GFX_MODE_GEN7	0x0229c
+#define RING_MODE_GEN7(ring)	((ring)->mmio_base+0x29c)
+#define   GFX_RUN_LIST_ENABLE		(1<<15)
+#define   GFX_INTERRUPT_STEERING	(1<<14)
+#define   GFX_TLB_INVALIDATE_EXPLICIT	(1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+#define   GFX_REPLAY_MODE		(1<<11)
+#define   GFX_PSMI_GRANULARITY		(1<<10)
+#define   GFX_PPGTT_ENABLE		(1<<9)
+#define   GEN8_GFX_PPGTT_48B		(1<<7)
+
+#define   GFX_FORWARD_VBLANK_MASK	(3<<5)
+#define   GFX_FORWARD_VBLANK_NEVER	(0<<5)
+#define   GFX_FORWARD_VBLANK_ALWAYS	(1<<5)
+#define   GFX_FORWARD_VBLANK_COND	(2<<5)
+
+#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+
+#define VLV_GU_CTL0	(VLV_DISPLAY_BASE + 0x2030)
+#define VLV_GU_CTL1	(VLV_DISPLAY_BASE + 0x2034)
+#define SCPD0		0x0209c /* 915+ only */
+#define IER		0x020a0
+#define IIR		0x020a4
+#define IMR		0x020a8
+#define ISR		0x020ac
+#define VLV_GUNIT_CLOCK_GATE	(VLV_DISPLAY_BASE + 0x2060)
+#define   GINT_DIS		(1<<22)
+#define   GCFG_DIS		(1<<8)
+#define VLV_GUNIT_CLOCK_GATE2	(VLV_DISPLAY_BASE + 0x2064)
+#define VLV_IIR_RW	(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER		(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR		(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR		(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR		(VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR	(VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT	12
+
+#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define EIR		0x020b0
+#define EMR		0x020b4
+#define ESR		0x020b8
+#define   GM45_ERROR_PAGE_TABLE				(1<<5)
+#define   GM45_ERROR_MEM_PRIV				(1<<4)
+#define   I915_ERROR_PAGE_TABLE				(1<<4)
+#define   GM45_ERROR_CP_PRIV				(1<<3)
+#define   I915_ERROR_MEMORY_REFRESH			(1<<1)
+#define   I915_ERROR_INSTRUCTION			(1<<0)
+#define INSTPM	        0x020c0
+#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define   INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+					will not assert AGPBUSY# and will only
+					be delivered when out of C3. */
+#define   INSTPM_FORCE_ORDERING				(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
+#define ACTHD	        0x020c8
+#define MEM_MODE	0x020cc
+#define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
+#define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
+#define   MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define FW_BLC		0x020d8
+#define FW_BLC2		0x020dc
+#define FW_BLC_SELF	0x020e0 /* 915+ only */
+#define   FW_BLC_SELF_EN_MASK      (1<<31)
+#define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
+#define   FW_BLC_SELF_EN           (1<<15) /* 945 only */
+#define MM_BURST_LENGTH     0x00700000
+#define MM_FIFO_WATERMARK   0x0001F000
+#define LM_BURST_LENGTH     0x00000700
+#define LM_FIFO_WATERMARK   0x0000001F
+#define MI_ARB_STATE	0x020e4 /* 915+ only */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ *   fetches. This is not turned on by default.
+ */
+#define   MI_ARB_RENDER_TLB_LOW_PRIORITY	(1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only).
+ */
+#define   MI_ARB_ISOCH_WAIT_GTT			(1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define   MI_ARB_BLOCK_GRANT_MASK		(3 << 12)
+#define   MI_ARB_BLOCK_GRANT_8			(0 << 12)	/* for 3 display planes */
+#define   MI_ARB_BLOCK_GRANT_4			(1 << 12)	/* for 2 display planes */
+#define   MI_ARB_BLOCK_GRANT_2			(2 << 12)	/* for 1 display plane */
+#define   MI_ARB_BLOCK_GRANT_0			(3 << 12)	/* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define   MI_ARB_C3_LP_WRITE_ENABLE		(1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2 TLB fetches.
+ */
+#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE	(1 << 10)
+
+/* Enables non-sequential data reads through the arbiter.
+ */
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE	(1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define   MI_ARB_CACHE_SNOOP_DISABLE		(1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define   MI_ARB_TIME_SLICE_MASK		(7 << 5)
+#define   MI_ARB_TIME_SLICE_1			(0 << 5)
+#define   MI_ARB_TIME_SLICE_2			(1 << 5)
+#define   MI_ARB_TIME_SLICE_4			(2 << 5)
+#define   MI_ARB_TIME_SLICE_6			(3 << 5)
+#define   MI_ARB_TIME_SLICE_8			(4 << 5)
+#define   MI_ARB_TIME_SLICE_10			(5 << 5)
+#define   MI_ARB_TIME_SLICE_14			(6 << 5)
+#define   MI_ARB_TIME_SLICE_16			(7 << 5)
+
+/* Low priority grace period page size */
+#define   MI_ARB_LOW_PRIORITY_GRACE_4KB		(0 << 4)	/* default */
+#define   MI_ARB_LOW_PRIORITY_GRACE_8KB		(1 << 4)
+
+/* Disable display A/B trickle feed */
+#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE	(1 << 2)
+
+/* Set display plane priority */
+#define   MI_ARB_DISPLAY_PRIORITY_A_B		(0 << 0)	/* display A > display B */
+#define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
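+
+/*
+ * The driver programs MI_ARB_STATE with masked writes, the top 16 bits
+ * acting as write enables as with the chicken registers above; an
+ * illustrative sketch:
+ *
+ *	I915_WRITE(MI_ARB_STATE,
+ *		   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ */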
+
+#define MI_STATE	0x020e4 /* gen2 only */
+#define   MI_AGPBUSY_INT_EN			(1 << 1) /* 85x only */
+#define   MI_AGPBUSY_830_MODE			(1 << 0) /* 85x only */
+
+#define CACHE_MODE_0	0x02120 /* 915+ only */
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
+#define   CM0_IZ_OPT_DISABLE      (1<<6)
+#define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
+#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define   CM0_COLOR_EVICT_DISABLE (1<<3)
+#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define GFX_FLSH_CNTL	0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6	0x101008
+#define   GFX_FLSH_CNTL_EN	(1<<0)
+#define ECOSKPD		0x021d0
+#define   ECO_GATING_CX_ONLY	(1<<3)
+#define   ECO_FLIP_DONE		(1<<0)
+
+#define CACHE_MODE_0_GEN7	0x7000 /* IVB+ */
+#define   RC_OP_FLUSH_ENABLE (1<<0)
+#define   HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define CACHE_MODE_1		0x7004 /* IVB+ */
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE	(1<<6)
+#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE	(1<<6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE	(1<<1)
+
+#define GEN6_BLITTER_ECOSKPD	0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT			16
+#define   GEN6_BLITTER_FBC_NOTIFY			(1<<3)
+
+#define GEN6_RC_SLEEP_PSMI_CONTROL	0x2050
+#define   GEN6_PSMI_SLEEP_MSG_DISABLE	(1 << 0)
+#define   GEN8_RC_SEMA_IDLE_MSG_DISABLE	(1 << 12)
+#define   GEN8_FF_DOP_CLOCK_GATE_DISABLE	(1<<10)
+
+/* Fuse readout registers for GT */
+#define CHV_FUSE_GT			(VLV_DISPLAY_BASE + 0x2168)
+#define   CHV_FGT_DISABLE_SS0		(1 << 10)
+#define   CHV_FGT_DISABLE_SS1		(1 << 11)
+#define   CHV_FGT_EU_DIS_SS0_R0_SHIFT	16
+#define   CHV_FGT_EU_DIS_SS0_R0_MASK	(0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define   CHV_FGT_EU_DIS_SS0_R1_SHIFT	20
+#define   CHV_FGT_EU_DIS_SS0_R1_MASK	(0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define   CHV_FGT_EU_DIS_SS1_R0_SHIFT	24
+#define   CHV_FGT_EU_DIS_SS1_R0_MASK	(0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define   CHV_FGT_EU_DIS_SS1_R1_SHIFT	28
+#define   CHV_FGT_EU_DIS_SS1_R1_MASK	(0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
+#define GEN8_FUSE2			0x9120
+#define   GEN8_F2_SS_DIS_SHIFT		21
+#define   GEN8_F2_SS_DIS_MASK		(0x7 << GEN8_F2_SS_DIS_SHIFT)
+#define   GEN8_F2_S_ENA_SHIFT		25
+#define   GEN8_F2_S_ENA_MASK		(0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define   GEN9_F2_SS_DIS_SHIFT		20
+#define   GEN9_F2_SS_DIS_MASK		(0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0		0x9134
+#define   GEN8_EU_DIS0_S0_MASK		0xffffff
+#define   GEN8_EU_DIS0_S1_SHIFT		24
+#define   GEN8_EU_DIS0_S1_MASK		(0xff << GEN8_EU_DIS0_S1_SHIFT)
+
+#define GEN8_EU_DISABLE1		0x9138
+#define   GEN8_EU_DIS1_S1_MASK		0xffff
+#define   GEN8_EU_DIS1_S2_SHIFT		16
+#define   GEN8_EU_DIS1_S2_MASK		(0xffff << GEN8_EU_DIS1_S2_SHIFT)
+
+#define GEN8_EU_DISABLE2		0x913c
+#define   GEN8_EU_DIS2_S2_MASK		0xff
+
+#define GEN9_EU_DISABLE(slice)		(0x9134 + (slice)*0x4)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
+#define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
+#define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
+#define   GEN6_BSD_SLEEP_INDICATOR	(1 << 3)
+#define   GEN6_BSD_GO_INDICATOR		(1 << 4)
+
+/* On modern GEN architectures interrupt control consists of two sets
+ * of registers. The first set pertains to the ring generating the
+ * interrupt. The second set is for the functional block generating the
+ * interrupt: PM, GT, DE, etc.
+ *
+ * Luckily *knocks on wood* all the ring interrupt bits match up with the
+ * GT interrupt bits, so we don't need to duplicate the defines.
+ *
+ * These defines should cover us well from SNB->HSW with minor exceptions;
+ * they can also work on ILK.
+ */
+#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT		(1 << 26)
+#define GT_BLT_CS_ERROR_INTERRUPT		(1 << 25)
+#define GT_BLT_USER_INTERRUPT			(1 << 22)
+#define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
+#define GT_BSD_USER_INTERRUPT			(1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_CONTEXT_SWITCH_INTERRUPT		(1 <<  8)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
+#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
+#define GT_RENDER_SYNC_STATUS_INTERRUPT		(1 <<  2)
+#define GT_RENDER_DEBUG_INTERRUPT		(1 <<  1)
+#define GT_RENDER_USER_INTERRUPT		(1 <<  0)
+
+#define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
+#define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
+
+#define GT_PARITY_ERROR(dev) \
+	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+	 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
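+
+/*
+ * A sketch of the two-level scheme described above, loosely modelled on
+ * the driver's ilk_enable_gt_irq(); GTIMR is the GT-level mask register
+ * defined elsewhere in this file and the gt_irq_mask cache is an
+ * assumption for illustration:
+ *
+ *	dev_priv->gt_irq_mask &= ~GT_RENDER_USER_INTERRUPT;  // unmask at GT level
+ *	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ *	I915_WRITE(RING_IMR(RENDER_RING_BASE),		      // and at the ring
+ *		   ~GT_RENDER_USER_INTERRUPT);
+ */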
+
+/* These are all the "old" interrupts */
+#define ILK_BSD_USER_INTERRUPT				(1<<5)
+
+#define I915_PM_INTERRUPT				(1<<31)
+#define I915_ISP_INTERRUPT				(1<<22)
+#define I915_LPE_PIPE_B_INTERRUPT			(1<<21)
+#define I915_LPE_PIPE_A_INTERRUPT			(1<<20)
+#define I915_MIPIC_INTERRUPT				(1<<19)
+#define I915_MIPIA_INTERRUPT				(1<<18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT			(1<<17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT		(1<<16)
+#define I915_MASTER_ERROR_INTERRUPT			(1<<15)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT		(1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT		(1<<13)
+#define I915_HWB_OOM_INTERRUPT				(1<<13)
+#define I915_LPE_PIPE_C_INTERRUPT			(1<<12)
+#define I915_SYNC_STATUS_INTERRUPT			(1<<12)
+#define I915_MISC_INTERRUPT				(1<<11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT		(1<<10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT		(1<<9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT		(1<<8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT		(1<<3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT		(1<<2)
+#define I915_DEBUG_INTERRUPT				(1<<2)
+#define I915_WINVALID_INTERRUPT				(1<<1)
+#define I915_USER_INTERRUPT				(1<<1)
+#define I915_ASLE_INTERRUPT				(1<<0)
+#define I915_BSD_USER_INTERRUPT				(1<<25)
+
+#define GEN6_BSD_RNCID			0x12198
+
+#define GEN7_FF_THREAD_MODE		0x20a0
+#define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN8_FF_DS_REF_CNT_FFME	(1 << 19)
+#define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
+#define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
+#define   GEN7_FF_TS_SCHED_HW		(0x0<<16) /* Default */
+#define   GEN7_FF_VS_REF_CNT_FFME	(1 << 15)
+#define   GEN7_FF_VS_SCHED_HS1		(0x5<<12)
+#define   GEN7_FF_VS_SCHED_HS0		(0x3<<12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE	(0x1<<12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW		(0x0<<12)
+#define   GEN7_FF_DS_SCHED_HS1		(0x5<<4)
+#define   GEN7_FF_DS_SCHED_HS0		(0x3<<4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE	(0x1<<4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW		(0x0<<4)
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_C3_IDLE	(1<<13)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO_SHIFT	(0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE_SHIFT	(0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANE(plane)	((plane)<<0)
+#define FBC_FENCE_OFF		0x03218 /* BSpec typo has 321Bh */
+#define FBC_TAG(i)		(0x03300 + (i) * 4)
+
+#define FBC_STATUS2		0x43214
+#define  FBC_COMPRESSION_MASK	0x7ff
+
+#define FBC_LL_SIZE		(1536)
+
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE		0x3200
+#define DPFC_CONTROL		0x3208
+#define   DPFC_CTL_EN		(1<<31)
+#define   DPFC_CTL_PLANE(plane)	((plane)<<30)
+#define   IVB_DPFC_CTL_PLANE(plane)	((plane)<<29)
+#define   DPFC_CTL_FENCE_EN	(1<<29)
+#define   IVB_DPFC_CTL_FENCE_EN	(1<<28)
+#define   DPFC_CTL_PERSISTENT_MODE	(1<<25)
+#define   DPFC_SR_EN		(1<<10)
+#define   DPFC_CTL_LIMIT_1X	(0<<6)
+#define   DPFC_CTL_LIMIT_2X	(1<<6)
+#define   DPFC_CTL_LIMIT_4X	(2<<6)
+#define DPFC_RECOMP_CTL		0x320c
+#define   DPFC_RECOMP_STALL_EN	(1<<27)
+#define   DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define   DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define   DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define   DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS		0x3210
+#define   DPFC_INVAL_SEG_SHIFT  (16)
+#define   DPFC_INVAL_SEG_MASK	(0x07ff0000)
+#define   DPFC_COMP_SEG_SHIFT	(0)
+#define   DPFC_COMP_SEG_MASK	(0x000003ff)
+#define DPFC_STATUS2		0x3214
+#define DPFC_FENCE_YOFF		0x3218
+#define DPFC_CHICKEN		0x3224
+#define   DPFC_HT_MODIFY	(1<<31)
+
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE	0x43200
+#define ILK_DPFC_CONTROL	0x43208
+#define   FBC_CTL_FALSE_COLOR	(1<<10)
+/* Bits 28-8 are reserved */
+#define   DPFC_RESERVED		(0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL	0x4320c
+#define ILK_DPFC_STATUS		0x43210
+#define ILK_DPFC_FENCE_YOFF	0x43218
+#define ILK_DPFC_CHICKEN	0x43224
+#define ILK_FBC_RT_BASE		0x2128
+#define   ILK_FBC_RT_VALID	(1<<0)
+#define   SNB_FBC_FRONT_BUFFER	(1<<1)
+
+#define ILK_DISPLAY_CHICKEN1	0x42000
+#define   ILK_FBCQ_DIS		(1<<22)
+#define	  ILK_PABSTRETCH_DIS	(1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA		0x100100
+#define   SNB_CPU_FENCE_ENABLE	(1<<29)
+#define DPFC_CPU_FENCE_OFFSET	0x100104
+
+/* Framebuffer compression for Ivybridge */
+#define IVB_FBC_RT_BASE			0x7020
+
+#define IPS_CTL		0x43408
+#define   IPS_ENABLE	(1 << 31)
+
+#define MSG_FBC_REND_STATE	0x50380
+#define   FBC_REND_NUKE		(1<<2)
+#define   FBC_REND_CACHE_CLEAN	(1<<1)
+
+/*
+ * GPIO regs
+ */
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+# define GPIO_CLOCK_DIR_MASK		(1 << 0)
+# define GPIO_CLOCK_DIR_IN		(0 << 1)
+# define GPIO_CLOCK_DIR_OUT		(1 << 1)
+# define GPIO_CLOCK_VAL_MASK		(1 << 2)
+# define GPIO_CLOCK_VAL_OUT		(1 << 3)
+# define GPIO_CLOCK_VAL_IN		(1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+# define GPIO_DATA_DIR_MASK		(1 << 8)
+# define GPIO_DATA_DIR_IN		(0 << 9)
+# define GPIO_DATA_DIR_OUT		(1 << 9)
+# define GPIO_DATA_VAL_MASK		(1 << 10)
+# define GPIO_DATA_VAL_OUT		(1 << 11)
+# define GPIO_DATA_VAL_IN		(1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+#define GMBUS0			(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
+#define   GMBUS_RATE_100KHZ	(0<<8)
+#define   GMBUS_RATE_50KHZ	(1<<8)
+#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PIN_DISABLED	0
+#define   GMBUS_PIN_SSC		1
+#define   GMBUS_PIN_VGADDC	2
+#define   GMBUS_PIN_PANEL	3
+#define   GMBUS_PIN_DPD_CHV	3 /* HDMID_CHV */
+#define   GMBUS_PIN_DPC		4 /* HDMIC */
+#define   GMBUS_PIN_DPB		5 /* SDVO, HDMIB */
+#define   GMBUS_PIN_DPD		6 /* HDMID */
+#define   GMBUS_PIN_RESERVED	7 /* 7 reserved */
+#define   GMBUS_PIN_1_BXT	1
+#define   GMBUS_PIN_2_BXT	2
+#define   GMBUS_PIN_3_BXT	3
+#define   GMBUS_NUM_PINS	7 /* including 0 */
+#define GMBUS1			(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
+#define   GMBUS_SW_CLR_INT	(1<<31)
+#define   GMBUS_SW_RDY		(1<<30)
+#define   GMBUS_ENT		(1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE	(0<<25)
+#define   GMBUS_CYCLE_WAIT	(1<<25)
+#define   GMBUS_CYCLE_INDEX	(2<<25)
+#define   GMBUS_CYCLE_STOP	(4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_BYTE_COUNT_MAX   256U
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ	(1<<0)
+#define   GMBUS_SLAVE_WRITE	(0<<0)
+#define GMBUS2			(dev_priv->gpio_mmio_base + 0x5108) /* status */
+#define   GMBUS_INUSE		(1<<15)
+#define   GMBUS_HW_WAIT_PHASE	(1<<14)
+#define   GMBUS_STALL_TIMEOUT	(1<<13)
+#define   GMBUS_INT		(1<<12)
+#define   GMBUS_HW_RDY		(1<<11)
+#define   GMBUS_SATOER		(1<<10)
+#define   GMBUS_ACTIVE		(1<<9)
+#define GMBUS3			(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
+#define GMBUS4			(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN		(1<<3)
+#define   GMBUS_IDLE_EN		(1<<2)
+#define   GMBUS_HW_WAIT_EN	(1<<1)
+#define   GMBUS_HW_RDY_EN	(1<<0)
+#define GMBUS5			(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
+
+/*
+ * Clock control & power management
+ */
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+
+#define VGA0	0x6000
+#define VGA1	0x6004
+#define VGA_PD	0x6010
+#define   VGA0_PD_P2_DIV_4	(1 << 7)
+#define   VGA0_PD_P1_DIV_2	(1 << 5)
+#define   VGA0_PD_P1_SHIFT	0
+#define   VGA0_PD_P1_MASK	(0x1f << 0)
+#define   VGA1_PD_P2_DIV_4	(1 << 15)
+#define   VGA1_PD_P1_DIV_2	(1 << 13)
+#define   VGA1_PD_P1_SHIFT	8
+#define   VGA1_PD_P1_MASK	(0x1f << 8)
+#define   DPLL_VCO_ENABLE		(1 << 31)
+#define   DPLL_SDVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_DVO_2X_MODE		(1 << 30)
+#define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
+#define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define   DPLL_REF_CLK_ENABLE_VLV	(1 << 29)
+#define   DPLL_VGA_MODE_DIS		(1 << 28)
+#define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
+#define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
+#define   DPLL_MODE_MASK		(3 << 26)
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
+#define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
+#define   DPLL_LOCK_VLV			(1<<15)
+#define   DPLL_INTEGRATED_CRI_CLK_VLV	(1<<14)
+#define   DPLL_INTEGRATED_REF_CLK_VLV	(1<<13)
+#define   DPLL_SSC_REF_CLK_CHV		(1<<13)
+#define   DPLL_PORTC_READY_MASK		(0xf << 4)
+#define   DPLL_PORTB_READY_MASK		(0xf)
+
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS			(VLV_DISPLAY_BASE + 0x6240)
+#define   DPLL_PORTD_READY_MASK		(0xf)
+#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)	(1 << (2*(phy)+(ch)+27))
+#define   PHY_LDO_DELAY_0NS			0x0
+#define   PHY_LDO_DELAY_200NS			0x1
+#define   PHY_LDO_DELAY_600NS			0x2
+#define   PHY_LDO_SEQ_DELAY(delay, phy)		((delay) << (2*(phy)+23))
+#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)	((mask) << (8*(phy)+4*(ch)+11))
+#define   PHY_CH_SU_PSR				0x1
+#define   PHY_CH_DEEP_PSR			0x7
+#define   PHY_CH_POWER_MODE(mode, phy, ch)	((mode) << (6*(phy)+3*(ch)+2))
+#define   PHY_COM_LANE_RESET_DEASSERT(phy)	(1 << (phy))
+#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
+#define   PHY_POWERGOOD(phy)	(((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
+#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6-(6*(phy)+3*(ch))))
+#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8-(6*(phy)+3*(ch)+(spline))))
+
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define   PLL_P2_DIVIDE_BY_4		(1 << 23)
+#define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
+#define   PLL_REF_INPUT_DREFCLK		(0 << 13)
+#define   PLL_REF_INPUT_TVCLKINA	(1 << 13) /* i830 */
+#define   PLL_REF_INPUT_TVCLKINBC	(2 << 13) /* SDVO TVCLKIN */
+#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define   PLL_REF_INPUT_MASK		(3 << 13)
+#define   PLL_LOAD_PULSE_PHASE_SHIFT		9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)	(((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT            0
+# define DPLL_FPA1_P1_POST_DIV_MASK             0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define   PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
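+
+/*
+ * Illustrative sketch, not part of the original header: a read-modify-write
+ * of the load pulse phase field described above. "phase" is assumed to be in
+ * the documented 4-13 range; reading and writing the DPLL register itself is
+ * left to the caller.
+ */
+static inline u32 pll_load_pulse_phase_sketch(u32 dpll, u32 phase)
+{
+	dpll &= ~PLL_LOAD_PULSE_PHASE_MASK;
+	return dpll | (phase << PLL_LOAD_PULSE_PHASE_SHIFT);
+}
+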
+#define   DISPLAY_RATE_SELECT_FPA1		(1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define   SDVO_MULTIPLIER_MASK			0x000000ff
+#define   SDVO_MULTIPLIER_SHIFT_HIRES		4
+#define   SDVO_MULTIPLIER_SHIFT_VGA		0
+
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define   DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+#define   DPLL_MD_UDI_DIVIDER_SHIFT		24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define   DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define   DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+#define   DPLL_MD_UDI_MULTIPLIER_SHIFT		8
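+
+/*
+ * Illustrative sketch, not part of the original header: choosing the SDVO
+ * pixel multiplication factor for a given dotclock per the comment above
+ * (the bus runs at 10x the DPLL clock and must land between 1 and 2 GHz).
+ * Returns the factor (1-5), or 0 if none fits; the register field itself is
+ * programmed as factor - 1. Clocks are in kHz.
+ */
+static inline u32 sdvo_pixel_multiplier_sketch(u32 dotclock_khz)
+{
+	u32 factor;
+
+	for (factor = 1; factor <= 5; factor++) {
+		u32 bus_khz = dotclock_khz * factor * 10;
+
+		if (bus_khz >= 1000000 && bus_khz <= 2000000)
+			return factor;
+	}
+	return 0;
+}
+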
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+
+#define _FPA0	0x06040
+#define _FPA1	0x06044
+#define _FPB0	0x06048
+#define _FPB1	0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
+#define   FP_N_DIV_MASK		0x003f0000
+#define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
+#define   FP_N_DIV_SHIFT		16
+#define   FP_M1_DIV_MASK	0x00003f00
+#define   FP_M1_DIV_SHIFT		 8
+#define   FP_M2_DIV_MASK	0x0000003f
+#define   FP_M2_PINEVIEW_DIV_MASK	0x000000ff
+#define   FP_M2_DIV_SHIFT		 0
+#define DPLL_TEST	0x606c
+#define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+#define   DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+#define   DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+#define   DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+#define   DPLLB_TEST_N_BYPASS		(1 << 19)
+#define   DPLLB_TEST_M_BYPASS		(1 << 18)
+#define   DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+#define   DPLLA_TEST_N_BYPASS		(1 << 3)
+#define   DPLLA_TEST_M_BYPASS		(1 << 2)
+#define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+#define D_STATE		0x6104
+#define  DSTATE_GFX_RESET_I830			(1<<6)
+#define  DSTATE_PLL_D3_OFF			(1<<3)
+#define  DSTATE_GFX_CLOCK_GATING		(1<<1)
+#define  DSTATE_DOT_CLOCK_GATING		(1<<0)
+#define DSPCLK_GATE_D	(dev_priv->info.display_mmio_offset + 0x6200)
+# define DPUNIT_B_CLOCK_GATE_DISABLE		(1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE		(1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE		(1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE		(1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE		(1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE		(1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE		(1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE		(1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE		(1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE		(1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE		(1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE		(1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE		(1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE		(1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE		(1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE		(1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE		(1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE		(1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE		(1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE		(1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE		(1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE		(1 << 4)
+/*
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE		(1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE		(1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE		(1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE		(1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE		(1 << 0) /* 845,865 */
+
+#define RENCLK_GATE_D1		0x6204
+# define BLITTER_CLOCK_GATE_DISABLE		(1 << 13) /* 945GM only */
+# define MPEG_CLOCK_GATE_DISABLE		(1 << 12) /* 945GM only */
+# define PC_FE_CLOCK_GATE_DISABLE		(1 << 11)
+# define PC_BE_CLOCK_GATE_DISABLE		(1 << 10)
+# define WINDOWER_CLOCK_GATE_DISABLE		(1 << 9)
+# define INTERPOLATOR_CLOCK_GATE_DISABLE	(1 << 8)
+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE	(1 << 7)
+# define MOTION_COMP_CLOCK_GATE_DISABLE		(1 << 6)
+# define MAG_CLOCK_GATE_DISABLE			(1 << 5)
+/* This bit must be unset on 855,865 */
+# define MECI_CLOCK_GATE_DISABLE		(1 << 4)
+# define DCMP_CLOCK_GATE_DISABLE		(1 << 3)
+# define MEC_CLOCK_GATE_DISABLE			(1 << 2)
+# define MECO_CLOCK_GATE_DISABLE		(1 << 1)
+/* This bit must be set on 855,865. */
+# define SV_CLOCK_GATE_DISABLE			(1 << 0)
+# define I915_MPEG_CLOCK_GATE_DISABLE		(1 << 16)
+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE	(1 << 15)
+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE	(1 << 14)
+# define I915_BD_BF_CLOCK_GATE_DISABLE		(1 << 13)
+# define I915_SF_SE_CLOCK_GATE_DISABLE		(1 << 12)
+# define I915_WM_CLOCK_GATE_DISABLE		(1 << 11)
+# define I915_IZ_CLOCK_GATE_DISABLE		(1 << 10)
+# define I915_PI_CLOCK_GATE_DISABLE		(1 << 9)
+# define I915_DI_CLOCK_GATE_DISABLE		(1 << 8)
+# define I915_SH_SV_CLOCK_GATE_DISABLE		(1 << 7)
+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE	(1 << 6)
+# define I915_SC_CLOCK_GATE_DISABLE		(1 << 5)
+# define I915_FL_CLOCK_GATE_DISABLE		(1 << 4)
+# define I915_DM_CLOCK_GATE_DISABLE		(1 << 3)
+# define I915_PS_CLOCK_GATE_DISABLE		(1 << 2)
+# define I915_CC_CLOCK_GATE_DISABLE		(1 << 1)
+# define I915_BY_CLOCK_GATE_DISABLE		(1 << 0)
+
+# define I965_RCZ_CLOCK_GATE_DISABLE		(1 << 30)
+/* This bit must always be set on 965G/965GM */
+# define I965_RCC_CLOCK_GATE_DISABLE		(1 << 29)
+# define I965_RCPB_CLOCK_GATE_DISABLE		(1 << 28)
+# define I965_DAP_CLOCK_GATE_DISABLE		(1 << 27)
+# define I965_ROC_CLOCK_GATE_DISABLE		(1 << 26)
+# define I965_GW_CLOCK_GATE_DISABLE		(1 << 25)
+# define I965_TD_CLOCK_GATE_DISABLE		(1 << 24)
+/* This bit must always be set on 965G */
+# define I965_ISC_CLOCK_GATE_DISABLE		(1 << 23)
+# define I965_IC_CLOCK_GATE_DISABLE		(1 << 22)
+# define I965_EU_CLOCK_GATE_DISABLE		(1 << 21)
+# define I965_IF_CLOCK_GATE_DISABLE		(1 << 20)
+# define I965_TC_CLOCK_GATE_DISABLE		(1 << 19)
+# define I965_SO_CLOCK_GATE_DISABLE		(1 << 17)
+# define I965_FBC_CLOCK_GATE_DISABLE		(1 << 16)
+# define I965_MARI_CLOCK_GATE_DISABLE		(1 << 15)
+# define I965_MASF_CLOCK_GATE_DISABLE		(1 << 14)
+# define I965_MAWB_CLOCK_GATE_DISABLE		(1 << 13)
+# define I965_EM_CLOCK_GATE_DISABLE		(1 << 12)
+# define I965_UC_CLOCK_GATE_DISABLE		(1 << 11)
+# define I965_SI_CLOCK_GATE_DISABLE		(1 << 6)
+# define I965_MT_CLOCK_GATE_DISABLE		(1 << 5)
+# define I965_PL_CLOCK_GATE_DISABLE		(1 << 4)
+# define I965_DG_CLOCK_GATE_DISABLE		(1 << 3)
+# define I965_QC_CLOCK_GATE_DISABLE		(1 << 2)
+# define I965_FT_CLOCK_GATE_DISABLE		(1 << 1)
+# define I965_DM_CLOCK_GATE_DISABLE		(1 << 0)
+
+#define RENCLK_GATE_D2		0x6208
+#define VF_UNIT_CLOCK_GATE_DISABLE		(1 << 9)
+#define GS_UNIT_CLOCK_GATE_DISABLE		(1 << 7)
+#define CL_UNIT_CLOCK_GATE_DISABLE		(1 << 6)
+
+#define VDECCLK_GATE_D		0x620C		/* g4x only */
+#define  VCP_UNIT_CLOCK_GATE_DISABLE		(1 << 4)
+
+#define RAMCLK_GATE_D		0x6210		/* CRL only */
+#define DEUC			0x6214          /* CRL only */
+
+#define FW_BLC_SELF_VLV		(VLV_DISPLAY_BASE + 0x6500)
+#define  FW_CSPWRDWNEN		(1<<15)
+
+#define MI_ARB_VLV		(VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO	(VLV_DISPLAY_BASE + 0x6508)
+#define   CDCLK_FREQ_SHIFT	4
+#define   CDCLK_FREQ_MASK	(0x1f << CDCLK_FREQ_SHIFT)
+#define   CZCLK_FREQ_MASK	0xf
+
+#define GCI_CONTROL		(VLV_DISPLAY_BASE + 0x650C)
+#define   PFI_CREDIT_63		(9 << 28)		/* chv only */
+#define   PFI_CREDIT_31		(8 << 28)		/* chv only */
+#define   PFI_CREDIT(x)		(((x) - 8) << 28)	/* 8-15 */
+#define   PFI_CREDIT_RESEND	(1 << 27)
+#define   VGA_FAST_MODE_DISABLE	(1 << 14)
+
+#define GMBUSFREQ_VLV		(VLV_DISPLAY_BASE + 0x6510)
+
+/*
+ * Palette regs
+ */
+#define PALETTE_A_OFFSET 0xa000
+#define PALETTE_B_OFFSET 0xa800
+#define CHV_PALETTE_C_OFFSET 0xc000
+#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \
+			  dev_priv->info.display_mmio_offset + (i) * 4)
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way.  It is not accessible from the CP register read instructions.
+ *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
+ */
+#define MCHBAR_MIRROR_BASE	0x10000
+
+#define MCHBAR_MIRROR_BASE_SNB	0x140000
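+
+/*
+ * Illustrative sketch, not part of the original header: locating the real
+ * MCHBAR behind this mirror, per the comment above. Assumes <linux/pci.h>;
+ * "bridge" stands in for device 0 function 0, and the 0x44-vs-0x48 choice
+ * for the pre-965/965+ split. The driver's actual setup code is elsewhere,
+ * so treat this as a hedged example only.
+ */
+static inline int mchbar_base_sketch(struct pci_dev *bridge, bool gen4plus,
+				     u32 *mchbar)
+{
+	return pci_read_config_dword(bridge, gen4plus ? 0x48 : 0x44, mchbar);
+}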
+
+#define CTG_STOLEN_RESERVED		(MCHBAR_MIRROR_BASE + 0x34)
+#define ELK_STOLEN_RESERVED		(MCHBAR_MIRROR_BASE + 0x48)
+#define G4X_STOLEN_RESERVED_ADDR1_MASK	(0xFFFF << 16)
+#define G4X_STOLEN_RESERVED_ADDR2_MASK	(0xFFF << 4)
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC			0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC	(1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
+#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
+#define DCC2			0x10204
+#define DCC2_MODIFIED_ENHANCED_DISABLE			(1 << 20)
+
+/* Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL            0x101a8
+#define CSHRDDR3CTL_DDR3       (1 << 2)
+
+/* 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3			0x10206
+#define C1DRB3			0x10606
+
+/* snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0			(MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1			(MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2			(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define   MAD_DIMM_ECC_MASK		(0x3 << 24)
+#define   MAD_DIMM_ECC_OFF		(0x0 << 24)
+#define   MAD_DIMM_ECC_IO_ON_LOGIC_OFF	(0x1 << 24)
+#define   MAD_DIMM_ECC_IO_OFF_LOGIC_ON	(0x2 << 24)
+#define   MAD_DIMM_ECC_ON		(0x3 << 24)
+#define   MAD_DIMM_ENH_INTERLEAVE	(0x1 << 22)
+#define   MAD_DIMM_RANK_INTERLEAVE	(0x1 << 21)
+#define   MAD_DIMM_B_WIDTH_X16		(0x1 << 20) /* X8 chips if unset */
+#define   MAD_DIMM_A_WIDTH_X16		(0x1 << 19) /* X8 chips if unset */
+#define   MAD_DIMM_B_DUAL_RANK		(0x1 << 18)
+#define   MAD_DIMM_A_DUAL_RANK		(0x1 << 17)
+#define   MAD_DIMM_A_SELECT		(0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define   MAD_DIMM_B_SIZE_SHIFT		8
+#define   MAD_DIMM_B_SIZE_MASK		(0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define   MAD_DIMM_A_SIZE_SHIFT		0
+#define   MAD_DIMM_A_SIZE_MASK		(0xff << MAD_DIMM_A_SIZE_SHIFT)
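+
+/*
+ * Illustrative sketch, not part of the original header: decoding DIMM A's
+ * size in MB from a MAD_DIMM_C* value (read by the caller), using the
+ * multiples-of-256MB encoding noted above.
+ */
+static inline u32 mad_dimm_a_size_mb_sketch(u32 mad_dimm)
+{
+	return ((mad_dimm & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT) * 256;
+}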
+
+/* snb MCH registers for priority tuning */
+#define MCH_SSKPD			(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define   MCH_SSKPD_WM0_MASK		0x3f
+#define   MCH_SSKPD_WM0_VAL		0xc
+
+#define MCH_SECP_NRG_STTS		(MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
+/* Clocking configuration register */
+#define CLKCFG			0x10c00
+#define CLKCFG_FSB_400					(5 << 0)	/* hrawclk 100 */
+#define CLKCFG_FSB_533					(1 << 0)	/* hrawclk 133 */
+#define CLKCFG_FSB_667					(3 << 0)	/* hrawclk 166 */
+#define CLKCFG_FSB_800					(2 << 0)	/* hrawclk 200 */
+#define CLKCFG_FSB_1067					(6 << 0)	/* hrawclk 266 */
+#define CLKCFG_FSB_1333					(7 << 0)	/* hrawclk 333 */
+/* Note: the two values below are guesses */
+#define CLKCFG_FSB_1600					(4 << 0)	/* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT				(0 << 0)	/* hrawclk 400 */
+#define CLKCFG_FSB_MASK					(7 << 0)
+#define CLKCFG_MEM_533					(1 << 4)
+#define CLKCFG_MEM_667					(2 << 4)
+#define CLKCFG_MEM_800					(3 << 4)
+#define CLKCFG_MEM_MASK					(7 << 4)
+
+#define HPLLVCO                 (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE          (MCHBAR_MIRROR_BASE + 0xc0f)
+
+#define TSC1			0x11001
+#define   TSE			(1<<0)
+#define TR1			0x11006
+#define TSFS			0x11020
+#define   TSFS_SLOPE_MASK	0x0000ff00
+#define   TSFS_SLOPE_SHIFT	8
+#define   TSFS_INTR_MASK	0x000000ff
+
+#define CRSTANDVID		0x11100
+#define PXVFREQ(i)		(0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define   PXVFREQ_PX_MASK	0x7f000000
+#define   PXVFREQ_PX_SHIFT	24
+#define VIDFREQ_BASE		0x11110
+#define VIDFREQ1		0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2		0x11114
+#define VIDFREQ3		0x11118
+#define VIDFREQ4		0x1111c
+#define   VIDFREQ_P0_MASK	0x1f000000
+#define   VIDFREQ_P0_SHIFT	24
+#define   VIDFREQ_P0_CSCLK_MASK	0x00f00000
+#define   VIDFREQ_P0_CSCLK_SHIFT 20
+#define   VIDFREQ_P0_CRCLK_MASK	0x000f0000
+#define   VIDFREQ_P0_CRCLK_SHIFT 16
+#define   VIDFREQ_P1_MASK	0x00001f00
+#define   VIDFREQ_P1_SHIFT	8
+#define   VIDFREQ_P1_CSCLK_MASK	0x000000f0
+#define   VIDFREQ_P1_CSCLK_SHIFT 4
+#define   VIDFREQ_P1_CRCLK_MASK	0x0000000f
+#define INTTOEXT_BASE_ILK	0x11300
+#define INTTOEXT_BASE		0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define   INTTOEXT_MAP3_SHIFT	24
+#define   INTTOEXT_MAP3_MASK	(0x1f << INTTOEXT_MAP3_SHIFT)
+#define   INTTOEXT_MAP2_SHIFT	16
+#define   INTTOEXT_MAP2_MASK	(0x1f << INTTOEXT_MAP2_SHIFT)
+#define   INTTOEXT_MAP1_SHIFT	8
+#define   INTTOEXT_MAP1_MASK	(0x1f << INTTOEXT_MAP1_SHIFT)
+#define   INTTOEXT_MAP0_SHIFT	0
+#define   INTTOEXT_MAP0_MASK	(0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL		0x11170 /* Ironlake only */
+#define   MEMCTL_CMD_MASK	0xe000
+#define   MEMCTL_CMD_SHIFT	13
+#define   MEMCTL_CMD_RCLK_OFF	0
+#define   MEMCTL_CMD_RCLK_ON	1
+#define   MEMCTL_CMD_CHFREQ	2
+#define   MEMCTL_CMD_CHVID	3
+#define   MEMCTL_CMD_VMMOFF	4
+#define   MEMCTL_CMD_VMMON	5
+#define   MEMCTL_CMD_STS	(1<<12) /* write 1 triggers command, clears
+					   when the command completes */
+#define   MEMCTL_FREQ_MASK	0x0f00 /* jitter, from 0-15 */
+#define   MEMCTL_FREQ_SHIFT	8
+#define   MEMCTL_SFCAVM		(1<<7)
+#define   MEMCTL_TGT_VID_MASK	0x007f
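+
+/*
+ * Illustrative sketch, not part of the original header: composing a MEMSWCTL
+ * command word. Per the MEMCTL_CMD_STS note above, the caller would write
+ * this to MEMSWCTL and then poll until the STS bit reads back clear; the
+ * driver's actual sequencing may differ, so this is a hedged example only.
+ */
+static inline u32 memswctl_cmd_sketch(u32 cmd, u32 freq)
+{
+	return (cmd << MEMCTL_CMD_SHIFT) | (freq << MEMCTL_FREQ_SHIFT) |
+	       MEMCTL_CMD_STS;
+}
+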
+#define MEMIHYST		0x1117c
+#define MEMINTREN		0x11180 /* 16 bits */
+#define   MEMINT_RSEXIT_EN	(1<<8)
+#define   MEMINT_CX_SUPR_EN	(1<<7)
+#define   MEMINT_CONT_BUSY_EN	(1<<6)
+#define   MEMINT_AVG_BUSY_EN	(1<<5)
+#define   MEMINT_EVAL_CHG_EN	(1<<4)
+#define   MEMINT_MON_IDLE_EN	(1<<3)
+#define   MEMINT_UP_EVAL_EN	(1<<2)
+#define   MEMINT_DOWN_EVAL_EN	(1<<1)
+#define   MEMINT_SW_CMD_EN	(1<<0)
+#define MEMINTRSTR		0x11182 /* 16 bits */
+#define   MEM_RSEXIT_MASK	0xc000
+#define   MEM_RSEXIT_SHIFT	14
+#define   MEM_CONT_BUSY_MASK	0x3000
+#define   MEM_CONT_BUSY_SHIFT	12
+#define   MEM_AVG_BUSY_MASK	0x0c00
+#define   MEM_AVG_BUSY_SHIFT	10
+#define   MEM_EVAL_CHG_MASK	0x0300
+#define   MEM_EVAL_BUSY_SHIFT	8
+#define   MEM_MON_IDLE_MASK	0x00c0
+#define   MEM_MON_IDLE_SHIFT	6
+#define   MEM_UP_EVAL_MASK	0x0030
+#define   MEM_UP_EVAL_SHIFT	4
+#define   MEM_DOWN_EVAL_MASK	0x000c
+#define   MEM_DOWN_EVAL_SHIFT	2
+#define   MEM_SW_CMD_MASK	0x0003
+#define   MEM_INT_STEER_GFX	0
+#define   MEM_INT_STEER_CMR	1
+#define   MEM_INT_STEER_SMI	2
+#define   MEM_INT_STEER_SCI	3
+#define MEMINTRSTS		0x11184
+#define   MEMINT_RSEXIT		(1<<7)
+#define   MEMINT_CONT_BUSY	(1<<6)
+#define   MEMINT_AVG_BUSY	(1<<5)
+#define   MEMINT_EVAL_CHG	(1<<4)
+#define   MEMINT_MON_IDLE	(1<<3)
+#define   MEMINT_UP_EVAL	(1<<2)
+#define   MEMINT_DOWN_EVAL	(1<<1)
+#define   MEMINT_SW_CMD		(1<<0)
+#define MEMMODECTL		0x11190
+#define   MEMMODE_BOOST_EN	(1<<31)
+#define   MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define   MEMMODE_BOOST_FREQ_SHIFT 24
+#define   MEMMODE_IDLE_MODE_MASK 0x00030000
+#define   MEMMODE_IDLE_MODE_SHIFT 16
+#define   MEMMODE_IDLE_MODE_EVAL 0
+#define   MEMMODE_IDLE_MODE_CONT 1
+#define   MEMMODE_HWIDLE_EN	(1<<15)
+#define   MEMMODE_SWMODE_EN	(1<<14)
+#define   MEMMODE_RCLK_GATE	(1<<13)
+#define   MEMMODE_HW_UPDATE	(1<<12)
+#define   MEMMODE_FSTART_MASK	0x00000f00 /* starting jitter, 0-15 */
+#define   MEMMODE_FSTART_SHIFT	8
+#define   MEMMODE_FMAX_MASK	0x000000f0 /* max jitter, 0-15 */
+#define   MEMMODE_FMAX_SHIFT	4
+#define   MEMMODE_FMIN_MASK	0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG		0x1119c
+#define MEMSWCTL2		0x1119e /* Cantiga only */
+#define   SWMEMCMD_RENDER_OFF	(0 << 13)
+#define   SWMEMCMD_RENDER_ON	(1 << 13)
+#define   SWMEMCMD_SWFREQ	(2 << 13)
+#define   SWMEMCMD_TARVID	(3 << 13)
+#define   SWMEMCMD_VRM_OFF	(4 << 13)
+#define   SWMEMCMD_VRM_ON	(5 << 13)
+#define   CMDSTS		(1<<12)
+#define   SFCAVM		(1<<11)
+#define   SWFREQ_MASK		0x0380 /* P0-7 */
+#define   SWFREQ_SHIFT		7
+#define   TARVID_MASK		0x001f
+#define MEMSTAT_CTG		0x111a0
+#define RCBMINAVG		0x111a0
+#define RCUPEI			0x111b0
+#define RCDNEI			0x111b4
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if was idle during rs0 */
+#define VIDCTL			0x111c0
+#define VIDSTS			0x111c8
+#define VIDSTART		0x111cc /* 8 bits */
+#define MEMSTAT_ILK			0x111f8
+#define   MEMSTAT_VID_MASK	0x7f00
+#define   MEMSTAT_VID_SHIFT	8
+#define   MEMSTAT_PSTATE_MASK	0x00f8
+#define   MEMSTAT_PSTATE_SHIFT  3
+#define   MEMSTAT_MON_ACTV	(1<<2)
+#define   MEMSTAT_SRC_CTL_MASK	0x0003
+#define   MEMSTAT_SRC_CTL_CORE	0
+#define   MEMSTAT_SRC_CTL_TRB	1
+#define   MEMSTAT_SRC_CTL_THM	2
+#define   MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG		0x113b8
+#define RCPREVBSYTDNAVG		0x113bc
+#define PMMISC			0x11214
+#define   MCPPCE_EN		(1<<0) /* enable PM_MSG from PCH->MPC */
+#define SDEW			0x1124c
+#define CSIEW0			0x11250
+#define CSIEW1			0x11254
+#define CSIEW2			0x11258
+#define PEW(i)			(0x1125c + (i) * 4) /* 5 registers */
+#define DEW(i)			(0x11270 + (i) * 4) /* 3 registers */
+#define MCHAFE			0x112c0
+#define CSIEC			0x112e0
+#define DMIEC			0x112e4
+#define DDREC			0x112e8
+#define PEG0EC			0x112ec
+#define PEG1EC			0x112f0
+#define GFXEC			0x112f4
+#define RPPREVBSYTUPAVG		0x113b8
+#define RPPREVBSYTDNAVG		0x113bc
+#define ECR			0x11600
+#define   ECR_GPFE		(1<<31)
+#define   ECR_IMONE		(1<<30)
+#define   ECR_CAP_MASK		0x0000001f /* Event range, 0-31 */
+#define OGW0			0x11608
+#define OGW1			0x1160c
+#define EG0			0x11610
+#define EG1			0x11614
+#define EG2			0x11618
+#define EG3			0x1161c
+#define EG4			0x11620
+#define EG5			0x11624
+#define EG6			0x11628
+#define EG7			0x1162c
+#define PXW(i)			(0x11664 + (i) * 4) /* 4 registers */
+#define PXWL(i)			(0x11680 + (i) * 4) /* 8 registers */
+#define LCFUSE02		0x116c0
+#define   LCFUSE_HIV_MASK	0x000000ff
+#define CSIPLL0			0x12c10
+#define DDRMPLL1		0x12c20
+#define PEG_BAND_GAP_DATA	0x14d68
+
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+
+#define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS      (MCHBAR_MIRROR_BASE_SNB + 0x7070)
+#define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP        0x138170
+
+/*
+ * Make these a multiple of the magic number 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
+#define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
+#define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
+#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+				(IS_BROXTON(dev_priv) ? \
+				INTERVAL_0_833_US(us) : \
+				INTERVAL_1_33_US(us)) : \
+				INTERVAL_1_28_US(us))
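+
+/*
+ * Worked example (illustrative): for us = 1000 on the SNB/IVB/HSW path,
+ * (1000 * 100) >> 7 = 781, and roundup(781, 25) = 800, so the programmed
+ * interval becomes 800 units of 1.28us (1024us) instead of a "bad" 781.
+ */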
+
+/*
+ * Logical Context regs
+ */
+#define CCID			0x2180
+#define   CCID_EN		(1<<0)
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ *   SNB BSpec has comments indicating that we should use the
+ *   render context size instead if execlists are disabled, but
+ *   based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ *   doesn't need saving on GT1
+ */
+#define CXT_SIZE		0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg)	(((cxt_reg) >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg)	(((cxt_reg) >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg)	(((cxt_reg) >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg)	(((cxt_reg) >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg)	(((cxt_reg) >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg)	(GEN6_CXT_RING_SIZE(cxt_reg) + \
+					GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+					GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE		0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg)	(((ctx_reg) >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg)	(((ctx_reg) >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg)	(((ctx_reg) >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg)	(((ctx_reg) >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg)	(((ctx_reg) >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	(((ctx_reg) >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg)	(GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
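+
+/*
+ * Illustrative sketch, not part of the original header: turning a raw
+ * CXT_SIZE/GEN7_CXT_SIZE readout into bytes. The 64-byte unit matches how
+ * the driver's context-size code uses these fields; HSW/BDW instead use the
+ * fixed totals below. A hedged example only.
+ */
+static inline u32 gen6_context_size_bytes_sketch(u32 cxt_reg)
+{
+	return GEN6_CXT_TOTAL_SIZE(cxt_reg) * 64;
+}
+
+static inline u32 gen7_context_size_bytes_sketch(u32 ctx_reg)
+{
+	return GEN7_CXT_TOTAL_SIZE(ctx_reg) * 64;
+}
+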
+/* Haswell does have the CXT_SIZE register, but it does not appear to be
+ * valid. The docs instead describe the context object contents in dwords. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
+
+#define CHV_CLK_CTL1			0x101100
+#define VLV_CLK_CTL2			0x101104
+#define   CLK_CTL2_CZCOUNT_30NS_SHIFT	28
+
+/*
+ * Overlay regs
+ */
+
+#define OVADD			0x30000
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A			0x60050
+#define   PIPE_CRC_ENABLE		(1 << 31)
+/* ivb+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_IVB	(0 << 29)
+#define   PIPE_CRC_SOURCE_SPRITE_IVB	(1 << 29)
+#define   PIPE_CRC_SOURCE_PF_IVB	(2 << 29)
+/* ilk+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_ILK	(0 << 28)
+#define   PIPE_CRC_SOURCE_SPRITE_ILK	(1 << 28)
+#define   PIPE_CRC_SOURCE_PIPE_ILK	(2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define   PIPE_CRC_SOURCE_PORT_A_ILK	(4 << 28)
+#define   PIPE_CRC_SOURCE_FDI_ILK	(5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define   PIPE_CRC_SOURCE_PIPE_VLV	(0 << 27)
+#define   PIPE_CRC_SOURCE_HDMIB_VLV	(1 << 27)
+#define   PIPE_CRC_SOURCE_HDMIC_VLV	(2 << 27)
+/* with a DP port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_VLV	(3 << 27)
+#define   PIPE_CRC_SOURCE_DP_B_VLV	(6 << 27)
+#define   PIPE_CRC_SOURCE_DP_C_VLV	(7 << 27)
+/* gen3+ source selection */
+#define   PIPE_CRC_SOURCE_PIPE_I9XX	(0 << 28)
+#define   PIPE_CRC_SOURCE_SDVOB_I9XX	(1 << 28)
+#define   PIPE_CRC_SOURCE_SDVOC_I9XX	(2 << 28)
+/* with a DP/TV port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_G4X	(3 << 28)
+#define   PIPE_CRC_SOURCE_TV_PRE	(4 << 28)
+#define   PIPE_CRC_SOURCE_TV_POST	(5 << 28)
+#define   PIPE_CRC_SOURCE_DP_B_G4X	(6 << 28)
+#define   PIPE_CRC_SOURCE_DP_C_G4X	(7 << 28)
+/* gen2 doesn't have source selection bits */
+#define   PIPE_CRC_INCLUDE_BORDER_I8XX	(1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB		0x60064
+#define _PIPE_CRC_RES_2_A_IVB		0x60068
+#define _PIPE_CRC_RES_3_A_IVB		0x6006c
+#define _PIPE_CRC_RES_4_A_IVB		0x60070
+#define _PIPE_CRC_RES_5_A_IVB		0x60074
+
+#define _PIPE_CRC_RES_RED_A		0x60060
+#define _PIPE_CRC_RES_GREEN_A		0x60064
+#define _PIPE_CRC_RES_BLUE_A		0x60068
+#define _PIPE_CRC_RES_RES1_A_I915	0x6006c
+#define _PIPE_CRC_RES_RES2_A_G4X	0x60080
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB		0x61064
+#define _PIPE_CRC_RES_2_B_IVB		0x61068
+#define _PIPE_CRC_RES_3_B_IVB		0x6106c
+#define _PIPE_CRC_RES_4_B_IVB		0x61070
+#define _PIPE_CRC_RES_5_B_IVB		0x61074
+
+#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
+#define PIPE_CRC_RES_1_IVB(pipe)	\
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe)	\
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe)	\
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe)	\
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe)	\
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
+#define PIPE_CRC_RES_GREEN(pipe) \
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
+#define PIPE_CRC_RES_BLUE(pipe) \
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+	_TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
+
+/* Pipe A timing regs */
+#define _HTOTAL_A	0x60000
+#define _HBLANK_A	0x60004
+#define _HSYNC_A	0x60008
+#define _VTOTAL_A	0x6000c
+#define _VBLANK_A	0x60010
+#define _VSYNC_A	0x60014
+#define _PIPEASRC	0x6001c
+#define _BCLRPAT_A	0x60020
+#define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
+
+/* Pipe B timing regs */
+#define _HTOTAL_B	0x61000
+#define _HBLANK_B	0x61004
+#define _HSYNC_B	0x61008
+#define _VTOTAL_B	0x6100c
+#define _VBLANK_B	0x61010
+#define _VSYNC_B	0x61014
+#define _PIPEBSRC	0x6101c
+#define _BCLRPAT_B	0x61020
+#define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
+
+#define TRANSCODER_A_OFFSET 0x60000
+#define TRANSCODER_B_OFFSET 0x61000
+#define TRANSCODER_C_OFFSET 0x62000
+#define CHV_TRANSCODER_C_OFFSET 0x63000
+#define TRANSCODER_EDP_OFFSET 0x6f000
+
+#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
+	dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
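+
+/*
+ * Worked example (illustrative): with the default offsets above and a zero
+ * display_mmio_offset, HTOTAL(TRANSCODER_B) evaluates to
+ * 0x61000 - 0x60000 + 0x60000 = 0x61000, i.e. _HTOTAL_B, so the macros
+ * below only need to name the transcoder A register.
+ */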
+
+#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
+#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
+#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
+#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
+#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
+#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
+#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
+#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
+#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
+
+/* VLV eDP PSR registers */
+#define _PSRCTLA				(VLV_DISPLAY_BASE + 0x60090)
+#define _PSRCTLB				(VLV_DISPLAY_BASE + 0x61090)
+#define  VLV_EDP_PSR_ENABLE			(1<<0)
+#define  VLV_EDP_PSR_RESET			(1<<1)
+#define  VLV_EDP_PSR_MODE_MASK			(7<<2)
+#define  VLV_EDP_PSR_MODE_HW_TIMER		(1<<3)
+#define  VLV_EDP_PSR_MODE_SW_TIMER		(1<<2)
+#define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE	(1<<7)
+#define  VLV_EDP_PSR_ACTIVE_ENTRY		(1<<8)
+#define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE	(1<<9)
+#define  VLV_EDP_PSR_DBL_FRAME			(1<<10)
+#define  VLV_EDP_PSR_FRAME_COUNT_MASK		(0xff<<16)
+#define  VLV_EDP_PSR_IDLE_FRAME_SHIFT		16
+#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+
+#define _VSCSDPA			(VLV_DISPLAY_BASE + 0x600a0)
+#define _VSCSDPB			(VLV_DISPLAY_BASE + 0x610a0)
+#define  VLV_EDP_PSR_SDP_FREQ_MASK	(3<<30)
+#define  VLV_EDP_PSR_SDP_FREQ_ONCE	(1<<31)
+#define  VLV_EDP_PSR_SDP_FREQ_EVFRAME	(1<<30)
+#define VLV_VSCSDP(pipe)	_PIPE(pipe, _VSCSDPA, _VSCSDPB)
+
+#define _PSRSTATA			(VLV_DISPLAY_BASE + 0x60094)
+#define _PSRSTATB			(VLV_DISPLAY_BASE + 0x61094)
+#define  VLV_EDP_PSR_LAST_STATE_MASK	(7<<3)
+#define  VLV_EDP_PSR_CURR_STATE_MASK	7
+#define  VLV_EDP_PSR_DISABLED		(0<<0)
+#define  VLV_EDP_PSR_INACTIVE		(1<<0)
+#define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE	(2<<0)
+#define  VLV_EDP_PSR_ACTIVE_NORFB_UP	(3<<0)
+#define  VLV_EDP_PSR_ACTIVE_SF_UPDATE	(4<<0)
+#define  VLV_EDP_PSR_EXIT		(5<<0)
+#define  VLV_EDP_PSR_IN_TRANS		(1<<7)
+#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
+
+/* HSW+ eDP PSR registers */
+#define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
+#define EDP_PSR_CTL(dev)			(EDP_PSR_BASE(dev) + 0)
+#define   EDP_PSR_ENABLE			(1<<31)
+#define   BDW_PSR_SINGLE_FRAME			(1<<30)
+#define   EDP_PSR_LINK_STANDBY			(1<<27)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK	(3<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES	(0<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES	(1<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES	(2<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES	(3<<25)
+#define   EDP_PSR_MAX_SLEEP_TIME_SHIFT		20
+#define   EDP_PSR_SKIP_AUX_EXIT			(1<<12)
+#define   EDP_PSR_TP1_TP2_SEL			(0<<11)
+#define   EDP_PSR_TP1_TP3_SEL			(1<<11)
+#define   EDP_PSR_TP2_TP3_TIME_500us		(0<<8)
+#define   EDP_PSR_TP2_TP3_TIME_100us		(1<<8)
+#define   EDP_PSR_TP2_TP3_TIME_2500us		(2<<8)
+#define   EDP_PSR_TP2_TP3_TIME_0us		(3<<8)
+#define   EDP_PSR_TP1_TIME_500us		(0<<4)
+#define   EDP_PSR_TP1_TIME_100us		(1<<4)
+#define   EDP_PSR_TP1_TIME_2500us		(2<<4)
+#define   EDP_PSR_TP1_TIME_0us			(3<<4)
+#define   EDP_PSR_IDLE_FRAME_SHIFT		0
+
+#define EDP_PSR_AUX_CTL(dev)			(EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev)			(EDP_PSR_BASE(dev) + 0x14)
+#define EDP_PSR_AUX_DATA2(dev)			(EDP_PSR_BASE(dev) + 0x18)
+#define EDP_PSR_AUX_DATA3(dev)			(EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev)			(EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev)			(EDP_PSR_BASE(dev) + 0x24)
+
+#define EDP_PSR_STATUS_CTL(dev)			(EDP_PSR_BASE(dev) + 0x40)
+#define   EDP_PSR_STATUS_STATE_MASK		(7<<29)
+#define   EDP_PSR_STATUS_STATE_IDLE		(0<<29)
+#define   EDP_PSR_STATUS_STATE_SRDONACK		(1<<29)
+#define   EDP_PSR_STATUS_STATE_SRDENT		(2<<29)
+#define   EDP_PSR_STATUS_STATE_BUFOFF		(3<<29)
+#define   EDP_PSR_STATUS_STATE_BUFON		(4<<29)
+#define   EDP_PSR_STATUS_STATE_AUXACK		(5<<29)
+#define   EDP_PSR_STATUS_STATE_SRDOFFACK	(6<<29)
+#define   EDP_PSR_STATUS_LINK_MASK		(3<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_OFF		(0<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_ON		(1<<26)
+#define   EDP_PSR_STATUS_LINK_STANDBY		(2<<26)
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT	20
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK	0x1f
+#define   EDP_PSR_STATUS_COUNT_SHIFT		16
+#define   EDP_PSR_STATUS_COUNT_MASK		0xf
+#define   EDP_PSR_STATUS_AUX_ERROR		(1<<15)
+#define   EDP_PSR_STATUS_AUX_SENDING		(1<<12)
+#define   EDP_PSR_STATUS_SENDING_IDLE		(1<<9)
+#define   EDP_PSR_STATUS_SENDING_TP2_TP3	(1<<8)
+#define   EDP_PSR_STATUS_SENDING_TP1		(1<<4)
+#define   EDP_PSR_STATUS_IDLE_MASK		0xf
+
+#define EDP_PSR_PERF_CNT(dev)		(EDP_PSR_BASE(dev) + 0x44)
+#define   EDP_PSR_PERF_CNT_MASK		0xffffff
+
+#define EDP_PSR_DEBUG_CTL(dev)		(EDP_PSR_BASE(dev) + 0x60)
+#define   EDP_PSR_DEBUG_MASK_LPSP	(1<<27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP	(1<<26)
+#define   EDP_PSR_DEBUG_MASK_HPD	(1<<25)
+
+#define EDP_PSR2_CTL			0x6f900
+#define   EDP_PSR2_ENABLE		(1<<31)
+#define   EDP_SU_TRACK_ENABLE		(1<<30)
+#define   EDP_MAX_SU_DISABLE_TIME(t)	((t)<<20)
+#define   EDP_MAX_SU_DISABLE_TIME_MASK	(0x1f<<20)
+#define   EDP_PSR2_TP2_TIME_500		(0<<8)
+#define   EDP_PSR2_TP2_TIME_100		(1<<8)
+#define   EDP_PSR2_TP2_TIME_2500	(2<<8)
+#define   EDP_PSR2_TP2_TIME_50		(3<<8)
+#define   EDP_PSR2_TP2_TIME_MASK	(3<<8)
+#define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+#define   EDP_PSR2_FRAME_BEFORE_SU_MASK	(0xf<<4)
+#define   EDP_PSR2_IDLE_MASK		0xf
+
+/* VGA port control */
+#define ADPA			0x61100
+#define PCH_ADPA                0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
+#define   ADPA_DAC_ENABLE	(1<<31)
+#define   ADPA_DAC_DISABLE	0
+#define   ADPA_PIPE_SELECT_MASK	(1<<30)
+#define   ADPA_PIPE_A_SELECT	0
+#define   ADPA_PIPE_B_SELECT	(1<<30)
+#define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bits 25-16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_SETS_HVPOLARITY	0
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_VSYNC_CNTL_ENABLE 0
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_HSYNC_CNTL_ENABLE 0
+#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_LOW	0
+#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_LOW	0
+#define   ADPA_DPMS_MASK	(~(3<<10))
+#define   ADPA_DPMS_ON		(0<<10)
+#define   ADPA_DPMS_SUSPEND	(1<<10)
+#define   ADPA_DPMS_STANDBY	(2<<10)
+#define   ADPA_DPMS_OFF		(3<<10)
+
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN		(dev_priv->info.display_mmio_offset + 0x61110)
+#define   PORTB_HOTPLUG_INT_EN			(1 << 29)
+#define   PORTC_HOTPLUG_INT_EN			(1 << 28)
+#define   PORTD_HOTPLUG_INT_EN			(1 << 27)
+#define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
+#define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
+#define   TV_HOTPLUG_INT_EN			(1 << 18)
+#define   CRT_HOTPLUG_INT_EN			(1 << 9)
+#define HOTPLUG_INT_EN_MASK			(PORTB_HOTPLUG_INT_EN | \
+						 PORTC_HOTPLUG_INT_EN | \
+						 PORTD_HOTPLUG_INT_EN | \
+						 SDVOC_HOTPLUG_INT_EN | \
+						 SDVOB_HOTPLUG_INT_EN | \
+						 CRT_HOTPLUG_INT_EN)
+#define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+
+#define PORT_HOTPLUG_STAT	(dev_priv->info.display_mmio_offset + 0x61114)
+/*
+ * HDMI/DP bits are g4x+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define   PORTD_HOTPLUG_LIVE_STATUS_GM45	(1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS_GM45	(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_GM45	(1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define   PORTD_HOTPLUG_LIVE_STATUS_G4X		(1 << 27)
+#define   PORTC_HOTPLUG_LIVE_STATUS_G4X		(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_G4X		(1 << 29)
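+
+/*
+ * Illustrative sketch, not part of the original header: picking the port B
+ * live-status bit per the GM45-vs-G4X mixup documented above. "is_gm45" is a
+ * caller-supplied platform check standing in for the driver's own test.
+ */
+static inline u32 portb_hotplug_live_status_sketch(bool is_gm45)
+{
+	return is_gm45 ? PORTB_HOTPLUG_LIVE_STATUS_GM45 :
+			 PORTB_HOTPLUG_LIVE_STATUS_G4X;
+}
+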
+#define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
+#define   PORTD_HOTPLUG_INT_LONG_PULSE		(2 << 21)
+#define   PORTD_HOTPLUG_INT_SHORT_PULSE		(1 << 21)
+#define   PORTC_HOTPLUG_INT_STATUS		(3 << 19)
+#define   PORTC_HOTPLUG_INT_LONG_PULSE		(2 << 19)
+#define   PORTC_HOTPLUG_INT_SHORT_PULSE		(1 << 19)
+#define   PORTB_HOTPLUG_INT_STATUS		(3 << 17)
+#define   PORTB_HOTPLUG_INT_LONG_PULSE		(2 << 17)
+#define   PORTB_HOTPLUG_INT_SHORT_PLUSE		(1 << 17)
+/* CRT/TV common between gen3+ */
+#define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
+#define   TV_HOTPLUG_INT_STATUS			(1 << 10)
+#define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+#define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define   DP_AUX_CHANNEL_D_INT_STATUS_G4X	(1 << 6)
+#define   DP_AUX_CHANNEL_C_INT_STATUS_G4X	(1 << 5)
+#define   DP_AUX_CHANNEL_B_INT_STATUS_G4X	(1 << 4)
+#define   DP_AUX_CHANNEL_MASK_INT_STATUS_G4X	(7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
+#define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right track.
+ */
+#define   SDVOC_HOTPLUG_INT_STATUS_I965		(3 << 4)
+#define   SDVOB_HOTPLUG_INT_STATUS_I965		(3 << 2)
+#define   SDVOC_HOTPLUG_INT_STATUS_I915		(1 << 7)
+#define   SDVOB_HOTPLUG_INT_STATUS_I915		(1 << 6)
+#define   HOTPLUG_INT_STATUS_G4X		(CRT_HOTPLUG_INT_STATUS | \
+						 SDVOB_HOTPLUG_INT_STATUS_G4X | \
+						 SDVOC_HOTPLUG_INT_STATUS_G4X | \
+						 PORTB_HOTPLUG_INT_STATUS | \
+						 PORTC_HOTPLUG_INT_STATUS | \
+						 PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915			(CRT_HOTPLUG_INT_STATUS | \
+						 SDVOB_HOTPLUG_INT_STATUS_I915 | \
+						 SDVOC_HOTPLUG_INT_STATUS_I915 | \
+						 PORTB_HOTPLUG_INT_STATUS | \
+						 PORTC_HOTPLUG_INT_STATUS | \
+						 PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB	0x61140
+#define GEN3_SDVOC	0x61160
+#define GEN4_HDMIB	GEN3_SDVOB
+#define GEN4_HDMIC	GEN3_SDVOC
+#define VLV_HDMIB	(VLV_DISPLAY_BASE + GEN4_HDMIB)
+#define VLV_HDMIC	(VLV_DISPLAY_BASE + GEN4_HDMIC)
+#define CHV_HDMID	(VLV_DISPLAY_BASE + 0x6116C)
+#define PCH_SDVOB	0xe1140
+#define PCH_HDMIB	PCH_SDVOB
+#define PCH_HDMIC	0xe1150
+#define PCH_HDMID	0xe1160
+
+#define PORT_DFT_I9XX				0x61150
+#define   DC_BALANCE_RESET			(1 << 25)
+#define PORT_DFT2_G4X		(dev_priv->info.display_mmio_offset + 0x61154)
+#define   DC_BALANCE_RESET_VLV			(1 << 31)
+#define   PIPE_SCRAMBLE_RESET_MASK		((1 << 14) | (0x3 << 0))
+#define   PIPE_C_SCRAMBLE_RESET			(1 << 14) /* chv */
+#define   PIPE_B_SCRAMBLE_RESET			(1 << 1)
+#define   PIPE_A_SCRAMBLE_RESET			(1 << 0)
+
+/* Gen 3 SDVO bits: */
+#define   SDVO_ENABLE				(1 << 31)
+#define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
+#define   SDVO_PIPE_SEL_MASK			(1 << 30)
+#define   SDVO_PIPE_B_SELECT			(1 << 30)
+#define   SDVO_STALL_SELECT			(1 << 29)
+#define   SDVO_INTERRUPT_ENABLE			(1 << 26)
+/*
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define   SDVO_PORT_MULTIPLY_MASK		(7 << 23)
+#define   SDVO_PORT_MULTIPLY_SHIFT		23
+#define   SDVO_PHASE_SELECT_MASK		(15 << 19)
+#define   SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
+#define   SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
+#define   SDVOC_GANG_MODE			(1 << 16) /* Port C only */
+#define   SDVO_BORDER_ENABLE			(1 << 7) /* SDVO only */
+#define   SDVOB_PCIE_CONCURRENCY		(1 << 3) /* Port B only */
+#define   SDVO_DETECTED				(1 << 2)
+/* Bits to be preserved when writing */
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+			       SDVO_INTERRUPT_ENABLE)
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
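+
+/*
+ * Illustrative sketch, not part of the original header: a read-modify-write
+ * that honours SDVOB_PRESERVE_MASK and folds in the gen3 pixel multiplier
+ * encoding (factor - 1) documented above. MMIO access is left to the caller;
+ * a hedged example only.
+ */
+static inline u32 sdvob_write_val_sketch(u32 old, u32 enable_bits, u32 factor)
+{
+	return (old & SDVOB_PRESERVE_MASK) | enable_bits |
+	       ((factor - 1) << SDVO_PORT_MULTIPLY_SHIFT);
+}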
+
+/* Gen 4 SDVO/HDMI bits: */
+#define   SDVO_COLOR_FORMAT_8bpc		(0 << 26)
+#define   SDVO_COLOR_FORMAT_MASK		(7 << 26)
+#define   SDVO_ENCODING_SDVO			(0 << 10)
+#define   SDVO_ENCODING_HDMI			(2 << 10)
+#define   HDMI_MODE_SELECT_HDMI			(1 << 9) /* HDMI only */
+#define   HDMI_MODE_SELECT_DVI			(0 << 9) /* HDMI only */
+#define   HDMI_COLOR_RANGE_16_235		(1 << 8) /* HDMI only */
+#define   SDVO_AUDIO_ENABLE			(1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define   SDVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   SDVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define   HDMI_COLOR_FORMAT_12bpc		(3 << 26) /* HDMI only */
+#define   SDVOB_HOTPLUG_ENABLE			(1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define   SDVO_PIPE_SEL_CPT(pipe)		((pipe) << 29)
+#define   SDVO_PIPE_SEL_MASK_CPT		(3 << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define   SDVO_PIPE_SEL_CHV(pipe)		((pipe) << 24)
+#define   SDVO_PIPE_SEL_MASK_CHV		(3 << 24)
+
+
+/* DVO port control */
+#define DVOA			0x61120
+#define DVOB			0x61140
+#define DVOC			0x61160
+#define   DVO_ENABLE			(1 << 31)
+#define   DVO_PIPE_B_SELECT		(1 << 30)
+#define   DVO_PIPE_STALL_UNUSED		(0 << 28)
+#define   DVO_PIPE_STALL		(1 << 28)
+#define   DVO_PIPE_STALL_TV		(2 << 28)
+#define   DVO_PIPE_STALL_MASK		(3 << 28)
+#define   DVO_USE_VGA_SYNC		(1 << 15)
+#define   DVO_DATA_ORDER_I740		(0 << 14)
+#define   DVO_DATA_ORDER_FP		(1 << 14)
+#define   DVO_VSYNC_DISABLE		(1 << 11)
+#define   DVO_HSYNC_DISABLE		(1 << 10)
+#define   DVO_VSYNC_TRISTATE		(1 << 9)
+#define   DVO_HSYNC_TRISTATE		(1 << 8)
+#define   DVO_BORDER_ENABLE		(1 << 7)
+#define   DVO_DATA_ORDER_GBRG		(1 << 6)
+#define   DVO_DATA_ORDER_RGGB		(0 << 6)
+#define   DVO_DATA_ORDER_GBRG_ERRATA	(0 << 6)
+#define   DVO_DATA_ORDER_RGGB_ERRATA	(1 << 6)
+#define   DVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   DVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+#define   DVO_BLANK_ACTIVE_HIGH		(1 << 2)
+#define   DVO_OUTPUT_CSTATE_PIXELS	(1 << 1)	/* SDG only */
+#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS	(1 << 0)	/* SDG only */
+#define   DVO_PRESERVE_MASK		(0x7<<24)
+#define DVOA_SRCDIM		0x61124
+#define DVOB_SRCDIM		0x61144
+#define DVOC_SRCDIM		0x61164
+#define   DVO_SRCDIM_HORIZONTAL_SHIFT	12
+#define   DVO_SRCDIM_VERTICAL_SHIFT	0
+
+/* LVDS port control */
+#define LVDS			0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define   LVDS_PORT_EN			(1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define   LVDS_PIPEB_SELECT		(1 << 30)
+#define   LVDS_PIPE_MASK		(1 << 30)
+#define   LVDS_PIPE(pipe)		((pipe) << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define   LVDS_ENABLE_DITHER		(1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define   LVDS_VSYNC_POLARITY		(1 << 21)
+#define   LVDS_HSYNC_POLARITY		(1 << 20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define   LVDS_BORDER_ENABLE		(1 << 15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define   LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define   LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define   LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define   LVDS_A3_POWER_MASK		(3 << 6)
+#define   LVDS_A3_POWER_DOWN		(0 << 6)
+#define   LVDS_A3_POWER_UP		(3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define   LVDS_CLKB_POWER_MASK		(3 << 4)
+#define   LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define   LVDS_CLKB_POWER_UP		(3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define   LVDS_B0B3_POWER_MASK		(3 << 2)
+#define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define   LVDS_B0B3_POWER_UP		(3 << 2)
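+
+/*
+ * Illustrative sketch, not part of the original header: applying the pair
+ * power dependencies above for a 24bpp dual-channel panel, i.e. A0-A2/CLKA
+ * up, A3 up for the extra LSBs, and CLKB plus B0-B3 up to match a
+ * dual-channel DPLL p2 setting. A hedged example only.
+ */
+static inline u32 lvds_dual_channel_24bpp_sketch(u32 lvds)
+{
+	lvds &= ~(LVDS_A0A2_CLKA_POWER_MASK | LVDS_A3_POWER_MASK |
+		  LVDS_CLKB_POWER_MASK | LVDS_B0B3_POWER_MASK);
+	return lvds | LVDS_A0A2_CLKA_POWER_UP | LVDS_A3_POWER_UP |
+	       LVDS_CLKB_POWER_UP | LVDS_B0B3_POWER_UP;
+}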
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA		0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define   VIDEO_DIP_DATA_SIZE	32
+#define   VIDEO_DIP_VSC_DATA_SIZE	36
+#define VIDEO_DIP_CTL		0x61170
+/* Pre HSW: */
+#define   VIDEO_DIP_ENABLE		(1 << 31)
+#define   VIDEO_DIP_PORT(port)		((port) << 29)
+#define   VIDEO_DIP_PORT_MASK		(3 << 29)
+#define   VIDEO_DIP_ENABLE_GCP		(1 << 25)
+#define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
+#define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
+#define   VIDEO_DIP_ENABLE_GAMUT	(4 << 21)
+#define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
+#define   VIDEO_DIP_SELECT_AVI		(0 << 19)
+#define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
+#define   VIDEO_DIP_SELECT_SPD		(3 << 19)
+#define   VIDEO_DIP_SELECT_MASK		(3 << 19)
+#define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
+#define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
+#define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
+#define   VIDEO_DIP_FREQ_MASK		(3 << 16)
+/* HSW and later: */
+#define   VIDEO_DIP_ENABLE_VSC_HSW	(1 << 20)
+#define   VIDEO_DIP_ENABLE_GCP_HSW	(1 << 16)
+#define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
+#define   VIDEO_DIP_ENABLE_VS_HSW	(1 << 8)
+#define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4)
+#define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
+
+/* Panel power sequencing */
+#define PP_STATUS	0x61200
+#define   PP_ON		(1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define   PP_READY		(1 << 30)
+#define   PP_SEQUENCE_NONE	(0 << 28)
+#define   PP_SEQUENCE_POWER_UP	(1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define   PP_SEQUENCE_MASK	(3 << 28)
+#define   PP_SEQUENCE_SHIFT	28
+#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
+#define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
+#define   PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
+#define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
+#define PP_CONTROL	0x61204
+#define   POWER_TARGET_ON	(1 << 0)
+#define PP_ON_DELAYS	0x61208
+#define PP_OFF_DELAYS	0x6120c
+#define PP_DIVISOR	0x61210
+
+/* Panel fitting */
+#define PFIT_CONTROL	(dev_priv->info.display_mmio_offset + 0x61230)
+#define   PFIT_ENABLE		(1 << 31)
+#define   PFIT_PIPE_MASK	(3 << 29)
+#define   PFIT_PIPE_SHIFT	29
+#define   VERT_INTERP_DISABLE	(0 << 10)
+#define   VERT_INTERP_BILINEAR	(1 << 10)
+#define   VERT_INTERP_MASK	(3 << 10)
+#define   VERT_AUTO_SCALE	(1 << 9)
+#define   HORIZ_INTERP_DISABLE	(0 << 6)
+#define   HORIZ_INTERP_BILINEAR	(1 << 6)
+#define   HORIZ_INTERP_MASK	(3 << 6)
+#define   HORIZ_AUTO_SCALE	(1 << 5)
+#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define   PFIT_FILTER_FUZZY	(0 << 24)
+#define   PFIT_SCALING_AUTO	(0 << 26)
+#define   PFIT_SCALING_PROGRAMMED (1 << 26)
+#define   PFIT_SCALING_PILLAR	(2 << 26)
+#define   PFIT_SCALING_LETTER	(3 << 26)
+#define PFIT_PGM_RATIOS	(dev_priv->info.display_mmio_offset + 0x61234)
+/* Pre-965 */
+#define		PFIT_VERT_SCALE_SHIFT		20
+#define		PFIT_VERT_SCALE_MASK		0xfff00000
+#define		PFIT_HORIZ_SCALE_SHIFT		4
+#define		PFIT_HORIZ_SCALE_MASK		0x0000fff0
+/* 965+ */
+#define		PFIT_VERT_SCALE_SHIFT_965	16
+#define		PFIT_VERT_SCALE_MASK_965	0x1fff0000
+#define		PFIT_HORIZ_SCALE_SHIFT_965	0
+#define		PFIT_HORIZ_SCALE_MASK_965	0x00001fff
+
+#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
+
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+				     _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+				    _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+				     _VLV_BLC_HIST_CTL_B)
+
+/* Backlight control */
+#define BLC_PWM_CTL2	(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define   BLM_PWM_ENABLE		(1 << 31)
+#define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */
+#define   BLM_PIPE_SELECT		(1 << 29)
+#define   BLM_PIPE_SELECT_IVB		(3 << 29)
+#define   BLM_PIPE_A			(0 << 29)
+#define   BLM_PIPE_B			(1 << 29)
+#define   BLM_PIPE_C			(2 << 29) /* ivb + */
+#define   BLM_TRANSCODER_A		BLM_PIPE_A /* hsw */
+#define   BLM_TRANSCODER_B		BLM_PIPE_B
+#define   BLM_TRANSCODER_C		BLM_PIPE_C
+#define   BLM_TRANSCODER_EDP		(3 << 29)
+#define   BLM_PIPE(pipe)		((pipe) << 29)
+#define   BLM_POLARITY_I965		(1 << 28) /* gen4 only */
+#define   BLM_PHASE_IN_INTERUPT_STATUS	(1 << 26)
+#define   BLM_PHASE_IN_ENABLE		(1 << 25)
+#define   BLM_PHASE_IN_INTERUPT_ENABL	(1 << 24)
+#define   BLM_PHASE_IN_TIME_BASE_SHIFT	(16)
+#define   BLM_PHASE_IN_TIME_BASE_MASK	(0xff << 16)
+#define   BLM_PHASE_IN_COUNT_SHIFT	(8)
+#define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
+#define   BLM_PHASE_IN_INCR_SHIFT	(0)
+#define   BLM_PHASE_IN_INCR_MASK	(0xff << 0)
+#define BLC_PWM_CTL	(dev_priv->info.display_mmio_offset + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT	(17)
+#define   BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+#define   BLM_LEGACY_MODE			(1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define   BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define   BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
+#define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe)
+#define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
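+/*
+ * Worked example (illustrative, values invented): a frequency field of
+ * 7812 means 2 * 7812 = 15624 cycles per modulation period, so a 50%
+ * duty cycle would be programmed as 7812 in BACKLIGHT_DUTY_CYCLE_MASK.
+ */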
+
+#define BLC_HIST_CTL	(dev_priv->info.display_mmio_offset + 0x61260)
+#define  BLM_HISTOGRAM_ENABLE			(1 << 31)
+
+/* New registers for PCH-split platforms. Except where new bits show up, the
+ * register layout matches the gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2	0x48250
+#define BLC_PWM_CPU_CTL		0x48254
+
+#define HSW_BLC_PWM2_CTL	0x48350
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1	0xc8250
+#define   BLM_PCH_PWM_ENABLE			(1 << 31)
+#define   BLM_PCH_OVERRIDE_ENABLE		(1 << 30)
+#define   BLM_PCH_POLARITY			(1 << 29)
+#define BLC_PWM_PCH_CTL2	0xc8254
+
+#define UTIL_PIN_CTL		0x48400
+#define   UTIL_PIN_ENABLE	(1 << 31)
+
+#define   UTIL_PIN_PIPE(x)     ((x) << 29)
+#define   UTIL_PIN_PIPE_MASK   (3 << 29)
+#define   UTIL_PIN_MODE_PWM    (1 << 24)
+#define   UTIL_PIN_MODE_MASK   (0xf << 24)
+#define   UTIL_PIN_POLARITY    (1 << 22)
+
+/* BXT backlight register definition. */
+#define _BXT_BLC_PWM_CTL1			0xC8250
+#define   BXT_BLC_PWM_ENABLE			(1 << 31)
+#define   BXT_BLC_PWM_POLARITY			(1 << 29)
+#define _BXT_BLC_PWM_FREQ1			0xC8254
+#define _BXT_BLC_PWM_DUTY1			0xC8258
+
+#define _BXT_BLC_PWM_CTL2			0xC8350
+#define _BXT_BLC_PWM_FREQ2			0xC8354
+#define _BXT_BLC_PWM_DUTY2			0xC8358
+
+#define BXT_BLC_PWM_CTL(controller)    _PIPE(controller, \
+					_BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
+#define BXT_BLC_PWM_FREQ(controller)   _PIPE(controller, \
+					_BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
+#define BXT_BLC_PWM_DUTY(controller)   _PIPE(controller, \
+					_BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
+
+#define PCH_GTC_CTL		0xe7000
+#define   PCH_GTC_ENABLE	(1 << 31)
+
+/* TV port control */
+#define TV_CTL			0x68000
+/* Enables the TV encoder */
+# define TV_ENC_ENABLE			(1 << 31)
+/* Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT		(1 << 30)
+/* Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE	(0 << 28)
+/* Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO		(1 << 28)
+/* Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT	(2 << 28)
+/* Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE	(3 << 28)
+# define TV_TRILEVEL_SYNC		(1 << 21)
+/* Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC			(1 << 20)
+/* Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X		(0 << 18)
+/* Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X		(1 << 18)
+/* Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE		(2 << 18)
+/* Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X		(3 << 18)
+/* Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE			(1 << 17)
+/* Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
+# define TV_PAL_BURST			(1 << 16)
+/* Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK		(7 << 12)
+/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX			(1 << 11)
+/*
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX			(1 << 10)
+/* Bits that must be preserved by software */
+# define TV_CTL_SAVE			((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
+# define TV_FUSE_STATE_MASK		(3 << 4)
+/* Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED		(0 << 4)
+/* Read-only state that reports that Macrovision is disabled in hardware */
+# define TV_FUSE_STATE_NO_MACROVISION	(1 << 4)
+/* Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED		(2 << 4)
+/* Normal operation */
+# define TV_TEST_MODE_NORMAL		(0 << 0)
+/* Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1		(1 << 0)
+/* Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2		(2 << 0)
+/* Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3		(3 << 0)
+/* Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4		(4 << 0)
+/* Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5		(5 << 0)
+/*
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT	(7 << 0)
+# define TV_TEST_MODE_MASK		(7 << 0)
+
+#define TV_DAC			0x68004
+# define TV_DAC_SAVE		0x00ffff00
+/*
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
+ */
+# define TVDAC_STATE_CHG		(1 << 31)
+# define TVDAC_SENSE_MASK		(7 << 28)
+/* Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE			(1 << 30)
+/* Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE			(1 << 29)
+/* Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE			(1 << 28)
+/*
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN		(1 << 27)
+/* Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL		(1 << 26)
+/* Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL		(1 << 25)
+/* Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL		(1 << 24)
+/* Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE		(1 << 7)
+/* Sets the slew rate.  Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST		(1 << 6)
+# define DAC_A_1_3_V			(0 << 4)
+# define DAC_A_1_1_V			(1 << 4)
+# define DAC_A_0_7_V			(2 << 4)
+# define DAC_A_MASK			(3 << 4)
+# define DAC_B_1_3_V			(0 << 2)
+# define DAC_B_1_1_V			(1 << 2)
+# define DAC_B_0_7_V			(2 << 2)
+# define DAC_B_MASK			(3 << 2)
+# define DAC_C_1_3_V			(0 << 0)
+# define DAC_C_1_1_V			(1 << 0)
+# define DAC_C_0_7_V			(2 << 0)
+# define DAC_C_MASK			(3 << 0)
+
+/*
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y		0x68010
+# define TV_RY_MASK			0x07ff0000
+# define TV_RY_SHIFT			16
+# define TV_GY_MASK			0x00000fff
+# define TV_GY_SHIFT			0
+
+#define TV_CSC_Y2		0x68014
+# define TV_BY_MASK			0x07ff0000
+# define TV_BY_SHIFT			16
+/*
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK			0x000003ff
+# define TV_AY_SHIFT			0
+
+#define TV_CSC_U		0x68018
+# define TV_RU_MASK			0x07ff0000
+# define TV_RU_SHIFT			16
+# define TV_GU_MASK			0x000007ff
+# define TV_GU_SHIFT			0
+
+#define TV_CSC_U2		0x6801c
+# define TV_BU_MASK			0x07ff0000
+# define TV_BU_SHIFT			16
+/*
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK			0x000003ff
+# define TV_AU_SHIFT			0
+
+#define TV_CSC_V		0x68020
+# define TV_RV_MASK			0x0fff0000
+# define TV_RV_SHIFT			16
+# define TV_GV_MASK			0x000007ff
+# define TV_GV_SHIFT			0
+
+#define TV_CSC_V2		0x68024
+# define TV_BV_MASK			0x07ff0000
+# define TV_BV_SHIFT			16
+/*
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK			0x000007ff
+# define TV_AV_SHIFT			0
+
+#define TV_CLR_KNOBS		0x68028
+/* 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK		0xff000000
+# define TV_BRIGHTNESS_SHIFT		24
+/* Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK		0x00ff0000
+# define TV_CONTRAST_SHIFT		16
+/* Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK		0x0000ff00
+# define TV_SATURATION_SHIFT		8
+/* Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK			0x000000ff
+# define TV_HUE_SHIFT			0
+
+#define TV_CLR_LEVEL		0x6802c
+/* Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK		0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT		16
+/* Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK		0x000001ff
+# define TV_BLANK_LEVEL_SHIFT		0
+
+#define TV_H_CTL_1		0x68030
+/* Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK		0x1fff0000
+# define TV_HSYNC_END_SHIFT		16
+/* Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK			0x00001fff
+# define TV_HTOTAL_SHIFT		0
+
+#define TV_H_CTL_2		0x68034
+/* Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA			(1 << 31)
+/* Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT		16
+# define TV_HBURST_START_MASK		0x1fff0000
+/* Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT		0
+# define TV_HBURST_LEN_MASK		0x00001fff
+
+#define TV_H_CTL_3		0x68038
+/* End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT		16
+# define TV_HBLANK_END_MASK		0x1fff0000
+/* Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT		0
+# define TV_HBLANK_START_MASK		0x00001fff
+
+#define TV_V_CTL_1		0x6803c
+/* XXX */
+# define TV_NBR_END_SHIFT		16
+# define TV_NBR_END_MASK		0x07ff0000
+/* XXX */
+# define TV_VI_END_F1_SHIFT		8
+# define TV_VI_END_F1_MASK		0x00003f00
+/* XXX */
+# define TV_VI_END_F2_SHIFT		0
+# define TV_VI_END_F2_MASK		0x0000003f
+
+#define TV_V_CTL_2		0x68040
+/* Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK		0x07ff0000
+# define TV_VSYNC_LEN_SHIFT		16
+/* Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK		0x00007f00
+# define TV_VSYNC_START_F1_SHIFT	8
+/*
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK		0x0000007f
+# define TV_VSYNC_START_F2_SHIFT	0
+
+#define TV_V_CTL_3		0x68044
+/* Enables generation of the equalization signal */
+# define TV_EQUAL_ENA			(1 << 31)
+/* Length of the equalization pulse, in half lines */
+# define TV_VEQ_LEN_MASK		0x007f0000
+# define TV_VEQ_LEN_SHIFT		16
+/* Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK		0x00007f00
+# define TV_VEQ_START_F1_SHIFT		8
+/*
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK		0x0000007f
+# define TV_VEQ_START_F2_SHIFT		0
+
+#define TV_V_CTL_4		0x68048
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK	0x003f0000
+# define TV_VBURST_START_F1_SHIFT	16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK		0x000000ff
+# define TV_VBURST_END_F1_SHIFT		0
+
+#define TV_V_CTL_5		0x6804c
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK	0x003f0000
+# define TV_VBURST_START_F2_SHIFT	16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK		0x000000ff
+# define TV_VBURST_END_F2_SHIFT		0
+
+#define TV_V_CTL_6		0x68050
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK	0x003f0000
+# define TV_VBURST_START_F3_SHIFT	16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK		0x000000ff
+# define TV_VBURST_END_F3_SHIFT		0
+
+#define TV_V_CTL_7		0x68054
+/*
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK	0x003f0000
+# define TV_VBURST_START_F4_SHIFT	16
+/*
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK		0x000000ff
+# define TV_VBURST_END_F4_SHIFT		0
+
+#define TV_SC_CTL_1		0x68060
+/* Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN			(1 << 31)
+/* Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN			(1 << 30)
+/* Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN			(1 << 29)
+/* Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2		(0 << 24)
+/* Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4		(1 << 24)
+/* Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8		(2 << 24)
+/* Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER		(3 << 24)
+/* Sets the peak amplitude of the colorburst. */
+# define TV_BURST_LEVEL_MASK		0x00ff0000
+# define TV_BURST_LEVEL_SHIFT		16
+/* Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK		0x00000fff
+# define TV_SCDDA1_INC_SHIFT		0
+
+#define TV_SC_CTL_2		0x68064
+/* Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK		0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT		16
+/* Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK		0x00007fff
+# define TV_SCDDA2_INC_SHIFT		0
+
+#define TV_SC_CTL_3		0x68068
+/* Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK		0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT		16
+/* Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK		0x00007fff
+# define TV_SCDDA3_INC_SHIFT		0
+
+#define TV_WIN_POS		0x68070
+/* X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK			0x1fff0000
+# define TV_XPOS_SHIFT			16
+/* Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK			0x00000fff
+# define TV_YPOS_SHIFT			0
+
+#define TV_WIN_SIZE		0x68074
+/* Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK			0x1fff0000
+# define TV_XSIZE_SHIFT			16
+/*
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK			0x00000fff
+# define TV_YSIZE_SHIFT			0
+
+#define TV_FILTER_CTL_1		0x68080
+/*
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE			(1 << 31)
+/*
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS		(1 << 29)
+/* Enables adaptive vertical filtering */
+# define TV_VADAPT			(1 << 28)
+# define TV_VADAPT_MODE_MASK		(3 << 26)
+/* Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST		(0 << 26)
+/* Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE	(1 << 26)
+/* Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST		(3 << 26)
+/*
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK		0x00003fff
+# define TV_HSCALE_FRAC_SHIFT		0
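+/*
+ * Worked example (illustrative, mode numbers invented): a 640 pixel wide
+ * source at 2x oversampling into a 920 pixel destination gives
+ *   (640 - 1) / ((2 * 920) - 1) = 639 / 1839 ~= 0.3475,
+ * which rounds to the 14-bit fraction 5693 (0x163d) in TV_HSCALE_FRAC_MASK.
+ */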
+
+#define TV_FILTER_CTL_2		0x68084
+/*
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK		0x00038000
+# define TV_VSCALE_INT_SHIFT		15
+/*
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK		0x00007fff
+# define TV_VSCALE_FRAC_SHIFT		0
+
+#define TV_FILTER_CTL_3		0x68088
+/*
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK		0x00038000
+# define TV_VSCALE_IP_INT_SHIFT		15
+/*
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK		0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT		0
+
+#define TV_CC_CONTROL		0x68090
+# define TV_CC_ENABLE			(1 << 31)
+/*
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK			(1 << 27)
+# define TV_CC_FID_SHIFT		27
+/* Sets the horizontal position of the CC data.  Usually 135. */
+# define TV_CC_HOFF_MASK		0x03ff0000
+# define TV_CC_HOFF_SHIFT		16
+/* Sets the vertical position of the CC data.  Usually 21 */
+# define TV_CC_LINE_MASK		0x0000003f
+# define TV_CC_LINE_SHIFT		0
+
+#define TV_CC_DATA		0x68094
+# define TV_CC_RDY			(1 << 31)
+/* Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK		0x007f0000
+# define TV_CC_DATA_2_SHIFT		16
+/* First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK		0x0000007f
+# define TV_CC_DATA_1_SHIFT		0
+
+#define TV_H_LUMA(i)		(0x68100 + (i) * 4) /* 60 registers */
+#define TV_H_CHROMA(i)		(0x68200 + (i) * 4) /* 60 registers */
+#define TV_V_LUMA(i)		(0x68300 + (i) * 4) /* 43 registers */
+#define TV_V_CHROMA(i)		(0x68400 + (i) * 4) /* 43 registers */
+
+/* Display Port */
+#define DP_A				0x64000 /* eDP */
+#define DP_B				0x64100
+#define DP_C				0x64200
+#define DP_D				0x64300
+
+#define VLV_DP_B			(VLV_DISPLAY_BASE + DP_B)
+#define VLV_DP_C			(VLV_DISPLAY_BASE + DP_C)
+#define CHV_DP_D			(VLV_DISPLAY_BASE + DP_D)
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+#define   DP_PIPE_SELECT_CHV(pipe)	((pipe) << 16)
+#define   DP_PIPE_MASK_CHV		(3 << 16)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+#define   DP_LINK_TRAIN_PAT_3_CHV	(1 << 14)
+#define   DP_LINK_TRAIN_MASK_CHV	((3 << 28)|(1<<14))
+
+/* CPT Link training mode */
+#define   DP_LINK_TRAIN_PAT_1_CPT	(0 << 8)
+#define   DP_LINK_TRAIN_PAT_2_CPT	(1 << 8)
+#define   DP_LINK_TRAIN_PAT_IDLE_CPT	(2 << 8)
+#define   DP_LINK_TRAIN_OFF_CPT		(3 << 8)
+#define   DP_LINK_TRAIN_MASK_CPT	(7 << 8)
+#define   DP_LINK_TRAIN_SHIFT_CPT	8
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH(width)		(((width) - 1) << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+#define   DP_PORT_WIDTH_SHIFT		19
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/* eDP */
+#define   DP_PLL_FREQ_270MHZ		(0 << 16)
+#define   DP_PLL_FREQ_160MHZ		(1 << 16)
+#define   DP_PLL_FREQ_MASK		(3 << 16)
+
+/* locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/* eDP */
+#define   DP_PLL_ENABLE			(1 << 14)
+
+/* sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/* limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/* Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/* vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/* A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/* The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
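+/*
+ * Sketch (illustrative only, not the driver's actual helper; pack_aux is
+ * a local example name): message bytes are marshalled four per DATA
+ * register, most significant byte first, e.g.:
+ *
+ *	static u32 pack_aux(const u8 *src, int bytes)
+ *	{
+ *		u32 v = 0;
+ *		int i;
+ *
+ *		for (i = 0; i < bytes && i < 4; i++)
+ *			v |= (u32)src[i] << ((3 - i) * 8);
+ *		return v;
+ *	}
+ */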
+#define DPA_AUX_CH_CTL			0x64010
+#define DPA_AUX_CH_DATA1		0x64014
+#define DPA_AUX_CH_DATA2		0x64018
+#define DPA_AUX_CH_DATA3		0x6401c
+#define DPA_AUX_CH_DATA4		0x64020
+#define DPA_AUX_CH_DATA5		0x64024
+
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
+#define DPD_AUX_CH_CTL			0x64310
+#define DPD_AUX_CH_DATA1		0x64314
+#define DPD_AUX_CH_DATA2		0x64318
+#define DPD_AUX_CH_DATA3		0x6431c
+#define DPD_AUX_CH_DATA4		0x64320
+#define DPD_AUX_CH_DATA5		0x64324
+
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+#define   DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL	(1 << 14)
+#define   DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL	(1 << 13)
+#define   DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL	(1 << 12)
+#define   DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
+#define   DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
+#define   DP_AUX_CH_CTL_SYNC_PULSE_SKL(c)   ((c) - 1)
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link symbol clock (162 or 270 MHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
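+/*
+ * Illustrative computation (numbers invented): a 148500 kHz dot clock at
+ * 3 bytes per pixel over 4 lanes of a 270000 kHz link gives
+ *   M/N = (148500 * 3) / (270000 * 4) = 445500 / 1080000 = 0.4125,
+ * which is then scaled to fit the 24-bit DATA_LINK_M_N_MASK fields.
+ */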
+#define _PIPEA_DATA_M_G4X	0x70050
+#define _PIPEB_DATA_M_G4X	0x71050
+
+/* Transfer unit size for display port; the field holds TU size - 1, default 0x3f (TU size 64) */
+#define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE_SHIFT		25
+#define  TU_SIZE_MASK           (0x3f << 25)
+
+#define  DATA_LINK_M_N_MASK	(0xffffff)
+#define  DATA_LINK_N_MAX	(0x800000)
+
+#define _PIPEA_DATA_N_G4X	0x70054
+#define _PIPEB_DATA_N_G4X	0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
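+/*
+ * Illustrative example (numbers invented): a 148500 kHz pixel clock on a
+ * 270000 kHz link symbol clock gives Link M/N = 148500 / 270000 = 0.55,
+ * again scaled into the 24-bit M/N fields.
+ */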
+
+#define _PIPEA_LINK_M_G4X	0x70060
+#define _PIPEB_LINK_M_G4X	0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
+
+#define _PIPEA_LINK_N_G4X	0x70064
+#define _PIPEB_LINK_N_G4X	0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
+
+#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define _PIPEADSL		0x70000
+#define   DSL_LINEMASK_GEN2	0x00000fff
+#define   DSL_LINEMASK_GEN3	0x00001fff
+#define _PIPEACONF		0x70008
+#define   PIPECONF_ENABLE	(1<<31)
+#define   PIPECONF_DISABLE	0
+#define   PIPECONF_DOUBLE_WIDE	(1<<30)
+#define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_DSI_PLL_LOCKED	(1<<29) /* vlv & pipe A only */
+#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define   PIPECONF_SINGLE_WIDE	0
+#define   PIPECONF_PIPE_UNLOCKED 0
+#define   PIPECONF_PIPE_LOCKED	(1<<25)
+#define   PIPECONF_PALETTE	0
+#define   PIPECONF_GAMMA		(1<<24)
+#define   PIPECONF_FORCE_BORDER	(1<<25)
+#define   PIPECONF_INTERLACE_MASK	(7 << 21)
+#define   PIPECONF_INTERLACE_MASK_HSW	(3 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define   PIPECONF_PROGRESSIVE			(0 << 21)
+#define   PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL	(4 << 21) /* gen4 only */
+#define   PIPECONF_INTERLACE_W_SYNC_SHIFT	(5 << 21) /* gen4 only */
+#define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define   PIPECONF_INTERLACE_FIELD_0_ONLY	(7 << 21) /* gen3 only */
+/* Ironlake and later have a complete new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define   PIPECONF_PFIT_PF_INTERLACED_ILK	(1 << 21)
+#define   PIPECONF_INTERLACED_ILK		(3 << 21)
+#define   PIPECONF_INTERLACED_DBL_ILK		(4 << 21) /* ilk/snb only */
+#define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK	(5 << 21) /* ilk/snb only */
+#define   PIPECONF_INTERLACE_MODE_MASK		(7 << 21)
+#define   PIPECONF_EDP_RR_MODE_SWITCH		(1 << 20)
+#define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
+#define   PIPECONF_EDP_RR_MODE_SWITCH_VLV	(1 << 14)
+#define   PIPECONF_COLOR_RANGE_SELECT	(1 << 13)
+#define   PIPECONF_BPC_MASK	(0x7 << 5)
+#define   PIPECONF_8BPC		(0<<5)
+#define   PIPECONF_10BPC	(1<<5)
+#define   PIPECONF_6BPC		(2<<5)
+#define   PIPECONF_12BPC	(3<<5)
+#define   PIPECONF_DITHER_EN	(1<<4)
+#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define   PIPECONF_DITHER_TYPE_SP (0<<2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define _PIPEASTAT		0x70024
+#define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define   SPRITE1_FLIP_DONE_INT_EN_VLV		(1UL<<30)
+#define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
+#define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
+#define   PERF_COUNTER2_INTERRUPT_EN		(1UL<<27)
+#define   PIPE_GMBUS_EVENT_ENABLE		(1UL<<27)
+#define   PLANE_FLIP_DONE_INT_EN_VLV		(1UL<<26)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE		(1UL<<26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV		(1UL<<22)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV	(1UL<<19)
+#define   PERF_COUNTER_INTERRUPT_EN		(1UL<<19)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE	(1UL<<18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_ENABLE	(1UL<<17)
+#define   PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define   PIPEA_HBLANK_INT_EN_VLV		(1UL<<16)
+#define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV	(1UL<<15)
+#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV	(1UL<<14)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
+#define   PERF_COUNTER2_INTERRUPT_STATUS	(1UL<<11)
+#define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define   PLANE_FLIP_DONE_INT_STATUS_VLV	(1UL<<10)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL<<10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define   PIPE_DPST_EVENT_STATUS		(1UL<<7)
+#define   PIPE_A_PSR_STATUS_VLV			(1UL<<6)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS	(1UL<<5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define   PIPE_B_PSR_STATUS_VLV			(1UL<<3)
+#define   PERF_COUNTER_INTERRUPT_STATUS		(1UL<<3)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS	(1UL<<2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_STATUS	(1UL<<1)
+#define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define   PIPE_HBLANK_INT_STATUS		(1UL<<0)
+#define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
+
+#define PIPESTAT_INT_ENABLE_MASK		0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK		0x0000ffff
+
+#define PIPE_A_OFFSET		0x70000
+#define PIPE_B_OFFSET		0x71000
+#define PIPE_C_OFFSET		0x72000
+#define CHV_PIPE_C_OFFSET	0x74000
+/*
+ * There's actually no pipe EDP. Some pipe registers have
+ * simply shifted from the pipe to the transcoder, while
+ * keeping their original offset. Thus we need PIPE_EDP_OFFSET
+ * to access such registers in transcoder EDP.
+ */
+#define PIPE_EDP_OFFSET	0x7f000
+
+#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
+	dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
+
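+/* e.g. PIPECONF(PIPE_B) below resolves to _PIPEACONF relocated to pipe B's
+ * offset block */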
+#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe)  _PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
+
+#define _PIPE_MISC_A			0x70030
+#define _PIPE_MISC_B			0x71030
+#define   PIPEMISC_DITHER_BPC_MASK	(7<<5)
+#define   PIPEMISC_DITHER_8_BPC		(0<<5)
+#define   PIPEMISC_DITHER_10_BPC	(1<<5)
+#define   PIPEMISC_DITHER_6_BPC		(2<<5)
+#define   PIPEMISC_DITHER_12_BPC	(3<<5)
+#define   PIPEMISC_DITHER_ENABLE	(1<<4)
+#define   PIPEMISC_DITHER_TYPE_MASK	(3<<2)
+#define   PIPEMISC_DITHER_TYPE_SP	(0<<2)
+#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
+
+#define VLV_DPFLIPSTAT				(VLV_DISPLAY_BASE + 0x70028)
+#define   PIPEB_LINE_COMPARE_INT_EN		(1<<29)
+#define   PIPEB_HLINE_INT_EN			(1<<28)
+#define   PIPEB_VBLANK_INT_EN			(1<<27)
+#define   SPRITED_FLIP_DONE_INT_EN		(1<<26)
+#define   SPRITEC_FLIP_DONE_INT_EN		(1<<25)
+#define   PLANEB_FLIP_DONE_INT_EN		(1<<24)
+#define   PIPE_PSR_INT_EN			(1<<22)
+#define   PIPEA_LINE_COMPARE_INT_EN		(1<<21)
+#define   PIPEA_HLINE_INT_EN			(1<<20)
+#define   PIPEA_VBLANK_INT_EN			(1<<19)
+#define   SPRITEB_FLIP_DONE_INT_EN		(1<<18)
+#define   SPRITEA_FLIP_DONE_INT_EN		(1<<17)
+#define   PLANEA_FLIPDONE_INT_EN		(1<<16)
+#define   PIPEC_LINE_COMPARE_INT_EN		(1<<13)
+#define   PIPEC_HLINE_INT_EN			(1<<12)
+#define   PIPEC_VBLANK_INT_EN			(1<<11)
+#define   SPRITEF_FLIPDONE_INT_EN		(1<<10)
+#define   SPRITEE_FLIPDONE_INT_EN		(1<<9)
+#define   PLANEC_FLIPDONE_INT_EN		(1<<8)
+
+#define DPINVGTT				(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define   SPRITEF_INVALID_GTT_INT_EN		(1<<27)
+#define   SPRITEE_INVALID_GTT_INT_EN		(1<<26)
+#define   PLANEC_INVALID_GTT_INT_EN		(1<<25)
+#define   CURSORC_INVALID_GTT_INT_EN		(1<<24)
+#define   CURSORB_INVALID_GTT_INT_EN		(1<<23)
+#define   CURSORA_INVALID_GTT_INT_EN		(1<<22)
+#define   SPRITED_INVALID_GTT_INT_EN		(1<<21)
+#define   SPRITEC_INVALID_GTT_INT_EN		(1<<20)
+#define   PLANEB_INVALID_GTT_INT_EN		(1<<19)
+#define   SPRITEB_INVALID_GTT_INT_EN		(1<<18)
+#define   SPRITEA_INVALID_GTT_INT_EN		(1<<17)
+#define   PLANEA_INVALID_GTT_INT_EN		(1<<16)
+#define   DPINVGTT_EN_MASK			0xff0000
+#define   DPINVGTT_EN_MASK_CHV			0xfff0000
+#define   SPRITEF_INVALID_GTT_STATUS		(1<<11)
+#define   SPRITEE_INVALID_GTT_STATUS		(1<<10)
+#define   PLANEC_INVALID_GTT_STATUS		(1<<9)
+#define   CURSORC_INVALID_GTT_STATUS		(1<<8)
+#define   CURSORB_INVALID_GTT_STATUS		(1<<7)
+#define   CURSORA_INVALID_GTT_STATUS		(1<<6)
+#define   SPRITED_INVALID_GTT_STATUS		(1<<5)
+#define   SPRITEC_INVALID_GTT_STATUS		(1<<4)
+#define   PLANEB_INVALID_GTT_STATUS		(1<<3)
+#define   SPRITEB_INVALID_GTT_STATUS		(1<<2)
+#define   SPRITEA_INVALID_GTT_STATUS		(1<<1)
+#define   PLANEA_INVALID_GTT_STATUS		(1<<0)
+#define   DPINVGTT_STATUS_MASK			0xff
+#define   DPINVGTT_STATUS_MASK_CHV		0xfff
+
+#define DSPARB			(dev_priv->info.display_mmio_offset + 0x70030)
+#define   DSPARB_CSTART_MASK	(0x7f << 7)
+#define   DSPARB_CSTART_SHIFT	7
+#define   DSPARB_BSTART_MASK	(0x7f)
+#define   DSPARB_BSTART_SHIFT	0
+#define   DSPARB_BEND_SHIFT	9 /* on 855 */
+#define   DSPARB_AEND_SHIFT	0
+#define   DSPARB_SPRITEA_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEB_SHIFT_VLV	8
+#define   DSPARB_SPRITEB_MASK_VLV	(0xff << 8)
+#define   DSPARB_SPRITEC_SHIFT_VLV	16
+#define   DSPARB_SPRITEC_MASK_VLV	(0xff << 16)
+#define   DSPARB_SPRITED_SHIFT_VLV	24
+#define   DSPARB_SPRITED_MASK_VLV	(0xff << 24)
+#define DSPARB2			(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define   DSPARB_SPRITEA_HI_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_HI_MASK_VLV	(0x1 << 0)
+#define   DSPARB_SPRITEB_HI_SHIFT_VLV	4
+#define   DSPARB_SPRITEB_HI_MASK_VLV	(0x1 << 4)
+#define   DSPARB_SPRITEC_HI_SHIFT_VLV	8
+#define   DSPARB_SPRITEC_HI_MASK_VLV	(0x1 << 8)
+#define   DSPARB_SPRITED_HI_SHIFT_VLV	12
+#define   DSPARB_SPRITED_HI_MASK_VLV	(0x1 << 12)
+#define   DSPARB_SPRITEE_HI_SHIFT_VLV	16
+#define   DSPARB_SPRITEE_HI_MASK_VLV	(0x1 << 16)
+#define   DSPARB_SPRITEF_HI_SHIFT_VLV	20
+#define   DSPARB_SPRITEF_HI_MASK_VLV	(0x1 << 20)
+#define DSPARB3			(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define   DSPARB_SPRITEE_SHIFT_VLV	0
+#define   DSPARB_SPRITEE_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEF_SHIFT_VLV	8
+#define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
+
+/* pnv/gen4/g4x/vlv/chv */
+#define DSPFW1			(dev_priv->info.display_mmio_offset + 0x70034)
+#define   DSPFW_SR_SHIFT		23
+#define   DSPFW_SR_MASK			(0x1ff<<23)
+#define   DSPFW_CURSORB_SHIFT		16
+#define   DSPFW_CURSORB_MASK		(0x3f<<16)
+#define   DSPFW_PLANEB_SHIFT		8
+#define   DSPFW_PLANEB_MASK		(0x7f<<8)
+#define   DSPFW_PLANEB_MASK_VLV		(0xff<<8) /* vlv/chv */
+#define   DSPFW_PLANEA_SHIFT		0
+#define   DSPFW_PLANEA_MASK		(0x7f<<0)
+#define   DSPFW_PLANEA_MASK_VLV		(0xff<<0) /* vlv/chv */
+#define DSPFW2			(dev_priv->info.display_mmio_offset + 0x70038)
+#define   DSPFW_FBC_SR_EN		(1<<31)	  /* g4x */
+#define   DSPFW_FBC_SR_SHIFT		28
+#define   DSPFW_FBC_SR_MASK		(0x7<<28) /* g4x */
+#define   DSPFW_FBC_HPLL_SR_SHIFT	24
+#define   DSPFW_FBC_HPLL_SR_MASK	(0xf<<24) /* g4x */
+#define   DSPFW_SPRITEB_SHIFT		(16)
+#define   DSPFW_SPRITEB_MASK		(0x7f<<16) /* g4x */
+#define   DSPFW_SPRITEB_MASK_VLV	(0xff<<16) /* vlv/chv */
+#define   DSPFW_CURSORA_SHIFT		8
+#define   DSPFW_CURSORA_MASK		(0x3f<<8)
+#define   DSPFW_PLANEC_OLD_SHIFT	0
+#define   DSPFW_PLANEC_OLD_MASK		(0x7f<<0) /* pre-gen4 sprite C */
+#define   DSPFW_SPRITEA_SHIFT		0
+#define   DSPFW_SPRITEA_MASK		(0x7f<<0) /* g4x */
+#define   DSPFW_SPRITEA_MASK_VLV	(0xff<<0) /* vlv/chv */
+#define DSPFW3			(dev_priv->info.display_mmio_offset + 0x7003c)
+#define   DSPFW_HPLL_SR_EN		(1<<31)
+#define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
+#define   DSPFW_CURSOR_SR_SHIFT		24
+#define   DSPFW_CURSOR_SR_MASK		(0x3f<<24)
+#define   DSPFW_HPLL_CURSOR_SHIFT	16
+#define   DSPFW_HPLL_CURSOR_MASK	(0x3f<<16)
+#define   DSPFW_HPLL_SR_SHIFT		0
+#define   DSPFW_HPLL_SR_MASK		(0x1ff<<0)
+
+/* vlv/chv */
+#define DSPFW4			(VLV_DISPLAY_BASE + 0x70070)
+#define   DSPFW_SPRITEB_WM1_SHIFT	16
+#define   DSPFW_SPRITEB_WM1_MASK	(0xff<<16)
+#define   DSPFW_CURSORA_WM1_SHIFT	8
+#define   DSPFW_CURSORA_WM1_MASK	(0x3f<<8)
+#define   DSPFW_SPRITEA_WM1_SHIFT	0
+#define   DSPFW_SPRITEA_WM1_MASK	(0xff<<0)
+#define DSPFW5			(VLV_DISPLAY_BASE + 0x70074)
+#define   DSPFW_PLANEB_WM1_SHIFT	24
+#define   DSPFW_PLANEB_WM1_MASK		(0xff<<24)
+#define   DSPFW_PLANEA_WM1_SHIFT	16
+#define   DSPFW_PLANEA_WM1_MASK		(0xff<<16)
+#define   DSPFW_CURSORB_WM1_SHIFT	8
+#define   DSPFW_CURSORB_WM1_MASK	(0x3f<<8)
+#define   DSPFW_CURSOR_SR_WM1_SHIFT	0
+#define   DSPFW_CURSOR_SR_WM1_MASK	(0x3f<<0)
+#define DSPFW6			(VLV_DISPLAY_BASE + 0x70078)
+#define   DSPFW_SR_WM1_SHIFT		0
+#define   DSPFW_SR_WM1_MASK		(0x1ff<<0)
+#define DSPFW7			(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV		(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define   DSPFW_SPRITED_WM1_SHIFT	24
+#define   DSPFW_SPRITED_WM1_MASK	(0xff<<24)
+#define   DSPFW_SPRITED_SHIFT		16
+#define   DSPFW_SPRITED_MASK_VLV	(0xff<<16)
+#define   DSPFW_SPRITEC_WM1_SHIFT	8
+#define   DSPFW_SPRITEC_WM1_MASK	(0xff<<8)
+#define   DSPFW_SPRITEC_SHIFT		0
+#define   DSPFW_SPRITEC_MASK_VLV	(0xff<<0)
+#define DSPFW8_CHV		(VLV_DISPLAY_BASE + 0x700b8)
+#define   DSPFW_SPRITEF_WM1_SHIFT	24
+#define   DSPFW_SPRITEF_WM1_MASK	(0xff<<24)
+#define   DSPFW_SPRITEF_SHIFT		16
+#define   DSPFW_SPRITEF_MASK_VLV	(0xff<<16)
+#define   DSPFW_SPRITEE_WM1_SHIFT	8
+#define   DSPFW_SPRITEE_WM1_MASK	(0xff<<8)
+#define   DSPFW_SPRITEE_SHIFT		0
+#define   DSPFW_SPRITEE_MASK_VLV	(0xff<<0)
+#define DSPFW9_CHV		(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define   DSPFW_PLANEC_WM1_SHIFT	24
+#define   DSPFW_PLANEC_WM1_MASK		(0xff<<24)
+#define   DSPFW_PLANEC_SHIFT		16
+#define   DSPFW_PLANEC_MASK_VLV		(0xff<<16)
+#define   DSPFW_CURSORC_WM1_SHIFT	8
+#define   DSPFW_CURSORC_WM1_MASK	(0x3f<<8)
+#define   DSPFW_CURSORC_SHIFT		0
+#define   DSPFW_CURSORC_MASK		(0x3f<<0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM			(VLV_DISPLAY_BASE + 0x70064)
+#define   DSPFW_SR_HI_SHIFT		24
+#define   DSPFW_SR_HI_MASK		(3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SPRITEF_HI_SHIFT	23
+#define   DSPFW_SPRITEF_HI_MASK		(1<<23)
+#define   DSPFW_SPRITEE_HI_SHIFT	22
+#define   DSPFW_SPRITEE_HI_MASK		(1<<22)
+#define   DSPFW_PLANEC_HI_SHIFT		21
+#define   DSPFW_PLANEC_HI_MASK		(1<<21)
+#define   DSPFW_SPRITED_HI_SHIFT	20
+#define   DSPFW_SPRITED_HI_MASK		(1<<20)
+#define   DSPFW_SPRITEC_HI_SHIFT	16
+#define   DSPFW_SPRITEC_HI_MASK		(1<<16)
+#define   DSPFW_PLANEB_HI_SHIFT		12
+#define   DSPFW_PLANEB_HI_MASK		(1<<12)
+#define   DSPFW_SPRITEB_HI_SHIFT	8
+#define   DSPFW_SPRITEB_HI_MASK		(1<<8)
+#define   DSPFW_SPRITEA_HI_SHIFT	4
+#define   DSPFW_SPRITEA_HI_MASK		(1<<4)
+#define   DSPFW_PLANEA_HI_SHIFT		0
+#define   DSPFW_PLANEA_HI_MASK		(1<<0)
+#define DSPHOWM1		(VLV_DISPLAY_BASE + 0x70068)
+#define   DSPFW_SR_WM1_HI_SHIFT		24
+#define   DSPFW_SR_WM1_HI_MASK		(3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SPRITEF_WM1_HI_SHIFT	23
+#define   DSPFW_SPRITEF_WM1_HI_MASK	(1<<23)
+#define   DSPFW_SPRITEE_WM1_HI_SHIFT	22
+#define   DSPFW_SPRITEE_WM1_HI_MASK	(1<<22)
+#define   DSPFW_PLANEC_WM1_HI_SHIFT	21
+#define   DSPFW_PLANEC_WM1_HI_MASK	(1<<21)
+#define   DSPFW_SPRITED_WM1_HI_SHIFT	20
+#define   DSPFW_SPRITED_WM1_HI_MASK	(1<<20)
+#define   DSPFW_SPRITEC_WM1_HI_SHIFT	16
+#define   DSPFW_SPRITEC_WM1_HI_MASK	(1<<16)
+#define   DSPFW_PLANEB_WM1_HI_SHIFT	12
+#define   DSPFW_PLANEB_WM1_HI_MASK	(1<<12)
+#define   DSPFW_SPRITEB_WM1_HI_SHIFT	8
+#define   DSPFW_SPRITEB_WM1_HI_MASK	(1<<8)
+#define   DSPFW_SPRITEA_WM1_HI_SHIFT	4
+#define   DSPFW_SPRITEA_WM1_HI_MASK	(1<<4)
+#define   DSPFW_PLANEA_WM1_HI_SHIFT	0
+#define   DSPFW_PLANEA_WM1_HI_MASK	(1<<0)
+
+/* drain latency register values */
+#define VLV_DDL(pipe)			(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_SHIFT		24
+#define DDL_SPRITE_SHIFT(sprite)	(8+8*(sprite))
+#define DDL_PLANE_SHIFT			0
+#define DDL_PRECISION_HIGH		(1<<7)
+#define DDL_PRECISION_LOW		(0<<7)
+#define DRAIN_LATENCY_MASK		0x7f
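+/*
+ * e.g. sprite 1's drain latency on pipe A lives in VLV_DDL(PIPE_A) at
+ * DDL_SPRITE_SHIFT(1) = 16, with DDL_PRECISION_HIGH selecting the finer
+ * precision for that field.
+ */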
+
+#define CBR1_VLV			(VLV_DISPLAY_BASE + 0x70400)
+#define  CBR_PND_DEADLINE_DISABLE	(1<<31)
+#define  CBR_PWM_CLOCK_MUX_SELECT	(1<<30)
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE	64
+#define I915_FIFO_LINE_SIZE	64
+#define I830_FIFO_LINE_SIZE	32
+
+#define VALLEYVIEW_FIFO_SIZE	255
+#define G4X_FIFO_SIZE		127
+#define I965_FIFO_SIZE		512
+#define I945_FIFO_SIZE		127
+#define I915_FIFO_SIZE		95
+#define I855GM_FIFO_SIZE	127 /* In cachelines */
+#define I830_FIFO_SIZE		95
+
+#define VALLEYVIEW_MAX_WM	0xff
+#define G4X_MAX_WM		0x3f
+#define I915_MAX_WM		0x3f
+
+#define PINEVIEW_DISPLAY_FIFO	512 /* in 64-byte units */
+#define PINEVIEW_FIFO_LINE_SIZE	64
+#define PINEVIEW_MAX_WM		0x1ff
+#define PINEVIEW_DFT_WM		0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM	0
+#define PINEVIEW_GUARD_WM		10
+#define PINEVIEW_CURSOR_FIFO		64
+#define PINEVIEW_CURSOR_MAX_WM	0x3f
+#define PINEVIEW_CURSOR_DFT_WM	0
+#define PINEVIEW_CURSOR_GUARD_WM	5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO	64
+#define I965_CURSOR_MAX_WM	32
+#define I965_CURSOR_DFT_WM	8
+
+/* Watermark register definitions for SKL */
+#define CUR_WM_A_0		0x70140
+#define CUR_WM_B_0		0x71140
+#define PLANE_WM_1_A_0		0x70240
+#define PLANE_WM_1_B_0		0x71240
+#define PLANE_WM_2_A_0		0x70340
+#define PLANE_WM_2_B_0		0x71340
+#define PLANE_WM_TRANS_1_A_0	0x70268
+#define PLANE_WM_TRANS_1_B_0	0x71268
+#define PLANE_WM_TRANS_2_A_0	0x70368
+#define PLANE_WM_TRANS_2_B_0	0x71368
+#define CUR_WM_TRANS_A_0	0x70168
+#define CUR_WM_TRANS_B_0	0x71168
+#define   PLANE_WM_EN		(1 << 31)
+#define   PLANE_WM_LINES_SHIFT	14
+#define   PLANE_WM_LINES_MASK	0x1f
+#define   PLANE_WM_BLOCKS_MASK	0x3ff
+
+#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
+#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+
+#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane)	\
+			_PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level)	\
+			(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_TRANS_1(pipe)	\
+			_PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+#define _PLANE_WM_TRANS_2(pipe)	\
+			_PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+#define PLANE_WM_TRANS(pipe, plane)	\
+		_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK		0x45100
+#define  WM0_PIPE_PLANE_MASK	(0xffff<<16)
+#define  WM0_PIPE_PLANE_SHIFT	16
+#define  WM0_PIPE_SPRITE_MASK	(0xff<<8)
+#define  WM0_PIPE_SPRITE_SHIFT	8
+#define  WM0_PIPE_CURSOR_MASK	(0xff)
+
+#define WM0_PIPEB_ILK		0x45104
+#define WM0_PIPEC_IVB		0x45200
+#define WM1_LP_ILK		0x45108
+#define  WM1_LP_SR_EN		(1<<31)
+#define  WM1_LP_LATENCY_SHIFT	24
+#define  WM1_LP_LATENCY_MASK	(0x7f<<24)
+#define  WM1_LP_FBC_MASK	(0xf<<20)
+#define  WM1_LP_FBC_SHIFT	20
+#define  WM1_LP_FBC_SHIFT_BDW	19
+#define  WM1_LP_SR_MASK		(0x7ff<<8)
+#define  WM1_LP_SR_SHIFT	8
+#define  WM1_LP_CURSOR_MASK	(0xff)
+#define WM2_LP_ILK		0x4510c
+#define  WM2_LP_EN		(1<<31)
+#define WM3_LP_ILK		0x45110
+#define  WM3_LP_EN		(1<<31)
+#define WM1S_LP_ILK		0x45120
+#define WM2S_LP_IVB		0x45124
+#define WM3S_LP_IVB		0x45128
+#define  WM1S_LP_EN		(1<<31)
+
+#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
+	(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
+	 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
+
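+/*
+ * e.g. (illustrative operands) HSW_WM_LP_VAL(0x20, 0x3, 0x100, 0x40)
+ * composes an enabled LP watermark with latency 0x20, FBC 0x3, primary
+ * 0x100 and cursor 0x40 packed into the fields above.
+ */
+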
+/* Memory latency timer register */
+#define MLTR_ILK		0x11222
+#define  MLTR_WM1_SHIFT		0
+#define  MLTR_WM2_SHIFT		8
+/* the unit of memory self-refresh latency time is 0.5us */
+#define  ILK_SRLT_MASK		0x3f
+
+
+/* the address where we get all kinds of latency values */
+#define SSKPD			0x5d10
+#define SSKPD_WM_MASK		0x3f
+#define SSKPD_WM0_SHIFT		0
+#define SSKPD_WM1_SHIFT		8
+#define SSKPD_WM2_SHIFT		16
+#define SSKPD_WM3_SHIFT		24
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ *  do {
+ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *             PIPE_FRAME_LOW_SHIFT);
+ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *  } while (high1 != high2);
+ *  frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH          0x70040
+#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
+#define   PIPE_FRAME_HIGH_SHIFT   0
+#define _PIPEAFRAMEPIXEL         0x70044
+#define   PIPE_FRAME_LOW_MASK     0xff000000
+#define   PIPE_FRAME_LOW_SHIFT    24
+#define   PIPE_PIXEL_MASK         0x00ffffff
+#define   PIPE_PIXEL_SHIFT        0
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_G4X	0x70040
+#define _PIPEA_FLIPCOUNT_G4X	0x70044
+#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
+#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
+
+/* Cursor A & B regs */
+#define _CURACNTR		0x70080
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define   CURSOR_ENABLE		0x80000000
+#define   CURSOR_GAMMA_ENABLE	0x40000000
+#define   CURSOR_STRIDE_SHIFT	28
+#define   CURSOR_STRIDE(x)	((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
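+/* e.g. CURSOR_STRIDE(1024) = (ffs(1024) - 9) << 28 = 2 << 28 */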
+#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define   CURSOR_FORMAT_SHIFT	24
+#define   CURSOR_FORMAT_MASK	(0x07 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_2C	(0x00 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_3C	(0x01 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_4C	(0x02 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_ARGB	(0x04 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_XRGB	(0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define   CURSOR_MODE		0x27
+#define   CURSOR_MODE_DISABLE   0x00
+#define   CURSOR_MODE_128_32B_AX 0x02
+#define   CURSOR_MODE_256_32B_AX 0x03
+#define   CURSOR_MODE_64_32B_AX 0x07
+#define   CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
+#define   CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
+#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT	(1 << 28)
+#define   MCURSOR_PIPE_A	0x00
+#define   MCURSOR_PIPE_B	(1 << 28)
+#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define   CURSOR_ROTATE_180	(1<<15)
+#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
+#define _CURABASE		0x70084
+#define _CURAPOS		0x70088
+#define   CURSOR_POS_MASK       0x007FF
+#define   CURSOR_POS_SIGN       0x8000
+#define   CURSOR_X_SHIFT        0
+#define   CURSOR_Y_SHIFT        16
+#define CURSIZE			0x700a0
+#define _CURBCNTR		0x700c0
+#define _CURBBASE		0x700c4
+#define _CURBPOS		0x700c8
+
+#define _CURBCNTR_IVB		0x71080
+#define _CURBBASE_IVB		0x71084
+#define _CURBPOS_IVB		0x71088
+
+#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
+	dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
+
+#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
+
+#define CURSOR_A_OFFSET 0x70080
+#define CURSOR_B_OFFSET 0x700c0
+#define CHV_CURSOR_C_OFFSET 0x700e0
+#define IVB_CURSOR_B_OFFSET 0x71080
+#define IVB_CURSOR_C_OFFSET 0x72080
+
+/* Display A control */
+#define _DSPACNTR				0x70180
+#define   DISPLAY_PLANE_ENABLE			(1<<31)
+#define   DISPLAY_PLANE_DISABLE			0
+#define   DISPPLANE_GAMMA_ENABLE		(1<<30)
+#define   DISPPLANE_GAMMA_DISABLE		0
+#define   DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define   DISPPLANE_YUV422			(0x0<<26)
+#define   DISPPLANE_8BPP			(0x2<<26)
+#define   DISPPLANE_BGRA555			(0x3<<26)
+#define   DISPPLANE_BGRX555			(0x4<<26)
+#define   DISPPLANE_BGRX565			(0x5<<26)
+#define   DISPPLANE_BGRX888			(0x6<<26)
+#define   DISPPLANE_BGRA888			(0x7<<26)
+#define   DISPPLANE_RGBX101010			(0x8<<26)
+#define   DISPPLANE_RGBA101010			(0x9<<26)
+#define   DISPPLANE_BGRX101010			(0xa<<26)
+#define   DISPPLANE_RGBX161616			(0xc<<26)
+#define   DISPPLANE_RGBX888			(0xe<<26)
+#define   DISPPLANE_RGBA888			(0xf<<26)
+#define   DISPPLANE_STEREO_ENABLE		(1<<25)
+#define   DISPPLANE_STEREO_DISABLE		0
+#define   DISPPLANE_PIPE_CSC_ENABLE		(1<<24)
+#define   DISPPLANE_SEL_PIPE_SHIFT		24
+#define   DISPPLANE_SEL_PIPE_MASK		(3<<DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SEL_PIPE_A			0
+#define   DISPPLANE_SEL_PIPE_B			(1<<DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define   DISPPLANE_SRC_KEY_DISABLE		0
+#define   DISPPLANE_LINE_DOUBLE			(1<<20)
+#define   DISPPLANE_NO_LINE_DOUBLE		0
+#define   DISPPLANE_STEREO_POLARITY_FIRST	0
+#define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+#define   DISPPLANE_ALPHA_PREMULTIPLY		(1<<16) /* CHV pipe B */
+#define   DISPPLANE_ROTATE_180			(1<<15)
+#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
+#define   DISPPLANE_TILED			(1<<10)
+#define   DISPPLANE_MIRROR			(1<<8) /* CHV pipe B */
+#define _DSPAADDR				0x70184
+#define _DSPASTRIDE				0x70188
+#define _DSPAPOS				0x7018C /* reserved */
+#define _DSPASIZE				0x70190
+#define _DSPASURF				0x7019C /* 965+ only */
+#define _DSPATILEOFF				0x701A4 /* 965+ only */
+#define _DSPAOFFSET				0x701A4 /* HSW */
+#define _DSPASURFLIVE				0x701AC
+
+#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+
+/* CHV pipe B blender and primary plane */
+#define _CHV_BLEND_A		0x60a00
+#define   CHV_BLEND_LEGACY		(0<<30)
+#define   CHV_BLEND_ANDROID		(1<<30)
+#define   CHV_BLEND_MPO			(2<<30)
+#define   CHV_BLEND_MASK		(3<<30)
+#define _CHV_CANVAS_A		0x60a04
+#define _PRIMPOS_A		0x60a08
+#define _PRIMSIZE_A		0x60a0c
+#define _PRIMCNSTALPHA_A	0x60a10
+#define   PRIM_CONST_ALPHA_ENABLE	(1<<31)
+
+#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK	(0xfffff000)
+#define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
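+/*
+ * Surface addresses are 4K aligned: I915_HI_DISPBASE() extracts the
+ * page-aligned base and I915_LO_DISPBASE() the offset bits below it.
+ */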
+
+/*
+ * VBIOS flags
+ * gen2:
+ * [00:06] alm,mgm
+ * [10:16] all
+ * [30:32] alm,mgm
+ * gen3+:
+ * [00:0f] all
+ * [10:1f] all
+ * [30:32] all
+ */
+#define SWF0(i)	(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
+#define SWF1(i)	(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
+#define SWF3(i)	(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+
+/* Pipe B */
+#define _PIPEBDSL		(dev_priv->info.display_mmio_offset + 0x71000)
+#define _PIPEBCONF		(dev_priv->info.display_mmio_offset + 0x71008)
+#define _PIPEBSTAT		(dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH		0x71040
+#define _PIPEBFRAMEPIXEL	0x71044
+#define _PIPEB_FRMCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71044)
+
+
+/* Display B control */
+#define _DSPBCNTR		(dev_priv->info.display_mmio_offset + 0x71180)
+#define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define   DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
+#define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
+#define _DSPBADDR		(dev_priv->info.display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE		(dev_priv->info.display_mmio_offset + 0x71188)
+#define _DSPBPOS		(dev_priv->info.display_mmio_offset + 0x7118C)
+#define _DSPBSIZE		(dev_priv->info.display_mmio_offset + 0x71190)
+#define _DSPBSURF		(dev_priv->info.display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF		(dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET		(dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE		(dev_priv->info.display_mmio_offset + 0x711AC)
+
+/* Sprite A control */
+#define _DVSACNTR		0x72180
+#define   DVS_ENABLE		(1<<31)
+#define   DVS_GAMMA_ENABLE	(1<<30)
+#define   DVS_PIXFORMAT_MASK	(3<<25)
+#define   DVS_FORMAT_YUV422	(0<<25)
+#define   DVS_FORMAT_RGBX101010	(1<<25)
+#define   DVS_FORMAT_RGBX888	(2<<25)
+#define   DVS_FORMAT_RGBX161616	(3<<25)
+#define   DVS_PIPE_CSC_ENABLE   (1<<24)
+#define   DVS_SOURCE_KEY	(1<<22)
+#define   DVS_RGB_ORDER_XBGR	(1<<20)
+#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define   DVS_YUV_ORDER_YUYV	(0<<16)
+#define   DVS_YUV_ORDER_UYVY	(1<<16)
+#define   DVS_YUV_ORDER_YVYU	(2<<16)
+#define   DVS_YUV_ORDER_VYUY	(3<<16)
+#define   DVS_ROTATE_180	(1<<15)
+#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define   DVS_TILED		(1<<10)
+#define   DVS_DEST_KEY		(1<<2)
+#define _DVSALINOFF		0x72184
+#define _DVSASTRIDE		0x72188
+#define _DVSAPOS		0x7218c
+#define _DVSASIZE		0x72190
+#define _DVSAKEYVAL		0x72194
+#define _DVSAKEYMSK		0x72198
+#define _DVSASURF		0x7219c
+#define _DVSAKEYMAXVAL		0x721a0
+#define _DVSATILEOFF		0x721a4
+#define _DVSASURFLIVE		0x721ac
+#define _DVSASCALE		0x72204
+#define   DVS_SCALE_ENABLE	(1<<31)
+#define   DVS_FILTER_MASK	(3<<29)
+#define   DVS_FILTER_MEDIUM	(0<<29)
+#define   DVS_FILTER_ENHANCING	(1<<29)
+#define   DVS_FILTER_SOFTENING	(2<<29)
+#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC		0x72300
+
+#define _DVSBCNTR		0x73180
+#define _DVSBLINOFF		0x73184
+#define _DVSBSTRIDE		0x73188
+#define _DVSBPOS		0x7318c
+#define _DVSBSIZE		0x73190
+#define _DVSBKEYVAL		0x73194
+#define _DVSBKEYMSK		0x73198
+#define _DVSBSURF		0x7319c
+#define _DVSBKEYMAXVAL		0x731a0
+#define _DVSBTILEOFF		0x731a4
+#define _DVSBSURFLIVE		0x731ac
+#define _DVSBSCALE		0x73204
+#define _DVSBGAMC		0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+
+#define _SPRA_CTL		0x70280
+#define   SPRITE_ENABLE			(1<<31)
+#define   SPRITE_GAMMA_ENABLE		(1<<30)
+#define   SPRITE_PIXFORMAT_MASK		(7<<25)
+#define   SPRITE_FORMAT_YUV422		(0<<25)
+#define   SPRITE_FORMAT_RGBX101010	(1<<25)
+#define   SPRITE_FORMAT_RGBX888		(2<<25)
+#define   SPRITE_FORMAT_RGBX161616	(3<<25)
+#define   SPRITE_FORMAT_YUV444		(4<<25)
+#define   SPRITE_FORMAT_XR_BGR101010	(5<<25) /* Extended range */
+#define   SPRITE_PIPE_CSC_ENABLE	(1<<24)
+#define   SPRITE_SOURCE_KEY		(1<<22)
+#define   SPRITE_RGB_ORDER_RGBX		(1<<20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE	(1<<19)
+#define   SPRITE_YUV_CSC_FORMAT_BT709	(1<<18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK	(3<<16)
+#define   SPRITE_YUV_ORDER_YUYV		(0<<16)
+#define   SPRITE_YUV_ORDER_UYVY		(1<<16)
+#define   SPRITE_YUV_ORDER_YVYU		(2<<16)
+#define   SPRITE_YUV_ORDER_VYUY		(3<<16)
+#define   SPRITE_ROTATE_180		(1<<15)
+#define   SPRITE_TRICKLE_FEED_DISABLE	(1<<14)
+#define   SPRITE_INT_GAMMA_ENABLE	(1<<13)
+#define   SPRITE_TILED			(1<<10)
+#define   SPRITE_DEST_KEY		(1<<2)
+#define _SPRA_LINOFF		0x70284
+#define _SPRA_STRIDE		0x70288
+#define _SPRA_POS		0x7028c
+#define _SPRA_SIZE		0x70290
+#define _SPRA_KEYVAL		0x70294
+#define _SPRA_KEYMSK		0x70298
+#define _SPRA_SURF		0x7029c
+#define _SPRA_KEYMAX		0x702a0
+#define _SPRA_TILEOFF		0x702a4
+#define _SPRA_OFFSET		0x702a4
+#define _SPRA_SURFLIVE		0x702ac
+#define _SPRA_SCALE		0x70304
+#define   SPRITE_SCALE_ENABLE	(1<<31)
+#define   SPRITE_FILTER_MASK	(3<<29)
+#define   SPRITE_FILTER_MEDIUM	(0<<29)
+#define   SPRITE_FILTER_ENHANCING	(1<<29)
+#define   SPRITE_FILTER_SOFTENING	(2<<29)
+#define   SPRITE_VERTICAL_OFFSET_HALF	(1<<28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE	(1<<27)
+#define _SPRA_GAMC		0x70400
+
+#define _SPRB_CTL		0x71280
+#define _SPRB_LINOFF		0x71284
+#define _SPRB_STRIDE		0x71288
+#define _SPRB_POS		0x7128c
+#define _SPRB_SIZE		0x71290
+#define _SPRB_KEYVAL		0x71294
+#define _SPRB_KEYMSK		0x71298
+#define _SPRB_SURF		0x7129c
+#define _SPRB_KEYMAX		0x712a0
+#define _SPRB_TILEOFF		0x712a4
+#define _SPRB_OFFSET		0x712a4
+#define _SPRB_SURFLIVE		0x712ac
+#define _SPRB_SCALE		0x71304
+#define _SPRB_GAMC		0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR		(VLV_DISPLAY_BASE + 0x72180)
+#define   SP_ENABLE			(1<<31)
+#define   SP_GAMMA_ENABLE		(1<<30)
+#define   SP_PIXFORMAT_MASK		(0xf<<26)
+#define   SP_FORMAT_YUV422		(0<<26)
+#define   SP_FORMAT_BGR565		(5<<26)
+#define   SP_FORMAT_BGRX8888		(6<<26)
+#define   SP_FORMAT_BGRA8888		(7<<26)
+#define   SP_FORMAT_RGBX1010102		(8<<26)
+#define   SP_FORMAT_RGBA1010102		(9<<26)
+#define   SP_FORMAT_RGBX8888		(0xe<<26)
+#define   SP_FORMAT_RGBA8888		(0xf<<26)
+#define   SP_ALPHA_PREMULTIPLY		(1<<23) /* CHV pipe B */
+#define   SP_SOURCE_KEY			(1<<22)
+#define   SP_YUV_BYTE_ORDER_MASK	(3<<16)
+#define   SP_YUV_ORDER_YUYV		(0<<16)
+#define   SP_YUV_ORDER_UYVY		(1<<16)
+#define   SP_YUV_ORDER_YVYU		(2<<16)
+#define   SP_YUV_ORDER_VYUY		(3<<16)
+#define   SP_ROTATE_180			(1<<15)
+#define   SP_TILED			(1<<10)
+#define   SP_MIRROR			(1<<8) /* CHV pipe B */
+#define _SPALINOFF		(VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE		(VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS			(VLV_DISPLAY_BASE + 0x7218c)
+#define _SPASIZE		(VLV_DISPLAY_BASE + 0x72190)
+#define _SPAKEYMINVAL		(VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK		(VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF		(VLV_DISPLAY_BASE + 0x7219c)
+#define _SPAKEYMAXVAL		(VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF		(VLV_DISPLAY_BASE + 0x721a4)
+#define _SPACONSTALPHA		(VLV_DISPLAY_BASE + 0x721a8)
+#define   SP_CONST_ALPHA_ENABLE		(1<<31)
+#define _SPAGAMC		(VLV_DISPLAY_BASE + 0x721f4)
+
+#define _SPBCNTR		(VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF		(VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE		(VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS			(VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE		(VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL		(VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK		(VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF		(VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL		(VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF		(VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA		(VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBGAMC		(VLV_DISPLAY_BASE + 0x722f4)
+
+#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
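+/*
+ * VLV/CHV have two sprites per pipe; the (pipe) * 2 + (plane) index
+ * steps through the 0x100-sized per-sprite register blocks via the
+ * linear _PIPE() arithmetic defined earlier, e.g. SPCNTR(PIPE_B, 0) =
+ * _SPACNTR + 2 * 0x100 = VLV_DISPLAY_BASE + 0x72380.
+ */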
+
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr|   |c0 c1 c2|   |cr + cr_ioff|   |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb|   |c6 c7 c8|   |cb + cr_ioff|   |cb_ooff|
+ */
+#define SPCSCYGOFF(sprite)	(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite)	(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite)	(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define  SPCSC_OOFF(x)		(((x) & 0x7ff) << 16) /* s11 */
+#define  SPCSC_IOFF(x)		(((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCC01(sprite)	(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite)	(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite)	(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite)	(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite)		(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define  SPCSC_C1(x)		(((x) & 0x7fff) << 16) /* s3.12 */
+#define  SPCSC_C0(x)		(((x) & 0x7fff) << 0) /* s3.12 */
+
+#define SPCSCYGICLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define  SPCSC_IMAX(x)		(((x) & 0x7ff) << 16) /* s11 */
+#define  SPCSC_IMIN(x)		(((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCYGOCLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite)	(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define  SPCSC_OMAX(x)		((x) << 16) /* u10 */
+#define  SPCSC_OMIN(x)		((x) << 0) /* u10 */
+
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A				0x70180
+#define _PLANE_CTL_2_A				0x70280
+#define _PLANE_CTL_3_A				0x70380
+#define   PLANE_CTL_ENABLE			(1 << 31)
+#define   PLANE_CTL_PIPE_GAMMA_ENABLE		(1 << 30)
+#define   PLANE_CTL_FORMAT_MASK			(0xf << 24)
+#define   PLANE_CTL_FORMAT_YUV422		(  0 << 24)
+#define   PLANE_CTL_FORMAT_NV12			(  1 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_2101010		(  2 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_8888		(  4 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_16161616F	(  6 << 24)
+#define   PLANE_CTL_FORMAT_AYUV			(  8 << 24)
+#define   PLANE_CTL_FORMAT_INDEXED		( 12 << 24)
+#define   PLANE_CTL_FORMAT_RGB_565		( 14 << 24)
+#define   PLANE_CTL_PIPE_CSC_ENABLE		(1 << 23)
+#define   PLANE_CTL_KEY_ENABLE_MASK		(0x3 << 21)
+#define   PLANE_CTL_KEY_ENABLE_SOURCE		(  1 << 21)
+#define   PLANE_CTL_KEY_ENABLE_DESTINATION	(  2 << 21)
+#define   PLANE_CTL_ORDER_BGRX			(0 << 20)
+#define   PLANE_CTL_ORDER_RGBX			(1 << 20)
+#define   PLANE_CTL_YUV422_ORDER_MASK		(0x3 << 16)
+#define   PLANE_CTL_YUV422_YUYV			(  0 << 16)
+#define   PLANE_CTL_YUV422_UYVY			(  1 << 16)
+#define   PLANE_CTL_YUV422_YVYU			(  2 << 16)
+#define   PLANE_CTL_YUV422_VYUY			(  3 << 16)
+#define   PLANE_CTL_DECOMPRESSION_ENABLE	(1 << 15)
+#define   PLANE_CTL_TRICKLE_FEED_DISABLE	(1 << 14)
+#define   PLANE_CTL_PLANE_GAMMA_DISABLE		(1 << 13)
+#define   PLANE_CTL_TILED_MASK			(0x7 << 10)
+#define   PLANE_CTL_TILED_LINEAR		(  0 << 10)
+#define   PLANE_CTL_TILED_X			(  1 << 10)
+#define   PLANE_CTL_TILED_Y			(  4 << 10)
+#define   PLANE_CTL_TILED_YF			(  5 << 10)
+#define   PLANE_CTL_ALPHA_MASK			(0x3 << 4)
+#define   PLANE_CTL_ALPHA_DISABLE		(  0 << 4)
+#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY	(  2 << 4)
+#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY	(  3 << 4)
+#define   PLANE_CTL_ROTATE_MASK			0x3
+#define   PLANE_CTL_ROTATE_0			0x0
+#define   PLANE_CTL_ROTATE_90			0x1
+#define   PLANE_CTL_ROTATE_180			0x2
+#define   PLANE_CTL_ROTATE_270			0x3
+#define _PLANE_STRIDE_1_A			0x70188
+#define _PLANE_STRIDE_2_A			0x70288
+#define _PLANE_STRIDE_3_A			0x70388
+#define _PLANE_POS_1_A				0x7018c
+#define _PLANE_POS_2_A				0x7028c
+#define _PLANE_POS_3_A				0x7038c
+#define _PLANE_SIZE_1_A				0x70190
+#define _PLANE_SIZE_2_A				0x70290
+#define _PLANE_SIZE_3_A				0x70390
+#define _PLANE_SURF_1_A				0x7019c
+#define _PLANE_SURF_2_A				0x7029c
+#define _PLANE_SURF_3_A				0x7039c
+#define _PLANE_OFFSET_1_A			0x701a4
+#define _PLANE_OFFSET_2_A			0x702a4
+#define _PLANE_OFFSET_3_A			0x703a4
+#define _PLANE_KEYVAL_1_A			0x70194
+#define _PLANE_KEYVAL_2_A			0x70294
+#define _PLANE_KEYMSK_1_A			0x70198
+#define _PLANE_KEYMSK_2_A			0x70298
+#define _PLANE_KEYMAX_1_A			0x701a0
+#define _PLANE_KEYMAX_2_A			0x702a0
+#define _PLANE_BUF_CFG_1_A			0x7027c
+#define _PLANE_BUF_CFG_2_A			0x7037c
+#define _PLANE_NV12_BUF_CFG_1_A		0x70278
+#define _PLANE_NV12_BUF_CFG_2_A		0x70378
+
+#define _PLANE_CTL_1_B				0x71180
+#define _PLANE_CTL_2_B				0x71280
+#define _PLANE_CTL_3_B				0x71380
+#define _PLANE_CTL_1(pipe)	_PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe)	_PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe)	_PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane)	\
+	_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
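+/*
+ * The plane banks are spaced 0x100 apart, so the linear _PLANE()
+ * interpolation between banks 1 and 2 also reaches bank 3, e.g.
+ * PLANE_CTL(PIPE_A, 2) = 0x70180 + 2 * 0x100 = _PLANE_CTL_3_A.
+ */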
+
+#define _PLANE_STRIDE_1_B			0x71188
+#define _PLANE_STRIDE_2_B			0x71288
+#define _PLANE_STRIDE_3_B			0x71388
+#define _PLANE_STRIDE_1(pipe)	\
+	_PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe)	\
+	_PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe)	\
+	_PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane)	\
+	_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B				0x7118c
+#define _PLANE_POS_2_B				0x7128c
+#define _PLANE_POS_3_B				0x7138c
+#define _PLANE_POS_1(pipe)	_PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe)	_PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe)	_PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane)	\
+	_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B				0x71190
+#define _PLANE_SIZE_2_B				0x71290
+#define _PLANE_SIZE_3_B				0x71390
+#define _PLANE_SIZE_1(pipe)	_PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe)	_PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe)	_PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane)	\
+	_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B				0x7119c
+#define _PLANE_SURF_2_B				0x7129c
+#define _PLANE_SURF_3_B				0x7139c
+#define _PLANE_SURF_1(pipe)	_PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe)	_PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe)	_PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane)	\
+	_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B			0x711a4
+#define _PLANE_OFFSET_2_B			0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane)	\
+	_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B			0x71194
+#define _PLANE_KEYVAL_2_B			0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane)	\
+	_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B			0x71198
+#define _PLANE_KEYMSK_2_B			0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane)	\
+	_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B			0x711a0
+#define _PLANE_KEYMAX_2_B			0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane)	\
+	_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
+#define _PLANE_BUF_CFG_1_B			0x7127c
+#define _PLANE_BUF_CFG_2_B			0x7137c
+#define _PLANE_BUF_CFG_1(pipe)	\
+	_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe)	\
+	_PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane)	\
+	_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+#define _PLANE_NV12_BUF_CFG_1_B		0x71278
+#define _PLANE_NV12_BUF_CFG_2_B		0x71378
+#define _PLANE_NV12_BUF_CFG_1(pipe)	\
+	_PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B)
+#define _PLANE_NV12_BUF_CFG_2(pipe)	\
+	_PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
+#define PLANE_NV12_BUF_CFG(pipe, plane)	\
+	_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A				0x7017c
+#define _CUR_BUF_CFG_B				0x7117c
+#define CUR_BUF_CFG(pipe)	_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
+/* VBIOS regs */
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+#define VLV_VGACNTRL		(VLV_DISPLAY_BASE + 0x71400)
+
+/* Ironlake */
+
+#define CPU_VGACNTRL	0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL	0x44030
+#define  DIGITAL_PORTA_HOTPLUG_ENABLE		(1 << 4)
+#define  DIGITAL_PORTA_PULSE_DURATION_2ms	(0 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_4_5ms	(1 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_6ms	(2 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_100ms	(3 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_MASK	(3 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_HOTPLUG_STATUS_MASK	(3 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_NO_DETECT	(0 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_LONG_DETECT	(2 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL       0x45300
+#define  RR_HW_LOW_POWER_FRAMES_MASK    0xff
+#define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
+
+#define FDI_PLL_BIOS_0  0x46000
+#define  FDI_PLL_FB_CLOCK_MASK  0xff
+#define FDI_PLL_BIOS_1  0x46004
+#define FDI_PLL_BIOS_2  0x46008
+#define DISPLAY_PORT_PLL_BIOS_0         0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1         0x46010
+#define DISPLAY_PORT_PLL_BIOS_2         0x46014
+
+#define PCH_3DCGDIS0		0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE		(1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE		(1 << 1)
+
+#define PCH_3DCGDIS1		0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+
+#define FDI_PLL_FREQ_CTL        0x46030
+#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1<<24)
+#define  FDI_PLL_FREQ_LOCK_LIMIT_MASK   0xfff00
+#define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
+
+
+#define _PIPEA_DATA_M1		0x60030
+#define  PIPE_DATA_M1_OFFSET    0
+#define _PIPEA_DATA_N1		0x60034
+#define  PIPE_DATA_N1_OFFSET    0
+
+#define _PIPEA_DATA_M2		0x60038
+#define  PIPE_DATA_M2_OFFSET    0
+#define _PIPEA_DATA_N2		0x6003c
+#define  PIPE_DATA_N2_OFFSET    0
+
+#define _PIPEA_LINK_M1		0x60040
+#define  PIPE_LINK_M1_OFFSET    0
+#define _PIPEA_LINK_N1		0x60044
+#define  PIPE_LINK_N1_OFFSET    0
+
+#define _PIPEA_LINK_M2		0x60048
+#define  PIPE_LINK_M2_OFFSET    0
+#define _PIPEA_LINK_N2		0x6004c
+#define  PIPE_LINK_N2_OFFSET    0
+
+/* PIPEB timing regs have the same layout, starting at 0x61000 */
+
+#define _PIPEB_DATA_M1		0x61030
+#define _PIPEB_DATA_N1		0x61034
+#define _PIPEB_DATA_M2		0x61038
+#define _PIPEB_DATA_N2		0x6103c
+#define _PIPEB_LINK_M1		0x61040
+#define _PIPEB_LINK_N1		0x61044
+#define _PIPEB_LINK_M2		0x61048
+#define _PIPEB_LINK_N2		0x6104c
+
+#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
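+/*
+ * _TRANSCODER2(), like _CURSOR2() above, rebases the transcoder A
+ * register using the per-transcoder offsets from the device info;
+ * this is how PIPE_DATA_M1(TRANSCODER_B) reaches _PIPEB_DATA_M1
+ * (0x61030) above.
+ */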
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters: fitter 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1               0x68080
+#define _PFB_CTL_1               0x68880
+#define  PF_ENABLE              (1<<31)
+#define  PF_PIPE_SEL_MASK_IVB	(3<<29)
+#define  PF_PIPE_SEL_IVB(pipe)	((pipe)<<29)
+#define  PF_FILTER_MASK		(3<<23)
+#define  PF_FILTER_PROGRAMMED	(0<<23)
+#define  PF_FILTER_MED_3x3	(1<<23)
+#define  PF_FILTER_EDGE_ENHANCE	(2<<23)
+#define  PF_FILTER_EDGE_SOFTEN	(3<<23)
+#define _PFA_WIN_SZ		0x68074
+#define _PFB_WIN_SZ		0x68874
+#define _PFA_WIN_POS		0x68070
+#define _PFB_WIN_POS		0x68870
+#define _PFA_VSCALE		0x68084
+#define _PFB_VSCALE		0x68884
+#define _PFA_HSCALE		0x68090
+#define _PFB_HSCALE		0x68890
+
+#define PF_CTL(pipe)		_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe)		_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe)	_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe)		_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe)		_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+#define _PSA_CTL		0x68180
+#define _PSB_CTL		0x68980
+#define PS_ENABLE		(1<<31)
+#define _PSA_WIN_SZ		0x68174
+#define _PSB_WIN_SZ		0x68974
+#define _PSA_WIN_POS		0x68170
+#define _PSB_WIN_POS		0x68970
+
+#define PS_CTL(pipe)		_PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe)		_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe)	_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+
+/*
+ * Skylake scalers
+ */
+#define _PS_1A_CTRL      0x68180
+#define _PS_2A_CTRL      0x68280
+#define _PS_1B_CTRL      0x68980
+#define _PS_2B_CTRL      0x68A80
+#define _PS_1C_CTRL      0x69180
+#define PS_SCALER_EN        (1 << 31)
+#define PS_SCALER_MODE_MASK (3 << 28)
+#define PS_SCALER_MODE_DYN  (0 << 28)
+#define PS_SCALER_MODE_HQ   (1 << 28)
+#define PS_PLANE_SEL_MASK  (7 << 25)
+#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
+#define PS_FILTER_MASK         (3 << 23)
+#define PS_FILTER_MEDIUM       (0 << 23)
+#define PS_FILTER_EDGE_ENHANCE (2 << 23)
+#define PS_FILTER_BILINEAR     (3 << 23)
+#define PS_VERT3TAP            (1 << 21)
+#define PS_VERT_INT_INVERT_FIELD1 (0 << 20)
+#define PS_VERT_INT_INVERT_FIELD0 (1 << 20)
+#define PS_PWRUP_PROGRESS         (1 << 17)
+#define PS_V_FILTER_BYPASS        (1 << 8)
+#define PS_VADAPT_EN              (1 << 7)
+#define PS_VADAPT_MODE_MASK        (3 << 5)
+#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
+#define PS_VADAPT_MODE_MOD_ADAPT   (1 << 5)
+#define PS_VADAPT_MODE_MOST_ADAPT  (3 << 5)
+
+#define _PS_PWR_GATE_1A     0x68160
+#define _PS_PWR_GATE_2A     0x68260
+#define _PS_PWR_GATE_1B     0x68960
+#define _PS_PWR_GATE_2B     0x68A60
+#define _PS_PWR_GATE_1C     0x69160
+#define PS_PWR_GATE_DIS_OVERRIDE       (1 << 31)
+#define PS_PWR_GATE_SETTLING_TIME_32   (0 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_64   (1 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_96   (2 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_128  (3 << 3)
+#define PS_PWR_GATE_SLPEN_8             0
+#define PS_PWR_GATE_SLPEN_16            1
+#define PS_PWR_GATE_SLPEN_24            2
+#define PS_PWR_GATE_SLPEN_32            3
+
+#define _PS_WIN_POS_1A      0x68170
+#define _PS_WIN_POS_2A      0x68270
+#define _PS_WIN_POS_1B      0x68970
+#define _PS_WIN_POS_2B      0x68A70
+#define _PS_WIN_POS_1C      0x69170
+
+#define _PS_WIN_SZ_1A       0x68174
+#define _PS_WIN_SZ_2A       0x68274
+#define _PS_WIN_SZ_1B       0x68974
+#define _PS_WIN_SZ_2B       0x68A74
+#define _PS_WIN_SZ_1C       0x69174
+
+#define _PS_VSCALE_1A       0x68184
+#define _PS_VSCALE_2A       0x68284
+#define _PS_VSCALE_1B       0x68984
+#define _PS_VSCALE_2B       0x68A84
+#define _PS_VSCALE_1C       0x69184
+
+#define _PS_HSCALE_1A       0x68190
+#define _PS_HSCALE_2A       0x68290
+#define _PS_HSCALE_1B       0x68990
+#define _PS_HSCALE_2B       0x68A90
+#define _PS_HSCALE_1C       0x69190
+
+#define _PS_VPHASE_1A       0x68188
+#define _PS_VPHASE_2A       0x68288
+#define _PS_VPHASE_1B       0x68988
+#define _PS_VPHASE_2B       0x68A88
+#define _PS_VPHASE_1C       0x69188
+
+#define _PS_HPHASE_1A       0x68194
+#define _PS_HPHASE_2A       0x68294
+#define _PS_HPHASE_1B       0x68994
+#define _PS_HPHASE_2B       0x68A94
+#define _PS_HPHASE_1C       0x69194
+
+#define _PS_ECC_STAT_1A     0x681D0
+#define _PS_ECC_STAT_2A     0x682D0
+#define _PS_ECC_STAT_1B     0x689D0
+#define _PS_ECC_STAT_2B     0x68AD0
+#define _PS_ECC_STAT_1C     0x691D0
+
+#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define SKL_PS_CTRL(pipe, id) _PIPE(pipe,        \
+			_ID(id, _PS_1A_CTRL, _PS_2A_CTRL),       \
+			_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe,    \
+			_ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+			_ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe,     \
+			_ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+			_ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define SKL_PS_WIN_SZ(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A),   \
+			_ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define SKL_PS_VSCALE(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A),   \
+			_ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+#define SKL_PS_HSCALE(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A),   \
+			_ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+#define SKL_PS_VPHASE(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A),   \
+			_ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define SKL_PS_HPHASE(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A),   \
+			_ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+#define SKL_PS_ECC_STAT(pipe, id)  _PIPE(pipe,     \
+			_ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A),   \
+			_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
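+/*
+ * _ID() interpolates between the scaler 1 and scaler 2 banks just as
+ * _PIPE() does between pipes, e.g. SKL_PS_CTRL(PIPE_A, 1) resolves to
+ * _PS_2A_CTRL (0x68280), and the 0x800 pipe stride means
+ * SKL_PS_CTRL(PIPE_C, 0) reaches _PS_1C_CTRL (0x69180) even though
+ * pipe C is not named in these macros.
+ */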
+
+/* legacy palette */
+#define _LGC_PALETTE_A           0x4a000
+#define _LGC_PALETTE_B           0x4a800
+#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
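+/*
+ * (i) indexes the 256 32-bit palette entries, e.g.
+ * LGC_PALETTE(PIPE_A, 255) = 0x4a000 + 255 * 4 = 0x4a3fc.
+ */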
+
+#define _GAMMA_MODE_A		0x4a480
+#define _GAMMA_MODE_B		0x4ac80
+#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
+#define GAMMA_MODE_MODE_MASK	(3 << 0)
+#define GAMMA_MODE_MODE_8BIT	(0 << 0)
+#define GAMMA_MODE_MODE_10BIT	(1 << 0)
+#define GAMMA_MODE_MODE_12BIT	(2 << 0)
+#define GAMMA_MODE_MODE_SPLIT	(3 << 0)
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL   (1 << 31)
+#define DE_SPRITEB_FLIP_DONE    (1 << 29)
+#define DE_SPRITEA_FLIP_DONE    (1 << 28)
+#define DE_PLANEB_FLIP_DONE     (1 << 27)
+#define DE_PLANEA_FLIP_DONE     (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
+#define DE_PCU_EVENT            (1 << 25)
+#define DE_GTT_FAULT            (1 << 24)
+#define DE_POISON               (1 << 23)
+#define DE_PERFORM_COUNTER      (1 << 22)
+#define DE_PCH_EVENT            (1 << 21)
+#define DE_AUX_CHANNEL_A        (1 << 20)
+#define DE_DP_A_HOTPLUG         (1 << 19)
+#define DE_GSE                  (1 << 18)
+#define DE_PIPEB_VBLANK         (1 << 15)
+#define DE_PIPEB_EVEN_FIELD     (1 << 14)
+#define DE_PIPEB_ODD_FIELD      (1 << 13)
+#define DE_PIPEB_LINE_COMPARE   (1 << 12)
+#define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_CRC_DONE	(1 << 10)
+#define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
+#define DE_PIPEA_VBLANK         (1 << 7)
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
+#define DE_PIPEA_EVEN_FIELD     (1 << 6)
+#define DE_PIPEA_ODD_FIELD      (1 << 5)
+#define DE_PIPEA_LINE_COMPARE   (1 << 4)
+#define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_CRC_DONE	(1 << 2)
+#define DE_PIPE_CRC_DONE(pipe)	(1 << (2 + 8*(pipe)))
+#define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
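+/*
+ * The parameterized helpers rely on the eight-bit stride between the
+ * pipe A and pipe B bit groups, e.g. DE_PIPE_VBLANK(PIPE_B) = 1 << 15,
+ * matching DE_PIPEB_VBLANK above.
+ */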
+
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB			(1<<30)
+#define DE_GSE_IVB			(1<<29)
+#define DE_PCH_EVENT_IVB		(1<<28)
+#define DE_DP_A_HOTPLUG_IVB		(1<<27)
+#define DE_AUX_CHANNEL_A_IVB		(1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB	(1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB		(1<<13)
+#define DE_PIPEC_VBLANK_IVB		(1<<10)
+#define DE_SPRITEB_FLIP_DONE_IVB	(1<<9)
+#define DE_PLANEB_FLIP_DONE_IVB		(1<<8)
+#define DE_PIPEB_VBLANK_IVB		(1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)	(1 << (3 + 5*(plane)))
+#define DE_PIPEA_VBLANK_IVB		(1<<0)
+#define DE_PIPE_VBLANK_IVB(pipe)	(1 << ((pipe) * 5))
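+/*
+ * IVB packs the per-pipe bits with a five-bit stride, e.g.
+ * DE_PLANE_FLIP_DONE_IVB(2) = 1 << 13 (DE_PLANEC_FLIP_DONE_IVB) and
+ * DE_PIPE_VBLANK_IVB(PIPE_C) = 1 << 10 (DE_PIPEC_VBLANK_IVB).
+ */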
+
+#define VLV_MASTER_IER			0x4400c /* Gunit master IER */
+#define   MASTER_INTERRUPT_ENABLE	(1<<31)
+
+#define DEISR   0x44000
+#define DEIMR   0x44004
+#define DEIIR   0x44008
+#define DEIER   0x4400c
+
+#define GTISR   0x44010
+#define GTIMR   0x44014
+#define GTIIR   0x44018
+#define GTIER   0x4401c
+
+#define GEN8_MASTER_IRQ			0x44200
+#define  GEN8_MASTER_IRQ_CONTROL	(1<<31)
+#define  GEN8_PCU_IRQ			(1<<30)
+#define  GEN8_DE_PCH_IRQ		(1<<23)
+#define  GEN8_DE_MISC_IRQ		(1<<22)
+#define  GEN8_DE_PORT_IRQ		(1<<20)
+#define  GEN8_DE_PIPE_C_IRQ		(1<<18)
+#define  GEN8_DE_PIPE_B_IRQ		(1<<17)
+#define  GEN8_DE_PIPE_A_IRQ		(1<<16)
+#define  GEN8_DE_PIPE_IRQ(pipe)		(1<<(16+(pipe)))
+#define  GEN8_GT_VECS_IRQ		(1<<6)
+#define  GEN8_GT_PM_IRQ			(1<<4)
+#define  GEN8_GT_VCS2_IRQ		(1<<3)
+#define  GEN8_GT_VCS1_IRQ		(1<<2)
+#define  GEN8_GT_BCS_IRQ		(1<<1)
+#define  GEN8_GT_RCS_IRQ		(1<<0)
+
+#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+
+#define GEN8_RCS_IRQ_SHIFT 0
+#define GEN8_BCS_IRQ_SHIFT 16
+#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VECS_IRQ_SHIFT 0
+#define GEN8_WD_IRQ_SHIFT 16
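+/*
+ * Each 0x10-sized GT interrupt bank serves two engines, one in the low
+ * half-word and one in the high, per the shifts above; e.g. bank 0
+ * (GEN8_GT_IIR(0) = 0x44308) carries RCS in bits 0-15 and BCS in
+ * bits 16-31.
+ */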
+
+#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define  GEN8_PIPE_FIFO_UNDERRUN	(1 << 31)
+#define  GEN8_PIPE_CDCLK_CRC_ERROR	(1 << 29)
+#define  GEN8_PIPE_CDCLK_CRC_DONE	(1 << 28)
+#define  GEN8_PIPE_CURSOR_FAULT		(1 << 10)
+#define  GEN8_PIPE_SPRITE_FAULT		(1 << 9)
+#define  GEN8_PIPE_PRIMARY_FAULT	(1 << 8)
+#define  GEN8_PIPE_SPRITE_FLIP_DONE	(1 << 5)
+#define  GEN8_PIPE_PRIMARY_FLIP_DONE	(1 << 4)
+#define  GEN8_PIPE_SCAN_LINE_EVENT	(1 << 2)
+#define  GEN8_PIPE_VSYNC		(1 << 1)
+#define  GEN8_PIPE_VBLANK		(1 << 0)
+#define  GEN9_PIPE_CURSOR_FAULT		(1 << 11)
+#define  GEN9_PIPE_PLANE4_FAULT		(1 << 10)
+#define  GEN9_PIPE_PLANE3_FAULT		(1 << 9)
+#define  GEN9_PIPE_PLANE2_FAULT		(1 << 8)
+#define  GEN9_PIPE_PLANE1_FAULT		(1 << 7)
+#define  GEN9_PIPE_PLANE4_FLIP_DONE	(1 << 6)
+#define  GEN9_PIPE_PLANE3_FLIP_DONE	(1 << 5)
+#define  GEN9_PIPE_PLANE2_FLIP_DONE	(1 << 4)
+#define  GEN9_PIPE_PLANE1_FLIP_DONE	(1 << 3)
+#define  GEN9_PIPE_PLANE_FLIP_DONE(p)	(1 << (3 + (p)))
+#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
+	(GEN8_PIPE_CURSOR_FAULT | \
+	 GEN8_PIPE_SPRITE_FAULT | \
+	 GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+	(GEN9_PIPE_CURSOR_FAULT | \
+	 GEN9_PIPE_PLANE4_FAULT | \
+	 GEN9_PIPE_PLANE3_FAULT | \
+	 GEN9_PIPE_PLANE2_FAULT | \
+	 GEN9_PIPE_PLANE1_FAULT)
+
+#define GEN8_DE_PORT_ISR 0x44440
+#define GEN8_DE_PORT_IMR 0x44444
+#define GEN8_DE_PORT_IIR 0x44448
+#define GEN8_DE_PORT_IER 0x4444c
+#define  GEN9_AUX_CHANNEL_D		(1 << 27)
+#define  GEN9_AUX_CHANNEL_C		(1 << 26)
+#define  GEN9_AUX_CHANNEL_B		(1 << 25)
+#define  BXT_DE_PORT_HP_DDIC		(1 << 5)
+#define  BXT_DE_PORT_HP_DDIB		(1 << 4)
+#define  BXT_DE_PORT_HP_DDIA		(1 << 3)
+#define  BXT_DE_PORT_HOTPLUG_MASK	(BXT_DE_PORT_HP_DDIA | \
+					 BXT_DE_PORT_HP_DDIB | \
+					 BXT_DE_PORT_HP_DDIC)
+#define  GEN8_PORT_DP_A_HOTPLUG		(1 << 3)
+#define  BXT_DE_PORT_GMBUS		(1 << 1)
+#define  GEN8_AUX_CHANNEL_A		(1 << 0)
+
+#define GEN8_DE_MISC_ISR 0x44460
+#define GEN8_DE_MISC_IMR 0x44464
+#define GEN8_DE_MISC_IIR 0x44468
+#define GEN8_DE_MISC_IER 0x4446c
+#define  GEN8_DE_MISC_GSE		(1 << 27)
+
+#define GEN8_PCU_ISR 0x444e0
+#define GEN8_PCU_IMR 0x444e4
+#define GEN8_PCU_IIR 0x444e8
+#define GEN8_PCU_IER 0x444ec
+
+#define ILK_DISPLAY_CHICKEN2	0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define  ILK_ELPIN_409_SELECT	(1 << 25)
+#define  ILK_DPARB_GATE	(1<<22)
+#define  ILK_VSDPFD_FULL	(1<<21)
+#define FUSE_STRAP			0x42014
+#define  ILK_INTERNAL_GRAPHICS_DISABLE	(1 << 31)
+#define  ILK_INTERNAL_DISPLAY_DISABLE	(1 << 30)
+#define  ILK_DISPLAY_DEBUG_DISABLE	(1 << 29)
+#define  ILK_HDCP_DISABLE		(1 << 25)
+#define  ILK_eDP_A_DISABLE		(1 << 24)
+#define  HSW_CDCLK_LIMIT		(1 << 24)
+#define  ILK_DESKTOP			(1 << 23)
+
+#define ILK_DSPCLK_GATE_D			0x42020
+#define   ILK_VRHUNIT_CLOCK_GATE_DISABLE	(1 << 28)
+#define   ILK_DPFCUNIT_CLOCK_GATE_DISABLE	(1 << 9)
+#define   ILK_DPFCRUNIT_CLOCK_GATE_DISABLE	(1 << 8)
+#define   ILK_DPFDUNIT_CLOCK_GATE_ENABLE	(1 << 7)
+#define   ILK_DPARBUNIT_CLOCK_GATE_ENABLE	(1 << 5)
+
+#define IVB_CHICKEN3	0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE	(1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
+
+#define CHICKEN_PAR1_1		0x42080
+#define  DPA_MASK_VBLANK_SRD	(1 << 15)
+#define  FORCE_ARB_IDLE_PLANES	(1 << 14)
+
+#define _CHICKEN_PIPESL_1_A	0x420b0
+#define _CHICKEN_PIPESL_1_B	0x420b4
+#define  HSW_FBCQ_DIS			(1 << 22)
+#define  BDW_DPRS_MASK_VBLANK_SRD	(1 << 0)
+#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+
+#define DISP_ARB_CTL	0x45000
+#define  DISP_FBC_WM_DIS		(1<<15)
+#define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
+#define DISP_ARB_CTL2	0x45004
+#define  DISP_DATA_PARTITION_5_6	(1<<6)
+#define DBUF_CTL	0x45008
+#define  DBUF_POWER_REQUEST		(1<<31)
+#define  DBUF_POWER_STATE		(1<<30)
+#define GEN7_MSG_CTL	0x45010
+#define  WAIT_FOR_PCH_RESET_ACK		(1<<1)
+#define  WAIT_FOR_PCH_FLR_ACK		(1<<0)
+#define HSW_NDE_RSTWRN_OPT	0x46408
+#define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
+
+#define SKL_DFSM			0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK	(3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675	(0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540	(1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450	(2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5	(3 << 23)
+
+#define FF_SLICE_CS_CHICKEN2			0x20e4
+#define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1		0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
+# define GEN9_RHWO_OPTIMIZATION_DISABLE		(1<<14)
+#define COMMON_SLICE_CHICKEN2			0x7014
+# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
+
+#define HIZ_CHICKEN					0x7018
+# define CHV_HZ_8X8_MODE_IN_1X				(1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE	(1<<3)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0		0x7308
+#define  DISABLE_PIXEL_MASK_CAMMING		(1<<14)
+
+#define GEN7_L3SQCREG1				0xB010
+#define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
+
+#define GEN8_L3SQCREG1				0xB100
+#define  BDW_WA_L3SQCREG1_DEFAULT		0x784000
+
+#define GEN7_L3CNTLREG1				0xB01C
+#define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C47FF8C
+#define  GEN7_L3AGDIS				(1<<19)
+#define GEN7_L3CNTLREG2				0xB020
+#define GEN7_L3CNTLREG3				0xB024
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER		0xB030
+#define  GEN7_WA_L3_CHICKEN_MODE				0x20000000
+
+#define GEN7_L3SQCREG4				0xb034
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
+
+#define GEN8_L3SQCREG4				0xb118
+#define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1<<21)
+
+/* GEN8 chicken */
+#define HDC_CHICKEN0				0x7300
+#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE	(1<<15)
+#define  HDC_FENCE_DEST_SLM_DISABLE		(1<<14)
+#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED	(1<<11)
+#define  HDC_BARRIER_PERFORMANCE_DISABLE	(1<<10)
+#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT	(1<<5)
+#define  HDC_FORCE_NON_COHERENT			(1<<4)
+
+/* GEN9 chicken */
+#define SLICE_ECO_CHICKEN0			0x7308
+#define   PIXEL_MASK_CAMMING_DISABLE		(1 << 14)
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
+#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
+
+#define HSW_SCRATCH1				0xb038
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE	(1<<27)
+
+#define BDW_SCRATCH1					0xb11c
+#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE	(1<<2)
+
+/* PCH */
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG         (1 << 11)
+#define SDE_PORTD_HOTPLUG       (1 << 10)
+#define SDE_PORTC_HOTPLUG       (1 << 9)
+#define SDE_PORTB_HOTPLUG       (1 << 8)
+#define SDE_SDVOB_HOTPLUG       (1 << 6)
+#define SDE_HOTPLUG_MASK        (SDE_CRT_HOTPLUG | \
+				 SDE_SDVOB_HOTPLUG |	\
+				 SDE_PORTB_HOTPLUG |	\
+				 SDE_PORTC_HOTPLUG |	\
+				 SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT	(1 << 31)
+#define SDE_AUDIO_POWER_C_CPT	(1 << 30)
+#define SDE_AUDIO_POWER_B_CPT	(1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT   29
+#define SDE_AUDIO_POWER_MASK_CPT    (7 << 29)
+#define SDE_AUXD_CPT		(1 << 27)
+#define SDE_AUXC_CPT		(1 << 26)
+#define SDE_AUXB_CPT		(1 << 25)
+#define SDE_AUX_MASK_CPT	(7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT	(1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT	(1 << 24)
+#define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
+#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT	(1 << 18)
+#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
+				 SDE_SDVOB_HOTPLUG_CPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT	(SDE_PORTE_HOTPLUG_SPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT |	\
+				 SDE_PORTA_HOTPLUG_SPT)
+#define SDE_GMBUS_CPT		(1 << 17)
+#define SDE_ERROR_CPT		(1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT	(1 << 9)
+#define SDE_FDI_RXC_CPT		(1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT	(1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT	(1 << 5)
+#define SDE_FDI_RXB_CPT		(1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT	(1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT	(1 << 1)
+#define SDE_FDI_RXA_CPT		(1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT	(SDE_AUDIO_CP_REQ_C_CPT | \
+				 SDE_AUDIO_CP_REQ_B_CPT | \
+				 SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT	(SDE_AUDIO_CP_CHG_C_CPT | \
+				 SDE_AUDIO_CP_CHG_B_CPT | \
+				 SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT	(SDE_FDI_RXC_CPT | \
+				 SDE_FDI_RXB_CPT | \
+				 SDE_FDI_RXA_CPT)
+
+#define SDEISR  0xc4000
+#define SDEIMR  0xc4004
+#define SDEIIR  0xc4008
+#define SDEIER  0xc400c
+
+#define SERR_INT			0xc4040
+#define  SERR_INT_POISON		(1<<31)
+#define  SERR_INT_TRANS_C_FIFO_UNDERRUN	(1<<6)
+#define  SERR_INT_TRANS_B_FIFO_UNDERRUN	(1<<3)
+#define  SERR_INT_TRANS_A_FIFO_UNDERRUN	(1<<0)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)	(1<<((pipe)*3))
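+/*
+ * The underrun bits are spaced three apart, e.g.
+ * SERR_INT_TRANS_FIFO_UNDERRUN(PIPE_B) = 1 << 3
+ * (SERR_INT_TRANS_B_FIFO_UNDERRUN).
+ */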
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG		0xc4030	/* SHOTPLUG_CTL */
+#define  PORTA_HOTPLUG_ENABLE		(1 << 28) /* LPT:LP+ & BXT */
+#define  PORTA_HOTPLUG_STATUS_MASK	(3 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_NO_DETECT	(0 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_SHORT_DETECT	(1 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_LONG_DETECT	(2 << 24) /* SPT+ & BXT */
+#define  PORTD_HOTPLUG_ENABLE		(1 << 20)
+#define  PORTD_PULSE_DURATION_2ms	(0 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_4_5ms	(1 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_6ms	(2 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_100ms	(3 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_MASK	(3 << 18) /* pre-LPT */
+#define  PORTD_HOTPLUG_STATUS_MASK	(3 << 16)
+#define  PORTD_HOTPLUG_NO_DETECT	(0 << 16)
+#define  PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
+#define  PORTD_HOTPLUG_LONG_DETECT	(2 << 16)
+#define  PORTC_HOTPLUG_ENABLE		(1 << 12)
+#define  PORTC_PULSE_DURATION_2ms	(0 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_4_5ms	(1 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_6ms	(2 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_100ms	(3 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_MASK	(3 << 10) /* pre-LPT */
+#define  PORTC_HOTPLUG_STATUS_MASK	(3 << 8)
+#define  PORTC_HOTPLUG_NO_DETECT	(0 << 8)
+#define  PORTC_HOTPLUG_SHORT_DETECT	(1 << 8)
+#define  PORTC_HOTPLUG_LONG_DETECT	(2 << 8)
+#define  PORTB_HOTPLUG_ENABLE		(1 << 4)
+#define  PORTB_PULSE_DURATION_2ms	(0 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_4_5ms	(1 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_6ms	(2 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_100ms	(3 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_MASK	(3 << 2) /* pre-LPT */
+#define  PORTB_HOTPLUG_STATUS_MASK	(3 << 0)
+#define  PORTB_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
+
+#define PCH_PORT_HOTPLUG2		0xc403C	/* SHOTPLUG_CTL2 SPT+ */
+#define  PORTE_HOTPLUG_ENABLE		(1 << 4)
+#define  PORTE_HOTPLUG_STATUS_MASK	(3 << 0)
+#define  PORTE_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTE_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTE_HOTPLUG_LONG_DETECT	(2 << 0)
+
+#define PCH_GPIOA               0xc5010
+#define PCH_GPIOB               0xc5014
+#define PCH_GPIOC               0xc5018
+#define PCH_GPIOD               0xc501c
+#define PCH_GPIOE               0xc5020
+#define PCH_GPIOF               0xc5024
+
+#define PCH_GMBUS0		0xc5100
+#define PCH_GMBUS1		0xc5104
+#define PCH_GMBUS2		0xc5108
+#define PCH_GMBUS3		0xc510c
+#define PCH_GMBUS4		0xc5110
+#define PCH_GMBUS5		0xc5120
+
+#define _PCH_DPLL_A              0xc6014
+#define _PCH_DPLL_B              0xc6018
+#define PCH_DPLL(pll) ((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0                0xc6040
+#define  FP_CB_TUNE		(0x3<<22)
+#define _PCH_FPA1                0xc6044
+#define _PCH_FPB0                0xc6048
+#define _PCH_FPB1                0xc604c
+#define PCH_FP0(pll) ((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) ((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST           0xc606c
+
+#define PCH_DREF_CONTROL        0xC6200
+#define  DREF_CONTROL_MASK      0x7fc3
+#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_MASK		(3<<13)
+#define  DREF_SSC_SOURCE_DISABLE                (0<<11)
+#define  DREF_SSC_SOURCE_ENABLE                 (2<<11)
+#define  DREF_SSC_SOURCE_MASK			(3<<11)
+#define  DREF_NONSPREAD_SOURCE_DISABLE          (0<<9)
+#define  DREF_NONSPREAD_CK505_ENABLE		(1<<9)
+#define  DREF_NONSPREAD_SOURCE_ENABLE           (2<<9)
+#define  DREF_NONSPREAD_SOURCE_MASK		(3<<9)
+#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0<<7)
+#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2<<7)
+#define  DREF_SUPERSPREAD_SOURCE_MASK		(3<<7)
+#define  DREF_SSC4_DOWNSPREAD                   (0<<6)
+#define  DREF_SSC4_CENTERSPREAD                 (1<<6)
+#define  DREF_SSC1_DISABLE                      (0<<1)
+#define  DREF_SSC1_ENABLE                       (1<<1)
+#define  DREF_SSC4_DISABLE                      (0)
+#define  DREF_SSC4_ENABLE                       (1)
+
+#define PCH_RAWCLK_FREQ         0xc6204
+#define  FDL_TP1_TIMER_SHIFT    12
+#define  FDL_TP1_TIMER_MASK     (3<<12)
+#define  FDL_TP2_TIMER_SHIFT    10
+#define  FDL_TP2_TIMER_MASK     (3<<10)
+#define  RAWCLK_FREQ_MASK       0x3ff
+
+#define PCH_DPLL_TMR_CFG        0xc6208
+
+#define PCH_SSC4_PARMS          0xc6210
+#define PCH_SSC4_AUX_PARMS      0xc6214
+
+#define PCH_DPLL_SEL		0xc7000
+#define  TRANS_DPLLB_SEL(pipe)		(1 << ((pipe) * 4))
+#define  TRANS_DPLLA_SEL(pipe)		0
+#define  TRANS_DPLL_ENABLE(pipe)	(1 << ((pipe) * 4 + 3))
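+/*
+ * PCH_DPLL_SEL uses one nibble per transcoder: bit (pipe * 4) picks
+ * DPLL B over DPLL A and bit (pipe * 4 + 3) enables the DPLL, e.g.
+ * TRANS_DPLL_ENABLE(PIPE_B) = 1 << 7.
+ */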
+
+/* transcoder */
+
+#define _PCH_TRANS_HTOTAL_A		0xe0000
+#define  TRANS_HTOTAL_SHIFT		16
+#define  TRANS_HACTIVE_SHIFT		0
+#define _PCH_TRANS_HBLANK_A		0xe0004
+#define  TRANS_HBLANK_END_SHIFT		16
+#define  TRANS_HBLANK_START_SHIFT	0
+#define _PCH_TRANS_HSYNC_A		0xe0008
+#define  TRANS_HSYNC_END_SHIFT		16
+#define  TRANS_HSYNC_START_SHIFT	0
+#define _PCH_TRANS_VTOTAL_A		0xe000c
+#define  TRANS_VTOTAL_SHIFT		16
+#define  TRANS_VACTIVE_SHIFT		0
+#define _PCH_TRANS_VBLANK_A		0xe0010
+#define  TRANS_VBLANK_END_SHIFT		16
+#define  TRANS_VBLANK_START_SHIFT	0
+#define _PCH_TRANS_VSYNC_A		0xe0014
+#define  TRANS_VSYNC_END_SHIFT		16
+#define  TRANS_VSYNC_START_SHIFT	0
+#define _PCH_TRANS_VSYNCSHIFT_A		0xe0028
+
+#define _PCH_TRANSA_DATA_M1	0xe0030
+#define _PCH_TRANSA_DATA_N1	0xe0034
+#define _PCH_TRANSA_DATA_M2	0xe0038
+#define _PCH_TRANSA_DATA_N2	0xe003c
+#define _PCH_TRANSA_LINK_M1	0xe0040
+#define _PCH_TRANSA_LINK_N1	0xe0044
+#define _PCH_TRANSA_LINK_M2	0xe0048
+#define _PCH_TRANSA_LINK_N2	0xe004c
+
+/* Per-transcoder DIP controls (PCH) */
+#define _VIDEO_DIP_CTL_A         0xe0200
+#define _VIDEO_DIP_DATA_A        0xe0208
+#define _VIDEO_DIP_GCP_A         0xe0210
+#define  GCP_COLOR_INDICATION		(1 << 2)
+#define  GCP_DEFAULT_PHASE_ENABLE	(1 << 1)
+#define  GCP_AV_MUTE			(1 << 0)
+
+#define _VIDEO_DIP_CTL_B         0xe1200
+#define _VIDEO_DIP_DATA_B        0xe1208
+#define _VIDEO_DIP_GCP_B         0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
+/* Per-transcoder DIP controls (VLV) */
+#define VLV_VIDEO_DIP_CTL_A		(VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A		(VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	(VLV_DISPLAY_BASE + 0x60210)
+
+#define VLV_VIDEO_DIP_CTL_B		(VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B		(VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	(VLV_DISPLAY_BASE + 0x61178)
+
+#define CHV_VIDEO_DIP_CTL_C		(VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C		(VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C	(VLV_DISPLAY_BASE + 0x611f8)
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+	_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+	       VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+	_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+	       VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+	_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+		VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A		0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A	0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A		0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A	0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A	0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A	0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A		0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A		0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A		0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A		0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A		0x60344
+#define HSW_VIDEO_DIP_GCP_A		0x60210
+
+#define HSW_VIDEO_DIP_CTL_B		0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B	0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B		0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B	0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B	0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B	0x61320
+#define HSW_VIDEO_DIP_AVI_ECC_B		0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B		0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B		0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B		0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
+#define HSW_VIDEO_DIP_GCP_B		0x61210
+
+#define HSW_TVIDEO_DIP_CTL(trans) \
+	 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
+	(_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
+#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
+	(_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
+	(_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+	_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
+	(_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
+
+#define HSW_STEREO_3D_CTL_A	0x70020
+#define   S3D_ENABLE		(1<<31)
+#define HSW_STEREO_3D_CTL_B	0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+	_PIPE2(trans, HSW_STEREO_3D_CTL_A)
+
+#define _PCH_TRANS_HTOTAL_B          0xe1000
+#define _PCH_TRANS_HBLANK_B          0xe1004
+#define _PCH_TRANS_HSYNC_B           0xe1008
+#define _PCH_TRANS_VTOTAL_B          0xe100c
+#define _PCH_TRANS_VBLANK_B          0xe1010
+#define _PCH_TRANS_VSYNC_B           0xe1014
+#define _PCH_TRANS_VSYNCSHIFT_B	 0xe1028
+
+#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
+					 _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSB_DATA_M1	0xe1030
+#define _PCH_TRANSB_DATA_N1	0xe1034
+#define _PCH_TRANSB_DATA_M2	0xe1038
+#define _PCH_TRANSB_DATA_N2	0xe103c
+#define _PCH_TRANSB_LINK_M1	0xe1040
+#define _PCH_TRANSB_LINK_N1	0xe1044
+#define _PCH_TRANSB_LINK_M2	0xe1048
+#define _PCH_TRANSB_LINK_N2	0xe104c
+
+#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+#define _PCH_TRANSACONF              0xf0008
+#define _PCH_TRANSBCONF              0xf1008
+#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF		_PCH_TRANSACONF /* lpt has only one transcoder */
+#define  TRANS_DISABLE          (0<<31)
+#define  TRANS_ENABLE           (1<<31)
+#define  TRANS_STATE_MASK       (1<<30)
+#define  TRANS_STATE_DISABLE    (0<<30)
+#define  TRANS_STATE_ENABLE     (1<<30)
+#define  TRANS_FSYNC_DELAY_HB1  (0<<27)
+#define  TRANS_FSYNC_DELAY_HB2  (1<<27)
+#define  TRANS_FSYNC_DELAY_HB3  (2<<27)
+#define  TRANS_FSYNC_DELAY_HB4  (3<<27)
+#define  TRANS_INTERLACE_MASK   (7<<21)
+#define  TRANS_PROGRESSIVE      (0<<21)
+#define  TRANS_INTERLACED       (3<<21)
+#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
+#define  TRANS_8BPC             (0<<5)
+#define  TRANS_10BPC            (1<<5)
+#define  TRANS_6BPC             (2<<5)
+#define  TRANS_12BPC            (3<<5)
+
+#define _TRANSA_CHICKEN1	 0xf0060
+#define _TRANSB_CHICKEN1	 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE	(1<<10)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
+#define _TRANSA_CHICKEN2	 0xf0064
+#define _TRANSB_CHICKEN2	 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE			(1<<31)
+#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED		(1<<29)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK		(3<<27)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER	(1<<26)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH	(1<<25)
+
+#define SOUTH_CHICKEN1		0xc2000
+#define  FDIA_PHASE_SYNC_SHIFT_OVR	19
+#define  FDIA_PHASE_SYNC_SHIFT_EN	18
+#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
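+/*
+ * The per-pipe bits step down two positions per pipe, e.g.
+ * FDI_PHASE_SYNC_OVR(PIPE_B) = 1 << 17 and
+ * FDI_PHASE_SYNC_EN(PIPE_B) = 1 << 16.
+ */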
+#define  FDI_BC_BIFURCATION_SELECT	(1 << 12)
+#define  SPT_PWM_GRANULARITY		(1<<0)
+#define SOUTH_CHICKEN2		0xc2004
+#define  FDI_MPHY_IOSFSB_RESET_STATUS	(1<<13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL	(1<<12)
+#define  LPT_PWM_GRANULARITY		(1<<5)
+#define  DPLS_EDP_PPS_FIX_DIS		(1<<0)
+
+#define _FDI_RXA_CHICKEN         0xc200c
+#define _FDI_RXB_CHICKEN         0xc2010
+#define  FDI_RX_PHASE_SYNC_POINTER_OVR	(1<<1)
+#define  FDI_RX_PHASE_SYNC_POINTER_EN	(1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D	0xc2020
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
+
+/* CPU: FDI_TX */
+#define _FDI_TXA_CTL             0x60100
+#define _FDI_TXB_CTL             0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define  FDI_TX_DISABLE         (0<<31)
+#define  FDI_TX_ENABLE          (1<<31)
+#define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
+#define  FDI_LINK_TRAIN_PATTERN_2       (1<<28)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2<<28)
+#define  FDI_LINK_TRAIN_NONE            (3<<28)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3<<25)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level;
+   SNB has different settings. */
+/* SNB A-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
+#define  FDI_DP_PORT_WIDTH_SHIFT		19
+#define  FDI_DP_PORT_WIDTH_MASK			(7 << FDI_DP_PORT_WIDTH_SHIFT)
+#define  FDI_DP_PORT_WIDTH(width)           (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
+#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
+/* Ironlake: hardwired to 1 */
+#define  FDI_TX_PLL_ENABLE              (1<<14)
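+
+/*
+ * Illustrative sketch (not from the original tree): on SNB the desired
+ * swing/emphasis pair from the tables above is programmed into
+ * FDI_TX_CTL during link training, e.g. for a B-stepping part,
+ * assuming the usual I915_READ/I915_WRITE MMIO accessors:
+ *
+ *	u32 temp = I915_READ(FDI_TX_CTL(pipe));
+ *	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ *	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ *	I915_WRITE(FDI_TX_CTL(pipe), temp);
+ */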
+
+/* Ivybridge has different bits for lolz */
+#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2<<8)
+#define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
+
+/* both Tx and Rx */
+#define  FDI_COMPOSITE_SYNC		(1<<11)
+#define  FDI_LINK_TRAIN_AUTO		(1<<10)
+#define  FDI_SCRAMBLING_ENABLE          (0<<7)
+#define  FDI_SCRAMBLING_DISABLE         (1<<7)
+
+/* FDI_RX: FDI_X is hard-wired to Transcoder_X */
+#define _FDI_RXA_CTL             0xf000c
+#define _FDI_RXB_CTL             0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define  FDI_RX_ENABLE          (1<<31)
+/* train, dp width same as FDI_TX */
+#define  FDI_FS_ERRC_ENABLE		(1<<27)
+#define  FDI_FE_ERRC_ENABLE		(1<<26)
+#define  FDI_RX_POLARITY_REVERSED_LPT	(1<<16)
+#define  FDI_8BPC                       (0<<16)
+#define  FDI_10BPC                      (1<<16)
+#define  FDI_6BPC                       (2<<16)
+#define  FDI_12BPC                      (3<<16)
+#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1<<15)
+#define  FDI_DMI_LINK_REVERSE_MASK      (1<<14)
+#define  FDI_RX_PLL_ENABLE              (1<<13)
+#define  FDI_FS_ERR_CORRECT_ENABLE      (1<<11)
+#define  FDI_FE_ERR_CORRECT_ENABLE      (1<<10)
+#define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
+#define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
+#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
+#define  FDI_PCDCLK	                (1<<4)
+/* CPT */
+#define  FDI_AUTO_TRAINING			(1<<10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT		(1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
+
+#define _FDI_RXA_MISC			0xf0010
+#define _FDI_RXB_MISC			0xf1010
+#define  FDI_RX_PWRDN_LANE1_MASK	(3<<26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)	((x)<<26)
+#define  FDI_RX_PWRDN_LANE0_MASK	(3<<24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)	((x)<<24)
+#define  FDI_RX_TP1_TO_TP2_48		(2<<20)
+#define  FDI_RX_TP1_TO_TP2_64		(3<<20)
+#define  FDI_RX_FDI_DELAY_90		(0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
+#define _FDI_RXA_TUSIZE1         0xf0030
+#define _FDI_RXA_TUSIZE2         0xf0038
+#define _FDI_RXB_TUSIZE1         0xf1030
+#define _FDI_RXB_TUSIZE2         0xf1038
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN         (1<<10)
+#define FDI_RX_SYMBOL_LOCK              (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK                 (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1<<7)
+#define FDI_RX_FS_CODE_ERR              (1<<6)
+#define FDI_RX_FE_CODE_ERR              (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL           (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1<<0)
+
+#define _FDI_RXA_IIR             0xf0014
+#define _FDI_RXA_IMR             0xf0018
+#define _FDI_RXB_IIR             0xf1014
+#define _FDI_RXB_IMR             0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+
+#define FDI_PLL_CTL_1           0xfe000
+#define FDI_PLL_CTL_2           0xfe004
+
+#define PCH_LVDS	0xe1180
+#define  LVDS_DETECTED	(1 << 1)
+
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define  PANEL_PORT_SELECT_VLV(port)	((port) << 30)
+#define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS         (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61310)
+
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+		_PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+		_PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+		_PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+
+#define PCH_PP_STATUS		0xc7200
+#define PCH_PP_CONTROL		0xc7204
+#define  PANEL_UNLOCK_REGS	(0xabcd << 16)
+#define  PANEL_UNLOCK_MASK	(0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
+#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
+#define  EDP_FORCE_VDD		(1 << 3)
+#define  EDP_BLC_ENABLE		(1 << 2)
+#define  PANEL_POWER_RESET	(1 << 1)
+#define  PANEL_POWER_OFF	(0 << 0)
+#define  PANEL_POWER_ON		(1 << 0)
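+
+/*
+ * Illustrative sketch (not from the original tree): the defines above
+ * imply a write-protect key; a panel power sequencer write only takes
+ * effect with the unlock pattern in the upper bits, assuming the usual
+ * I915_READ/I915_WRITE MMIO accessors:
+ *
+ *	u32 ctl = I915_READ(PCH_PP_CONTROL) & ~PANEL_UNLOCK_MASK;
+ *	I915_WRITE(PCH_PP_CONTROL, ctl | PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ */
+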
+#define PCH_PP_ON_DELAYS	0xc7208
+#define  PANEL_PORT_SELECT_MASK	(3 << 30)
+#define  PANEL_PORT_SELECT_LVDS	(0 << 30)
+#define  PANEL_PORT_SELECT_DPA	(1 << 30)
+#define  PANEL_PORT_SELECT_DPC	(2 << 30)
+#define  PANEL_PORT_SELECT_DPD	(3 << 30)
+#define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_UP_DELAY_SHIFT	16
+#define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
+
+#define PCH_PP_OFF_DELAYS	0xc720c
+#define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PCH_PP_DIVISOR		0xc7210
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
+
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2 	0xc7300
+#define _BXT_PP_CONTROL2 	0xc7304
+#define _BXT_PP_ON_DELAYS2	0xc7308
+#define _BXT_PP_OFF_DELAYS2	0xc730c
+
+#define BXT_PP_STATUS(n)	_PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)	_PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)	_PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)	_PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
+
+#define PCH_DP_B		0xe4100
+#define PCH_DPB_AUX_CH_CTL	0xe4110
+#define PCH_DPB_AUX_CH_DATA1	0xe4114
+#define PCH_DPB_AUX_CH_DATA2	0xe4118
+#define PCH_DPB_AUX_CH_DATA3	0xe411c
+#define PCH_DPB_AUX_CH_DATA4	0xe4120
+#define PCH_DPB_AUX_CH_DATA5	0xe4124
+
+#define PCH_DP_C		0xe4200
+#define PCH_DPC_AUX_CH_CTL	0xe4210
+#define PCH_DPC_AUX_CH_DATA1	0xe4214
+#define PCH_DPC_AUX_CH_DATA2	0xe4218
+#define PCH_DPC_AUX_CH_DATA3	0xe421c
+#define PCH_DPC_AUX_CH_DATA4	0xe4220
+#define PCH_DPC_AUX_CH_DATA5	0xe4224
+
+#define PCH_DP_D		0xe4300
+#define PCH_DPD_AUX_CH_CTL	0xe4310
+#define PCH_DPD_AUX_CH_DATA1	0xe4314
+#define PCH_DPD_AUX_CH_DATA2	0xe4318
+#define PCH_DPD_AUX_CH_DATA3	0xe431c
+#define PCH_DPD_AUX_CH_DATA4	0xe4320
+#define PCH_DPD_AUX_CH_DATA5	0xe4324
+
+/* CPT */
+#define  PORT_TRANS_A_SEL_CPT	0
+#define  PORT_TRANS_B_SEL_CPT	(1<<29)
+#define  PORT_TRANS_C_SEL_CPT	(2<<29)
+#define  PORT_TRANS_SEL_MASK	(3<<29)
+#define  PORT_TRANS_SEL_CPT(pipe)	((pipe) << 29)
+#define  PORT_TO_PIPE(val)	(((val) & (1<<30)) >> 30)
+#define  PORT_TO_PIPE_CPT(val)	(((val) & PORT_TRANS_SEL_MASK) >> 29)
+#define  SDVO_PORT_TO_PIPE_CHV(val)	(((val) & (3<<24)) >> 24)
+#define  DP_PORT_TO_PIPE_CHV(val)	(((val) & (3<<16)) >> 16)
+
+#define TRANS_DP_CTL_A		0xe0300
+#define TRANS_DP_CTL_B		0xe1300
+#define TRANS_DP_CTL_C		0xe2300
+#define TRANS_DP_CTL(pipe)	_PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
+#define  TRANS_DP_PORT_SEL_B	(0<<29)
+#define  TRANS_DP_PORT_SEL_C	(1<<29)
+#define  TRANS_DP_PORT_SEL_D	(2<<29)
+#define  TRANS_DP_PORT_SEL_NONE	(3<<29)
+#define  TRANS_DP_PORT_SEL_MASK	(3<<29)
+#define  TRANS_DP_PIPE_TO_PORT(val)	((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
+#define  TRANS_DP_AUDIO_ONLY	(1<<26)
+#define  TRANS_DP_ENH_FRAMING	(1<<18)
+#define  TRANS_DP_8BPC		(0<<9)
+#define  TRANS_DP_10BPC		(1<<9)
+#define  TRANS_DP_6BPC		(2<<9)
+#define  TRANS_DP_12BPC		(3<<9)
+#define  TRANS_DP_BPC_MASK	(3<<9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
+#define  TRANS_DP_VSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_SYNC_MASK	(3<<3)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB		(0x24<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB		(0x2a<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB		(0x2f<<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38<<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e<<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00<<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB		(0x20<<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB		(0x02<<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB		(0x22<<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB		(0x23<<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
+
+#define  VLV_PMWGICZ				0x1300a4
+
+#define  FORCEWAKE				0xA18C
+#define  FORCEWAKE_VLV				0x1300b0
+#define  FORCEWAKE_ACK_VLV			0x1300b4
+#define  FORCEWAKE_MEDIA_VLV			0x1300b8
+#define  FORCEWAKE_ACK_MEDIA_VLV		0x1300bc
+#define  FORCEWAKE_ACK_HSW			0x130044
+#define  FORCEWAKE_ACK				0x130090
+#define  VLV_GTLC_WAKE_CTRL			0x130090
+#define   VLV_GTLC_RENDER_CTX_EXISTS		(1 << 25)
+#define   VLV_GTLC_MEDIA_CTX_EXISTS		(1 << 24)
+#define   VLV_GTLC_ALLOWWAKEREQ			(1 << 0)
+
+#define  VLV_GTLC_PW_STATUS			0x130094
+#define   VLV_GTLC_ALLOWWAKEACK			(1 << 0)
+#define   VLV_GTLC_ALLOWWAKEERR			(1 << 1)
+#define   VLV_GTLC_PW_MEDIA_STATUS_MASK		(1 << 5)
+#define   VLV_GTLC_PW_RENDER_STATUS_MASK	(1 << 7)
+#define  FORCEWAKE_MT				0xa188 /* multi-threaded */
+#define  FORCEWAKE_MEDIA_GEN9			0xa270
+#define  FORCEWAKE_RENDER_GEN9			0xa278
+#define  FORCEWAKE_BLITTER_GEN9			0xa188
+#define  FORCEWAKE_ACK_MEDIA_GEN9		0x0D88
+#define  FORCEWAKE_ACK_RENDER_GEN9		0x0D84
+#define  FORCEWAKE_ACK_BLITTER_GEN9		0x130044
+#define   FORCEWAKE_KERNEL			0x1
+#define   FORCEWAKE_USER			0x2
+#define  FORCEWAKE_MT_ACK			0x130040
+#define  ECOBUS					0xa180
+#define    FORCEWAKE_MT_ENABLE			(1<<5)
+#define  VLV_SPAREG2H				0xA194
+
+#define  GTFIFODBG				0x120000
+#define    GT_FIFO_SBDROPERR			(1<<6)
+#define    GT_FIFO_BLOBDROPERR			(1<<5)
+#define    GT_FIFO_SB_READ_ABORTERR		(1<<4)
+#define    GT_FIFO_DROPERR			(1<<3)
+#define    GT_FIFO_OVFERR			(1<<2)
+#define    GT_FIFO_IAWRERR			(1<<1)
+#define    GT_FIFO_IARDERR			(1<<0)
+
+#define  GTFIFOCTL				0x120008
+#define    GT_FIFO_FREE_ENTRIES_MASK		0x7f
+#define    GT_FIFO_NUM_RESERVED_ENTRIES		20
+#define    GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL	(1 << 12)
+#define    GT_FIFO_CTL_RC6_POLICY_STALL		(1 << 11)
+
+#define  HSW_IDICR				0x9008
+#define    IDIHASHMSK(x)			(((x) & 0x3f) << 16)
+#define  HSW_EDRAM_PRESENT			0x120010
+#define    EDRAM_ENABLED			0x1
+
+#define GEN6_UCGCTL1				0x9400
+# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE		(1 << 16)
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE			(1 << 7)
+
+#define GEN6_UCGCTL2				0x9404
+# define GEN6_VFUNIT_CLOCK_GATE_DISABLE			(1 << 31)
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE		(1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE		(1 << 22)
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+
+#define GEN6_UCGCTL3				0x9408
+
+#define GEN7_UCGCTL4				0x940c
+#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE	(1<<25)
+
+#define GEN6_RCGCTL1				0x9410
+#define GEN6_RCGCTL2				0x9414
+#define GEN6_RSTCTL				0x9420
+
+#define GEN8_UCGCTL6				0x9430
+#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE	(1<<24)
+#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
+#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+
+#define GEN6_GFXPAUSE				0xA000
+#define GEN6_RPNSWREQ				0xA008
+#define   GEN6_TURBO_DISABLE			(1<<31)
+#define   GEN6_FREQUENCY(x)			((x)<<25)
+#define   HSW_FREQUENCY(x)			((x)<<24)
+#define   GEN9_FREQUENCY(x)			((x)<<23)
+#define   GEN6_OFFSET(x)			((x)<<19)
+#define   GEN6_AGGRESSIVE_TURBO			(0<<15)
+#define GEN6_RC_VIDEO_FREQ			0xA00C
+#define GEN6_RC_CONTROL				0xA090
+#define   GEN6_RC_CTL_RC6pp_ENABLE		(1<<16)
+#define   GEN6_RC_CTL_RC6p_ENABLE		(1<<17)
+#define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
+#define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
+#define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL		(1<<24)
+#define   GEN7_RC_CTL_TO_MODE			(1<<28)
+#define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
+#define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
+#define GEN6_RP_DOWN_TIMEOUT			0xA010
+#define GEN6_RP_INTERRUPT_LIMITS		0xA014
+#define GEN6_RPSTAT1				0xA01C
+#define   GEN6_CAGF_SHIFT			8
+#define   HSW_CAGF_SHIFT			7
+#define   GEN9_CAGF_SHIFT			23
+#define   GEN6_CAGF_MASK			(0x7f << GEN6_CAGF_SHIFT)
+#define   HSW_CAGF_MASK				(0x7f << HSW_CAGF_SHIFT)
+#define   GEN9_CAGF_MASK			(0x1ff << GEN9_CAGF_SHIFT)
+#define GEN6_RP_CONTROL				0xA024
+#define   GEN6_RP_MEDIA_TURBO			(1<<11)
+#define   GEN6_RP_MEDIA_MODE_MASK		(3<<9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE		(3<<9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE		(2<<9)
+#define   GEN6_RP_MEDIA_HW_MODE			(1<<9)
+#define   GEN6_RP_MEDIA_SW_MODE			(0<<9)
+#define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
+#define   GEN6_RP_ENABLE			(1<<7)
+#define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
+#define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
+#define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
+#define   GEN6_RP_DOWN_IDLE_AVG			(0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
+#define GEN6_RP_UP_THRESHOLD			0xA02C
+#define GEN6_RP_DOWN_THRESHOLD			0xA030
+#define GEN6_RP_CUR_UP_EI			0xA050
+#define   GEN6_CURICONT_MASK			0xffffff
+#define GEN6_RP_CUR_UP				0xA054
+#define   GEN6_CURBSYTAVG_MASK			0xffffff
+#define GEN6_RP_PREV_UP				0xA058
+#define GEN6_RP_CUR_DOWN_EI			0xA05C
+#define   GEN6_CURIAVG_MASK			0xffffff
+#define GEN6_RP_CUR_DOWN			0xA060
+#define GEN6_RP_PREV_DOWN			0xA064
+#define GEN6_RP_UP_EI				0xA068
+#define GEN6_RP_DOWN_EI				0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS			0xA070
+#define GEN6_RPDEUHWTC				0xA080
+#define GEN6_RPDEUC				0xA084
+#define GEN6_RPDEUCSW				0xA088
+#define GEN6_RC_STATE				0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT		0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT		0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT		0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL		0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS			0xA0AC
+#define GEN6_RC_SLEEP				0xA0B0
+#define GEN6_RCUBMABDTMR			0xA0B0
+#define GEN6_RC1e_THRESHOLD			0xA0B4
+#define GEN6_RC6_THRESHOLD			0xA0B8
+#define GEN6_RC6p_THRESHOLD			0xA0BC
+#define VLV_RCEDATA				0xA0BC
+#define GEN6_RC6pp_THRESHOLD			0xA0C0
+#define GEN6_PMINTRMSK				0xA168
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define VLV_PWRDWNUPCTL				0xA294
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS		0xA0C4
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS		0xA0C8
+#define GEN9_PG_ENABLE				0xA210
+#define GEN9_RENDER_PG_ENABLE			(1<<0)
+#define GEN9_MEDIA_PG_ENABLE			(1<<1)
+
+#define VLV_CHICKEN_3				(VLV_DISPLAY_BASE + 0x7040C)
+#define  PIXEL_OVERLAP_CNT_MASK			(3 << 30)
+#define  PIXEL_OVERLAP_CNT_SHIFT		30
+
+#define GEN6_PMISR				0x44020
+#define GEN6_PMIMR				0x44024 /* rps_lock */
+#define GEN6_PMIIR				0x44028
+#define GEN6_PMIER				0x4402C
+#define  GEN6_PM_MBOX_EVENT			(1<<25)
+#define  GEN6_PM_THERMAL_EVENT			(1<<24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT		(1<<6)
+#define  GEN6_PM_RP_UP_THRESHOLD		(1<<5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
+#define  GEN6_PM_RPS_EVENTS			(GEN6_PM_RP_UP_THRESHOLD | \
+						 GEN6_PM_RP_DOWN_THRESHOLD | \
+						 GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN7_GT_SCRATCH(i)			(0x4F100 + (i) * 4)
+#define GEN7_GT_SCRATCH_REG_NUM			8
+
+#define VLV_GTLC_SURVIVABILITY_REG              0x130098
+#define VLV_GFX_CLK_STATUS_BIT			(1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT		(1<<2)
+
+#define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define VLV_COUNTER_CONTROL			0x138104
+#define   VLV_COUNT_RANGE_HIGH			(1<<15)
+#define   VLV_MEDIA_RC0_COUNT_EN		(1<<5)
+#define   VLV_RENDER_RC0_COUNT_EN		(1<<4)
+#define   VLV_MEDIA_RC6_COUNT_EN		(1<<1)
+#define   VLV_RENDER_RC6_COUNT_EN		(1<<0)
+#define GEN6_GT_GFX_RC6				0x138108
+#define VLV_GT_RENDER_RC6			0x138108
+#define VLV_GT_MEDIA_RC6			0x13810C
+
+#define GEN6_GT_GFX_RC6p			0x13810C
+#define GEN6_GT_GFX_RC6pp			0x138110
+#define VLV_RENDER_C0_COUNT			0x138118
+#define VLV_MEDIA_C0_COUNT			0x13811C
+
+#define GEN6_PCODE_MAILBOX			0x138124
+#define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_PCODE_WRITE_RC6VIDS		0x4
+#define   GEN6_PCODE_READ_RC6VIDS		0x5
+#define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
+#define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ	0x18
+#define   GEN9_PCODE_READ_MEM_LATENCY		0x6
+#define     GEN9_MEM_LATENCY_LEVEL_MASK		0xFF
+#define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
+#define     GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
+#define     GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24
+#define   SKL_PCODE_CDCLK_CONTROL		0x7
+#define     SKL_CDCLK_PREPARE_FOR_CHANGE	0x3
+#define     SKL_CDCLK_READY_FOR_CHANGE		0x1
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
+#define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
+#define   GEN6_READ_OC_PARAMS			0xc
+#define   GEN6_PCODE_READ_D_COMP		0x10
+#define   GEN6_PCODE_WRITE_D_COMP		0x11
+#define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
+#define   DISPLAY_IPS_CONTROL			0x19
+#define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define GEN6_PCODE_DATA				0x138128
+#define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
+#define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
+#define GEN6_PCODE_DATA1			0x13812C
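+
+/*
+ * Illustrative sketch (not from the original tree): the mailbox
+ * handshake implied by the defines above, assuming the usual
+ * I915_READ/I915_WRITE MMIO accessors and a wait_for() poll helper.
+ * The payload goes into GEN6_PCODE_DATA, the command is written with
+ * GEN6_PCODE_READY set, and completion is signalled by the hardware
+ * clearing that bit. For example, reading the RC6 voltage IDs:
+ *
+ *	I915_WRITE(GEN6_PCODE_DATA, 0);
+ *	I915_WRITE(GEN6_PCODE_MAILBOX,
+ *		   GEN6_PCODE_READY | GEN6_PCODE_READ_RC6VIDS);
+ *	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ *		     500))
+ *		return -ETIMEDOUT;
+ *	rc6vids = I915_READ(GEN6_PCODE_DATA);
+ *
+ * The VID encoding is linear in 5mV steps from a 245mV base, so
+ * GEN6_ENCODE_RC6_VID(450) == 41 and GEN6_DECODE_RC6_VID(41) == 450.
+ */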
+
+#define GEN6_GT_CORE_STATUS		0x138060
+#define   GEN6_CORE_CPD_STATE_MASK	(7<<4)
+#define   GEN6_RCn_MASK			7
+#define   GEN6_RC0			0
+#define   GEN6_RC3			2
+#define   GEN6_RC6			3
+#define   GEN6_RC7			4
+
+#define GEN8_GT_SLICE_INFO		0x138064
+#define   GEN8_LSLICESTAT_MASK		0x7
+
+#define CHV_POWER_SS0_SIG1		0xa720
+#define CHV_POWER_SS1_SIG1		0xa728
+#define   CHV_SS_PG_ENABLE		(1<<1)
+#define   CHV_EU08_PG_ENABLE		(1<<9)
+#define   CHV_EU19_PG_ENABLE		(1<<17)
+#define   CHV_EU210_PG_ENABLE		(1<<25)
+
+#define CHV_POWER_SS0_SIG2		0xa724
+#define CHV_POWER_SS1_SIG2		0xa72c
+#define   CHV_EU311_PG_ENABLE		(1<<1)
+
+#define GEN9_SLICE_PGCTL_ACK(slice)	(0x804c + (slice)*0x4)
+#define   GEN9_PGCTL_SLICE_ACK		(1 << 0)
+#define   GEN9_PGCTL_SS_ACK(subslice)	(1 << (2 + (subslice)*2))
+
+#define GEN9_SS01_EU_PGCTL_ACK(slice)	(0x805c + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice)	(0x8060 + (slice)*0x8)
+#define   GEN9_PGCTL_SSA_EU08_ACK	(1 << 0)
+#define   GEN9_PGCTL_SSA_EU19_ACK	(1 << 2)
+#define   GEN9_PGCTL_SSA_EU210_ACK	(1 << 4)
+#define   GEN9_PGCTL_SSA_EU311_ACK	(1 << 6)
+#define   GEN9_PGCTL_SSB_EU08_ACK	(1 << 8)
+#define   GEN9_PGCTL_SSB_EU19_ACK	(1 << 10)
+#define   GEN9_PGCTL_SSB_EU210_ACK	(1 << 12)
+#define   GEN9_PGCTL_SSB_EU311_ACK	(1 << 14)
+
+#define GEN7_MISCCPCTL			(0x9424)
+#define   GEN7_DOP_CLOCK_GATE_ENABLE		(1<<0)
+#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE	(1<<2)
+#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE	(1<<4)
+#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1<<6)
+
+#define GEN8_GARBCNTL                   0xB004
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
+#define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
+#define   GEN7_PARITY_ERROR_VALID	(1<<13)
+#define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
+#define   GEN7_L3CDERRST1_SUBBANK_MASK	(7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+		(((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+		(((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+		(((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define   GEN7_L3CDERRST1_ENABLE	(1<<7)
+
+#define GEN7_L3LOG_BASE			0xB070
+#define HSW_L3LOG_BASE_SLICE1		0xB270
+#define GEN7_L3LOG_SIZE			0x80
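+
+/*
+ * Illustrative sketch (not from the original tree): decoding one L3
+ * parity error from the status register with the helpers above,
+ * assuming the usual I915_READ MMIO accessor:
+ *
+ *	u32 reg = I915_READ(GEN7_L3CDERRST1);
+ *	if (reg & GEN7_PARITY_ERROR_VALID)
+ *		pr_debug("L3 parity error: row %d, bank %d, subbank %d\n",
+ *			 GEN7_PARITY_ERROR_ROW(reg),
+ *			 GEN7_PARITY_ERROR_BANK(reg),
+ *			 GEN7_PARITY_ERROR_SUBBANK(reg));
+ */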
+
+#define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
+#define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
+#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE	(1<<10)
+#define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE	(1<<4)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
+
+#define GEN9_HALF_SLICE_CHICKEN5	0xe188
+#define   GEN9_DG_MIRROR_FIX_ENABLE	(1<<5)
+#define   GEN9_CCS_TLB_PREFETCH_ENABLE	(1<<3)
+
+#define GEN8_ROW_CHICKEN		0xe4f0
+#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	(1<<8)
+#define   STALL_DOP_GATING_DISABLE		(1<<5)
+
+#define GEN7_ROW_CHICKEN2		0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2		0xf4f4
+#define   DOP_CLOCK_GATING_DISABLE	(1<<0)
+
+#define HSW_ROW_CHICKEN3		0xe49c
+#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
+
+#define HALF_SLICE_CHICKEN2		0xe180
+#define   GEN8_ST_PO_DISABLE		(1<<13)
+
+#define HALF_SLICE_CHICKEN3		0xe184
+#define   HSW_SAMPLE_C_PERFORMANCE	(1<<9)
+#define   GEN8_CENTROID_PIXEL_OPT_DIS	(1<<8)
+#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC	(1<<5)
+#define   GEN8_SAMPLER_POWER_BYPASS_DIS	(1<<1)
+
+#define GEN9_HALF_SLICE_CHICKEN7	0xe194
+#define   GEN9_ENABLE_YV12_BUGFIX	(1<<4)
+
+/* Audio */
+#define G4X_AUD_VID_DID			(dev_priv->info.display_mmio_offset + 0x62020)
+#define   INTEL_AUDIO_DEVCL		0x808629FB
+#define   INTEL_AUDIO_DEVBLC		0x80862801
+#define   INTEL_AUDIO_DEVCTG		0x80862802
+
+#define G4X_AUD_CNTL_ST			0x620B4
+#define   G4X_ELDV_DEVCL_DEVBLC		(1 << 13)
+#define   G4X_ELDV_DEVCTG		(1 << 14)
+#define   G4X_ELD_ADDR_MASK		(0xf << 5)
+#define   G4X_ELD_ACK			(1 << 4)
+#define G4X_HDMIW_HDMIEDID		0x6210C
+
+#define _IBX_HDMIW_HDMIEDID_A		0xE2050
+#define _IBX_HDMIW_HDMIEDID_B		0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					_IBX_HDMIW_HDMIEDID_A, \
+					_IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A		0xE20B4
+#define _IBX_AUD_CNTL_ST_B		0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					_IBX_AUD_CNTL_ST_A, \
+					_IBX_AUD_CNTL_ST_B)
+#define   IBX_ELD_BUFFER_SIZE_MASK	(0x1f << 10)
+#define   IBX_ELD_ADDRESS_MASK		(0x1f << 5)
+#define   IBX_ELD_ACK			(1 << 4)
+#define IBX_AUD_CNTL_ST2		0xE20C0
+#define   IBX_CP_READY(port)		((1 << 1) << (((port) - 1) * 4))
+#define   IBX_ELD_VALID(port)		((1 << 0) << (((port) - 1) * 4))
+
+#define _CPT_HDMIW_HDMIEDID_A		0xE5050
+#define _CPT_HDMIW_HDMIEDID_B		0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					_CPT_HDMIW_HDMIEDID_A, \
+					_CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A		0xE50B4
+#define _CPT_AUD_CNTL_ST_B		0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					_CPT_AUD_CNTL_ST_A, \
+					_CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2		0xE50C0
+
+#define _VLV_HDMIW_HDMIEDID_A		(VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B		(VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					_VLV_HDMIW_HDMIEDID_A, \
+					_VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A		(VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B		(VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					_VLV_AUD_CNTL_ST_A, \
+					_VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2		(VLV_DISPLAY_BASE + 0x620C0)
+
+/* These are the 4 32-bit write offset registers for each stream
+ * output buffer.  Each determines the offset from the
+ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
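+/* e.g. the four registers land at 0x5280, 0x5284, 0x5288 and 0x528c,
+ * i.e. GEN7_SO_WRITE_OFFSET(0) through GEN7_SO_WRITE_OFFSET(3). */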
+
+#define _IBX_AUD_CONFIG_A		0xe2000
+#define _IBX_AUD_CONFIG_B		0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+					_IBX_AUD_CONFIG_A, \
+					_IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A		0xe5000
+#define _CPT_AUD_CONFIG_B		0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+					_CPT_AUD_CONFIG_A, \
+					_CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A		(VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B		(VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+					_VLV_AUD_CONFIG_A, \
+					_VLV_AUD_CONFIG_B)
+
+#define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
+#define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
+#define   AUD_CONFIG_UPPER_N_SHIFT		20
+#define   AUD_CONFIG_UPPER_N_MASK		(0xff << 20)
+#define   AUD_CONFIG_LOWER_N_SHIFT		4
+#define   AUD_CONFIG_LOWER_N_MASK		(0xfff << 4)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK	(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175	(0 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200	(1 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000	(2 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027	(3 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000	(4 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054	(5 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176	(6 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250	(7 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352	(8 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500	(9 << 16)
+#define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
+
+/* HSW Audio */
+#define _HSW_AUD_CONFIG_A		0x65000
+#define _HSW_AUD_CONFIG_B		0x65100
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+					_HSW_AUD_CONFIG_A, \
+					_HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A		0x65010
+#define _HSW_AUD_MISC_CTRL_B		0x65110
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+					_HSW_AUD_MISC_CTRL_A, \
+					_HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+					_HSW_AUD_DIP_ELD_CTRL_ST_A, \
+					_HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define _HSW_AUD_DIG_CNVT_1		0x65080
+#define _HSW_AUD_DIG_CNVT_2		0x65180
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+					_HSW_AUD_DIG_CNVT_1, \
+					_HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK		0x3
+
+#define _HSW_AUD_EDID_DATA_A		0x65050
+#define _HSW_AUD_EDID_DATA_B		0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+					_HSW_AUD_EDID_DATA_A, \
+					_HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG		0x6507c
+#define HSW_AUD_PIN_ELD_CP_VLD		0x650c0
+#define   AUDIO_INACTIVE(trans)		((1 << 3) << ((trans) * 4))
+#define   AUDIO_OUTPUT_ENABLE(trans)	((1 << 2) << ((trans) * 4))
+#define   AUDIO_CP_READY(trans)		((1 << 1) << ((trans) * 4))
+#define   AUDIO_ELD_VALID(trans)	((1 << 0) << ((trans) * 4))
+
+#define HSW_AUD_CHICKENBIT			0x65f10
+#define   SKL_AUD_CODEC_WAKE_SIGNAL		(1 << 15)
+
+/* HSW Power Wells */
+#define HSW_PWR_WELL_BIOS			0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER			0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR			0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG			0x4540C /* CTL4 */
+#define   HSW_PWR_WELL_ENABLE_REQUEST		(1<<31)
+#define   HSW_PWR_WELL_STATE_ENABLED		(1<<30)
+#define HSW_PWR_WELL_CTL5			0x45410
+#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
+#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
+#define   HSW_PWR_WELL_FORCE_ON			(1<<19)
+#define HSW_PWR_WELL_CTL6			0x45414
+
+/* SKL Fuse Status */
+#define SKL_FUSE_STATUS				0x42000
+#define  SKL_FUSE_DOWNLOAD_STATUS              (1<<31)
+#define  SKL_FUSE_PG0_DIST_STATUS              (1<<27)
+#define  SKL_FUSE_PG1_DIST_STATUS              (1<<26)
+#define  SKL_FUSE_PG2_DIST_STATUS              (1<<25)
+
+/* Per-pipe DDI Function Control */
+#define TRANS_DDI_FUNC_CTL_A		0x60400
+#define TRANS_DDI_FUNC_CTL_B		0x61400
+#define TRANS_DDI_FUNC_CTL_C		0x62400
+#define TRANS_DDI_FUNC_CTL_EDP		0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+
+#define  TRANS_DDI_FUNC_ENABLE		(1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define  TRANS_DDI_PORT_MASK		(7<<28)
+#define  TRANS_DDI_PORT_SHIFT		28
+#define  TRANS_DDI_SELECT_PORT(x)	((x)<<28)
+#define  TRANS_DDI_PORT_NONE		(0<<28)
+#define  TRANS_DDI_MODE_SELECT_MASK	(7<<24)
+#define  TRANS_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  TRANS_DDI_MODE_SELECT_DVI	(1<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST	(2<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST	(3<<24)
+#define  TRANS_DDI_MODE_SELECT_FDI	(4<<24)
+#define  TRANS_DDI_BPC_MASK		(7<<20)
+#define  TRANS_DDI_BPC_8		(0<<20)
+#define  TRANS_DDI_BPC_10		(1<<20)
+#define  TRANS_DDI_BPC_6		(2<<20)
+#define  TRANS_DDI_BPC_12		(3<<20)
+#define  TRANS_DDI_PVSYNC		(1<<17)
+#define  TRANS_DDI_PHSYNC		(1<<16)
+#define  TRANS_DDI_EDP_INPUT_MASK	(7<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ON	(0<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF	(4<<12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5<<12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6<<12)
+#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC	(1<<8)
+#define  TRANS_DDI_BFI_ENABLE		(1<<4)
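+
+/*
+ * Illustrative sketch (not from the original tree): enabling a
+ * transcoder in 8bpc DP SST mode on port B combines the fields above
+ * roughly as follows, assuming the usual I915_WRITE MMIO accessor:
+ *
+ *	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ *		   TRANS_DDI_FUNC_ENABLE | TRANS_DDI_SELECT_PORT(PORT_B) |
+ *		   TRANS_DDI_MODE_SELECT_DP_SST | TRANS_DDI_BPC_8);
+ */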
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A			0x64040
+#define DP_TP_CTL_B			0x64140
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE			(1<<31)
+#define  DP_TP_CTL_MODE_SST			(0<<27)
+#define  DP_TP_CTL_MODE_MST			(1<<27)
+#define  DP_TP_CTL_FORCE_ACT			(1<<25)
+#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
+#define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
+#define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3		(4<<8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE		(2<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE		(1<<7)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A			0x64044
+#define DP_TP_STATUS_B			0x64144
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define  DP_TP_STATUS_IDLE_DONE			(1<<25)
+#define  DP_TP_STATUS_ACT_SENT			(1<<24)
+#define  DP_TP_STATUS_MODE_STATUS_MST		(1<<23)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE		(1<<12)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC2	(3 << 8)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC1	(3 << 4)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC0	(3 << 0)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A				0x64000
+#define DDI_BUF_CTL_B				0x64100
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE			(1<<31)
+#define  DDI_BUF_TRANS_SELECT(n)	((n) << 24)
+#define  DDI_BUF_EMP_MASK			(0xf<<24)
+#define  DDI_BUF_PORT_REVERSAL			(1<<16)
+#define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_A_4_LANES				(1<<4)
+#define  DDI_PORT_WIDTH(width)			(((width) - 1) << 1)
+#define  DDI_PORT_WIDTH_MASK			(7 << 1)
+#define  DDI_PORT_WIDTH_SHIFT			1
+#define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A				0x64E00
+#define DDI_BUF_TRANS_B				0x64E60
+#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
+
+/* The Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which holds the target register offset, and SBI_DATA,
+ * which carries the payload */
+#define SBI_ADDR			0xC6000
+#define SBI_DATA			0xC6004
+#define SBI_CTL_STAT			0xC6008
+#define  SBI_CTL_DEST_ICLK		(0x0<<16)
+#define  SBI_CTL_DEST_MPHY		(0x1<<16)
+#define  SBI_CTL_OP_IORD		(0x2<<8)
+#define  SBI_CTL_OP_IOWR		(0x3<<8)
+#define  SBI_CTL_OP_CRRD		(0x6<<8)
+#define  SBI_CTL_OP_CRWR		(0x7<<8)
+#define  SBI_RESPONSE_FAIL		(0x1<<1)
+#define  SBI_RESPONSE_SUCCESS		(0x0<<1)
+#define  SBI_BUSY			(0x1<<0)
+#define  SBI_READY			(0x0<<0)
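+
+/*
+ * Illustrative sketch (not from the original tree): a polled SBI read
+ * built from the defines above, assuming the usual I915_READ/I915_WRITE
+ * MMIO accessors and a wait_for() poll helper; error handling elided.
+ *
+ *	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
+ *		return;	(a previous transaction is still pending)
+ *
+ *	I915_WRITE(SBI_ADDR, (u32)offset << 16);
+ *	I915_WRITE(SBI_CTL_STAT,
+ *		   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
+ *
+ *	if (wait_for((I915_READ(SBI_CTL_STAT) &
+ *		      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100))
+ *		return;	(transaction failed or timed out)
+ *
+ *	value = I915_READ(SBI_DATA);
+ */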
+
+/* SBI offsets */
+#define  SBI_SSCDIVINTPHASE6			0x0600
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
+#define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
+#define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
+#define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
+#define  SBI_SSCCTL				0x020c
+#define  SBI_SSCCTL6				0x060C
+#define   SBI_SSCCTL_PATHALT			(1<<3)
+#define   SBI_SSCCTL_DISABLE			(1<<0)
+#define  SBI_SSCAUXDIV6				0x0610
+#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
+#define  SBI_DBUFF0				0x2a00
+#define  SBI_GEN0				0x1f00
+#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE	(1<<0)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE			0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
+
+/* SPLL */
+#define SPLL_CTL			0x46020
+#define  SPLL_PLL_ENABLE		(1<<31)
+#define  SPLL_PLL_SSC			(1<<28)
+#define  SPLL_PLL_NON_SSC		(2<<28)
+#define  SPLL_PLL_LCPLL			(3<<28)
+#define  SPLL_PLL_REF_MASK		(3<<28)
+#define  SPLL_PLL_FREQ_810MHz		(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz		(1<<26)
+#define  SPLL_PLL_FREQ_2700MHz		(2<<26)
+#define  SPLL_PLL_FREQ_MASK		(3<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1			0x46040
+#define WRPLL_CTL2			0x46060
+#define WRPLL_CTL(pll)			((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
+#define  WRPLL_PLL_ENABLE		(1<<31)
+#define  WRPLL_PLL_SSC			(1<<28)
+#define  WRPLL_PLL_NON_SSC		(2<<28)
+#define  WRPLL_PLL_LCPLL		(3<<28)
+#define  WRPLL_PLL_REF_MASK		(3<<28)
+/* WRPLL divider programming */
+#define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
+#define  WRPLL_DIVIDER_REF_MASK		(0xff)
+#define  WRPLL_DIVIDER_POST(x)		((x)<<8)
+#define  WRPLL_DIVIDER_POST_MASK	(0x3f<<8)
+#define  WRPLL_DIVIDER_POST_SHIFT	8
+#define  WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)
+#define  WRPLL_DIVIDER_FB_SHIFT		16
+#define  WRPLL_DIVIDER_FB_MASK		(0xff<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A			0x46100
+#define PORT_CLK_SEL_B			0x46104
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
+#define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
+#define  PORT_CLK_SEL_LCPLL_810		(2<<29)
+#define  PORT_CLK_SEL_SPLL		(3<<29)
+#define  PORT_CLK_SEL_WRPLL(pll)	(((pll)+4)<<29)
+#define  PORT_CLK_SEL_WRPLL1		(4<<29)
+#define  PORT_CLK_SEL_WRPLL2		(5<<29)
+#define  PORT_CLK_SEL_NONE		(7<<29)
+#define  PORT_CLK_SEL_MASK		(7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A			0x46140
+#define TRANS_CLK_SEL_B			0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
+#define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
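+/* e.g. TRANS_CLK_SEL_PORT(PORT_B) routes port B's clock to the given
+ * transcoder, and TRANS_CLK_SEL_DISABLED gates the clock off again. */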
+
+#define CDCLK_FREQ			0x46200
+
+#define TRANSA_MSA_MISC			0x60410
+#define TRANSB_MSA_MISC			0x61410
+#define TRANSC_MSA_MISC			0x62410
+#define TRANS_EDP_MSA_MISC		0x6f410
+#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+
+#define  TRANS_MSA_SYNC_CLK		(1<<0)
+#define  TRANS_MSA_6_BPC		(0<<5)
+#define  TRANS_MSA_8_BPC		(1<<5)
+#define  TRANS_MSA_10_BPC		(2<<5)
+#define  TRANS_MSA_12_BPC		(3<<5)
+#define  TRANS_MSA_16_BPC		(4<<5)
+
+/* LCPLL Control */
+#define LCPLL_CTL			0x130040
+#define  LCPLL_PLL_DISABLE		(1<<31)
+#define  LCPLL_PLL_LOCK			(1<<30)
+#define  LCPLL_CLK_FREQ_MASK		(3<<26)
+#define  LCPLL_CLK_FREQ_450		(0<<26)
+#define  LCPLL_CLK_FREQ_54O_BDW		(1<<26)
+#define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
+#define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
+#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE	(1<<24)
+#define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
+#define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
+#define  LCPLL_CD_SOURCE_FCLK		(1<<21)
+#define  LCPLL_CD_SOURCE_FCLK_DONE	(1<<19)
+
+/*
+ * SKL Clocks
+ */
+
+/* CDCLK_CTL */
+#define CDCLK_CTL			0x46000
+#define  CDCLK_FREQ_SEL_MASK		(3<<26)
+#define  CDCLK_FREQ_450_432		(0<<26)
+#define  CDCLK_FREQ_540			(1<<26)
+#define  CDCLK_FREQ_337_308		(2<<26)
+#define  CDCLK_FREQ_675_617		(3<<26)
+#define  CDCLK_FREQ_DECIMAL_MASK	(0x7ff)
+
+#define  BXT_CDCLK_CD2X_DIV_SEL_MASK	(3<<22)
+#define  BXT_CDCLK_CD2X_DIV_SEL_1	(0<<22)
+#define  BXT_CDCLK_CD2X_DIV_SEL_1_5	(1<<22)
+#define  BXT_CDCLK_CD2X_DIV_SEL_2	(2<<22)
+#define  BXT_CDCLK_CD2X_DIV_SEL_4	(3<<22)
+#define  BXT_CDCLK_SSA_PRECHARGE_ENABLE	(1<<16)
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL		0x46010
+#define LCPLL2_CTL		0x46014
+#define  LCPLL_PLL_ENABLE	(1<<31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1		0x6C058
+#define  DPLL_CTRL1_HDMI_MODE(id)		(1<<((id)*6+5))
+#define  DPLL_CTRL1_SSC(id)			(1<<((id)*6+4))
+#define  DPLL_CTRL1_LINK_RATE_MASK(id)		(7<<((id)*6+1))
+#define  DPLL_CTRL1_LINK_RATE_SHIFT(id)		((id)*6+1)
+#define  DPLL_CTRL1_LINK_RATE(linkrate, id)	((linkrate)<<((id)*6+1))
+#define  DPLL_CTRL1_OVERRIDE(id)		(1<<((id)*6))
+#define  DPLL_CTRL1_LINK_RATE_2700		0
+#define  DPLL_CTRL1_LINK_RATE_1350		1
+#define  DPLL_CTRL1_LINK_RATE_810		2
+#define  DPLL_CTRL1_LINK_RATE_1620		3
+#define  DPLL_CTRL1_LINK_RATE_1080		4
+#define  DPLL_CTRL1_LINK_RATE_2160		5
+
+/* DPLL control2 */
+#define DPLL_CTRL2				0x6C05C
+#define  DPLL_CTRL2_DDI_CLK_OFF(port)		(1<<((port)+15))
+#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)	(3<<((port)*3+1))
+#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port)*3+1)
+#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)	((clk)<<((port)*3+1))
+#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1<<((port)*3))
+
+/* DPLL Status */
+#define DPLL_STATUS	0x6C060
+#define  DPLL_LOCK(id) (1<<((id)*8))
+
+/* DPLL cfg */
+#define DPLL1_CFGCR1	0x6C040
+#define DPLL2_CFGCR1	0x6C048
+#define DPLL3_CFGCR1	0x6C050
+#define  DPLL_CFGCR1_FREQ_ENABLE	(1<<31)
+#define  DPLL_CFGCR1_DCO_FRACTION_MASK	(0x7fff<<9)
+#define  DPLL_CFGCR1_DCO_FRACTION(x)	((x)<<9)
+#define  DPLL_CFGCR1_DCO_INTEGER_MASK	(0x1ff)
+
+#define DPLL1_CFGCR2	0x6C044
+#define DPLL2_CFGCR2	0x6C04C
+#define DPLL3_CFGCR2	0x6C054
+#define  DPLL_CFGCR2_QDIV_RATIO_MASK	(0xff<<8)
+#define  DPLL_CFGCR2_QDIV_RATIO(x)	((x)<<8)
+#define  DPLL_CFGCR2_QDIV_MODE(x)	((x)<<7)
+#define  DPLL_CFGCR2_KDIV_MASK		(3<<5)
+#define  DPLL_CFGCR2_KDIV(x)		((x)<<5)
+#define  DPLL_CFGCR2_KDIV_5 (0<<5)
+#define  DPLL_CFGCR2_KDIV_2 (1<<5)
+#define  DPLL_CFGCR2_KDIV_3 (2<<5)
+#define  DPLL_CFGCR2_KDIV_1 (3<<5)
+#define  DPLL_CFGCR2_PDIV_MASK		(7<<2)
+#define  DPLL_CFGCR2_PDIV(x)		((x)<<2)
+#define  DPLL_CFGCR2_PDIV_1 (0<<2)
+#define  DPLL_CFGCR2_PDIV_2 (1<<2)
+#define  DPLL_CFGCR2_PDIV_3 (2<<2)
+#define  DPLL_CFGCR2_PDIV_7 (4<<2)
+#define  DPLL_CFGCR2_CENTRAL_FREQ_MASK	(3)
+
+#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
+#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
+
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL			0x6d000
+#define   BXT_DE_PLL_RATIO(x)		(x)	/* {60,65,100} * 19.2MHz */
+#define   BXT_DE_PLL_RATIO_MASK		0xff
+
+#define BXT_DE_PLL_ENABLE		0x46070
+#define   BXT_DE_PLL_PLL_ENABLE		(1 << 31)
+#define   BXT_DE_PLL_LOCK		(1 << 30)
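+/* e.g. BXT_DE_PLL_RATIO(60) selects 60 * 19.2MHz = 1152MHz */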
+
+/* GEN9 DC */
+#define DC_STATE_EN			0x45504
+#define  DC_STATE_EN_UPTO_DC5		(1<<0)
+#define  DC_STATE_EN_DC9		(1<<3)
+#define  DC_STATE_EN_UPTO_DC6		(2<<0)
+#define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
+
+#define DC_STATE_DEBUG			0x45520
+#define  DC_STATE_DEBUG_MASK_MEMORY_UP	(1<<1)
+
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW			(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW			0x138144
+#define  D_COMP_RCOMP_IN_PROGRESS	(1<<9)
+#define  D_COMP_COMP_FORCE		(1<<8)
+#define  D_COMP_COMP_DISABLE		(1<<0)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A		0x45270
+#define PIPE_WM_LINETIME_B		0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK			(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)		((x))
+#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP			0xc2014
+#define  SFUSE_STRAP_FUSE_LOCK		(1<<13)
+#define  SFUSE_STRAP_DISPLAY_DISABLED	(1<<7)
+#define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
+#define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
+#define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
+
+#define WM_MISC				0x45260
+#define  WM_MISC_DATA_PARTITION_5_6	(1 << 0)
+
+#define WM_DBG				0x45280
+#define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
+#define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
+#define  WM_DBG_DISALLOW_SPRITE		(1<<2)
+
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY	0x49010
+#define _PIPE_A_CSC_COEFF_BY	0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU	0x49018
+#define _PIPE_A_CSC_COEFF_BU	0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV	0x49020
+#define _PIPE_A_CSC_COEFF_BV	0x49024
+#define _PIPE_A_CSC_MODE	0x49028
+#define   CSC_BLACK_SCREEN_OFFSET	(1 << 2)
+#define   CSC_POSITION_BEFORE_GAMMA	(1 << 1)
+#define   CSC_MODE_YUV_TO_RGB		(1 << 0)
+#define _PIPE_A_CSC_PREOFF_HI	0x49030
+#define _PIPE_A_CSC_PREOFF_ME	0x49034
+#define _PIPE_A_CSC_PREOFF_LO	0x49038
+#define _PIPE_A_CSC_POSTOFF_HI	0x49040
+#define _PIPE_A_CSC_POSTOFF_ME	0x49044
+#define _PIPE_A_CSC_POSTOFF_LO	0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY	0x49110
+#define _PIPE_B_CSC_COEFF_BY	0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU	0x49118
+#define _PIPE_B_CSC_COEFF_BU	0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV	0x49120
+#define _PIPE_B_CSC_COEFF_BV	0x49124
+#define _PIPE_B_CSC_MODE	0x49128
+#define _PIPE_B_CSC_PREOFF_HI	0x49130
+#define _PIPE_B_CSC_PREOFF_ME	0x49134
+#define _PIPE_B_CSC_PREOFF_LO	0x49138
+#define _PIPE_B_CSC_POSTOFF_HI	0x49140
+#define _PIPE_B_CSC_POSTOFF_ME	0x49144
+#define _PIPE_B_CSC_POSTOFF_LO	0x49148
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
+/* MIPI DSI registers */
+
+#define _MIPI_PORT(port, a, c)	_PORT3(port, a, 0, c)	/* ports A and C only */
+
+/* BXT MIPI clock controls */
+#define BXT_MAX_VAR_OUTPUT_KHZ			39500
+
+#define BXT_MIPI_CLOCK_CTL			0x46090
+#define  BXT_MIPI1_DIV_SHIFT			26
+#define  BXT_MIPI2_DIV_SHIFT			10
+#define  BXT_MIPI_DIV_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
+					BXT_MIPI2_DIV_SHIFT)
+/* Var clock divider to generate TX source. Result must be < 39.5 MHz */
+#define  BXT_MIPI1_ESCLK_VAR_DIV_MASK		(0x3F << 26)
+#define  BXT_MIPI2_ESCLK_VAR_DIV_MASK		(0x3F << 10)
+#define  BXT_MIPI_ESCLK_VAR_DIV_MASK(port)	\
+			_MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
+						BXT_MIPI2_ESCLK_VAR_DIV_MASK)
+
+#define  BXT_MIPI_ESCLK_VAR_DIV(port, val)	\
+			((val) << BXT_MIPI_DIV_SHIFT(port))
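+/*
+ * Illustrative arithmetic (hypothetical source clock, not from the
+ * original tree): an 8x source of 624000 kHz with a divider value of
+ * 16 yields 624000 / 16 = 39000 kHz, which satisfies the
+ * BXT_MAX_VAR_OUTPUT_KHZ (39500) limit above.
+ */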
+/* TX control divider to select actual TX clock output from (8x/var) */
+#define  BXT_MIPI1_TX_ESCLK_SHIFT		21
+#define  BXT_MIPI2_TX_ESCLK_SHIFT		5
+#define  BXT_MIPI_TX_ESCLK_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
+					BXT_MIPI2_TX_ESCLK_SHIFT)
+#define  BXT_MIPI1_TX_ESCLK_FIXDIV_MASK		(3 << 21)
+#define  BXT_MIPI2_TX_ESCLK_FIXDIV_MASK		(3 << 5)
+#define  BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)	\
+			_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
+						BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define  BXT_MIPI_TX_ESCLK_8XDIV_BY2(port)	\
+		(0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
+#define  BXT_MIPI_TX_ESCLK_8XDIV_BY4(port)	\
+		(0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
+#define  BXT_MIPI_TX_ESCLK_8XDIV_BY8(port)	\
+		(0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX control divider to select actual RX clock output from 8x */
+#define  BXT_MIPI1_RX_ESCLK_SHIFT		19
+#define  BXT_MIPI2_RX_ESCLK_SHIFT		3
+#define  BXT_MIPI_RX_ESCLK_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
+					BXT_MIPI2_RX_ESCLK_SHIFT)
+#define  BXT_MIPI1_RX_ESCLK_FIXDIV_MASK		(3 << 19)
+#define  BXT_MIPI2_RX_ESCLK_FIXDIV_MASK		(3 << 3)
+#define  BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port)	\
+		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
+#define  BXT_MIPI_RX_ESCLK_8X_BY2(port)	\
+		(1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
+#define  BXT_MIPI_RX_ESCLK_8X_BY3(port)	\
+		(2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
+#define  BXT_MIPI_RX_ESCLK_8X_BY4(port)	\
+		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
+/* BXT-A WA: Always program DPHY dividers to 00 */
+#define  BXT_MIPI1_DPHY_DIV_SHIFT		16
+#define  BXT_MIPI2_DPHY_DIV_SHIFT		0
+#define  BXT_MIPI_DPHY_DIV_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
+					BXT_MIPI2_DPHY_DIV_SHIFT)
+#define  BXT_MIPI_1_DPHY_DIVIDER_MASK		(3 << 16)
+#define  BXT_MIPI_2_DPHY_DIVIDER_MASK		(3 << 0)
+#define  BXT_MIPI_DPHY_DIVIDER_MASK(port)	\
+		(3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
+
+/* BXT MIPI mode configure */
+#define  _BXT_MIPIA_TRANS_HACTIVE			0x6B0F8
+#define  _BXT_MIPIC_TRANS_HACTIVE			0x6B8F8
+#define  BXT_MIPI_TRANS_HACTIVE(tc)	_MIPI_PORT(tc, \
+		_BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+
+#define  _BXT_MIPIA_TRANS_VACTIVE			0x6B0FC
+#define  _BXT_MIPIC_TRANS_VACTIVE			0x6B8FC
+#define  BXT_MIPI_TRANS_VACTIVE(tc)	_MIPI_PORT(tc, \
+		_BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+
+#define  _BXT_MIPIA_TRANS_VTOTAL			0x6B100
+#define  _BXT_MIPIC_TRANS_VTOTAL			0x6B900
+#define  BXT_MIPI_TRANS_VTOTAL(tc)	_MIPI_PORT(tc, \
+		_BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+
+#define BXT_DSI_PLL_CTL			0x161000
+#define  BXT_DSI_PLL_PVD_RATIO_SHIFT	16
+#define  BXT_DSI_PLL_PVD_RATIO_MASK	(3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define  BXT_DSI_PLL_PVD_RATIO_1	(1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define  BXT_DSIC_16X_BY2		(1 << 10)
+#define  BXT_DSIC_16X_BY3		(2 << 10)
+#define  BXT_DSIC_16X_BY4		(3 << 10)
+#define  BXT_DSIA_16X_BY2		(1 << 8)
+#define  BXT_DSIA_16X_BY3		(2 << 8)
+#define  BXT_DSIA_16X_BY4		(3 << 8)
+#define  BXT_DSI_FREQ_SEL_SHIFT		8
+#define  BXT_DSI_FREQ_SEL_MASK		(0xF << BXT_DSI_FREQ_SEL_SHIFT)
+
+#define BXT_DSI_PLL_RATIO_MAX		0x7D
+#define BXT_DSI_PLL_RATIO_MIN		0x22
+#define BXT_DSI_PLL_RATIO_MASK		0xFF
+#define BXT_REF_CLOCK_KHZ		19500
+
+#define BXT_DSI_PLL_ENABLE		0x46080
+#define  BXT_DSI_PLL_DO_ENABLE		(1 << 31)
+#define  BXT_DSI_PLL_LOCKED		(1 << 30)
+
+#define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIC_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port)	_MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+
+/* BXT port control */
+#define _BXT_MIPIA_PORT_CTRL				0x6B0C0
+#define _BXT_MIPIC_PORT_CTRL				0x6B8C0
+#define BXT_MIPI_PORT_CTRL(tc)	_MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \
+						_BXT_MIPIC_PORT_CTRL)
+
+#define  DPI_ENABLE					(1 << 31) /* A + C */
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
+#define  DUAL_LINK_MODE_SHIFT				26
+#define  DUAL_LINK_MODE_MASK				(1 << 26)
+#define  DUAL_LINK_MODE_FRONT_BACK			(0 << 26)
+#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE		(1 << 26)
+#define  DITHERING_ENABLE				(1 << 25) /* A + C */
+#define  FLOPPED_HSTX					(1 << 23)
+#define  DE_INVERT					(1 << 19) /* XXX */
+#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT		18
+#define  MIPIA_FLISDSI_DELAY_COUNT_MASK			(0xf << 18)
+#define  AFE_LATCHOUT					(1 << 17)
+#define  LP_OUTPUT_HOLD					(1 << 16)
+#define  MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT		15
+#define  MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK		(1 << 15)
+#define  MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT		11
+#define  MIPIC_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 11)
+#define  CSB_SHIFT					9
+#define  CSB_MASK					(3 << 9)
+#define  CSB_20MHZ					(0 << 9)
+#define  CSB_10MHZ					(1 << 9)
+#define  CSB_40MHZ					(2 << 9)
+#define  BANDGAP_MASK					(1 << 8)
+#define  BANDGAP_PNW_CIRCUIT				(0 << 8)
+#define  BANDGAP_LNC_CIRCUIT				(1 << 8)
+#define  MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT		5
+#define  MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK		(7 << 5)
+#define  TEARING_EFFECT_DELAY				(1 << 4) /* A + C */
+#define  TEARING_EFFECT_SHIFT				2 /* A + C */
+#define  TEARING_EFFECT_MASK				(3 << 2)
+#define  TEARING_EFFECT_OFF				(0 << 2)
+#define  TEARING_EFFECT_DSI				(1 << 2)
+#define  TEARING_EFFECT_GPIO				(2 << 2)
+#define  LANE_CONFIGURATION_SHIFT			0
+#define  LANE_CONFIGURATION_MASK			(3 << 0)
+#define  LANE_CONFIGURATION_4LANE			(0 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_A			(1 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_B			(2 << 0)
+
+#define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIC_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port)			_MIPI_PORT(port, \
+				_MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define  TEARING_EFFECT_DELAY_SHIFT			0
+#define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG			(VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIC_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(port)		_MIPI_PORT(port, _MIPIA_DEVICE_READY, \
+						_MIPIC_DEVICE_READY)
+#define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
+#define  ULPS_STATE_MASK				(3 << 1)
+#define  ULPS_STATE_ENTER				(2 << 1)
+#define  ULPS_STATE_EXIT				(1 << 1)
+#define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
+#define  DEVICE_READY					(1 << 0)
+
+#define _MIPIA_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIC_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(port)		_MIPI_PORT(port, _MIPIA_INTR_STAT, \
+					_MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN			(dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIC_INTR_EN			(dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(port)		_MIPI_PORT(port, _MIPIA_INTR_EN, \
+					_MIPIC_INTR_EN)
+#define  TEARING_EFFECT					(1 << 31)
+#define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
+#define  GEN_READ_DATA_AVAIL				(1 << 29)
+#define  LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+#define  HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+#define  RX_PROT_VIOLATION				(1 << 26)
+#define  RX_INVALID_TX_LENGTH				(1 << 25)
+#define  ACK_WITH_NO_ERROR				(1 << 24)
+#define  TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+#define  LP_RX_TIMEOUT					(1 << 22)
+#define  HS_TX_TIMEOUT					(1 << 21)
+#define  DPI_FIFO_UNDERRUN				(1 << 20)
+#define  LOW_CONTENTION					(1 << 19)
+#define  HIGH_CONTENTION				(1 << 18)
+#define  TXDSI_VC_ID_INVALID				(1 << 17)
+#define  TXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 16)
+#define  TXCHECKSUM_ERROR				(1 << 15)
+#define  TXECC_MULTIBIT_ERROR				(1 << 14)
+#define  TXECC_SINGLE_BIT_ERROR				(1 << 13)
+#define  TXFALSE_CONTROL_ERROR				(1 << 12)
+#define  RXDSI_VC_ID_INVALID				(1 << 11)
+#define  RXDSI_DATA_TYPE_NOT_REGOGNISED			(1 << 10)
+#define  RXCHECKSUM_ERROR				(1 << 9)
+#define  RXECC_MULTIBIT_ERROR				(1 << 8)
+#define  RXECC_SINGLE_BIT_ERROR				(1 << 7)
+#define  RXFALSE_CONTROL_ERROR				(1 << 6)
+#define  RXHS_RECEIVE_TIMEOUT_ERROR			(1 << 5)
+#define  RX_LP_TX_SYNC_ERROR				(1 << 4)
+#define  RXEXCAPE_MODE_ENTRY_ERROR			(1 << 3)
+#define  RXEOT_SYNC_ERROR				(1 << 2)
+#define  RXSOT_SYNC_ERROR				(1 << 1)
+#define  RXSOT_ERROR					(1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port)		_MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
+						_MIPIC_DSI_FUNC_PRG)
+#define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
+#define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
+#define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
+#define  CMD_MODE_DATA_WIDTH_9_BIT			(2 << 13)
+#define  CMD_MODE_DATA_WIDTH_8_BIT			(3 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION1			(4 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION2			(5 << 13)
+#define  VID_MODE_FORMAT_MASK				(0xf << 7)
+#define  VID_MODE_NOT_SUPPORTED				(0 << 7)
+#define  VID_MODE_FORMAT_RGB565				(1 << 7)
+#define  VID_MODE_FORMAT_RGB666				(2 << 7)
+#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
+#define  VID_MODE_FORMAT_RGB888				(4 << 7)
+#define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
+#define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
+#define  VID_MODE_CHANNEL_NUMBER_SHIFT			3
+#define  VID_MODE_CHANNEL_NUMBER_MASK			(3 << 3)
+#define  DATA_LANES_PRG_REG_SHIFT			0
+#define  DATA_LANES_PRG_REG_MASK			(7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port)	_MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
+					_MIPIC_HS_TX_TIMEOUT)
+#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port)	_MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
+					_MIPIC_LP_RX_TIMEOUT)
+#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port)	_MIPI_PORT(port, \
+			_MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define  TURN_AROUND_TIMEOUT_MASK			0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port)	_MIPI_PORT(port, \
+			_MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define  DEVICE_RESET_TIMER_MASK			0xffff
+
+#define _MIPIA_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIC_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(port)	_MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
+					_MIPIC_DPI_RESOLUTION)
+#define  VERTICAL_ADDRESS_SHIFT				16
+#define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
+#define  HORIZONTAL_ADDRESS_SHIFT			0
+#define  HORIZONTAL_ADDRESS_MASK			0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port)	_MIPI_PORT(port, \
+			_MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define  DBI_FIFO_EMPTY_HALF				(0 << 0)
+#define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
+#define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port)	_MIPI_PORT(port, \
+			_MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIC_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(port)		_MIPI_PORT(port, _MIPIA_HBP_COUNT, \
+					_MIPIC_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIC_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(port)		_MIPI_PORT(port, _MIPIA_HFP_COUNT, \
+					_MIPIC_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port)	_MIPI_PORT(port, \
+			_MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port)	_MIPI_PORT(port, \
+			_MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIC_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(port)		_MIPI_PORT(port, _MIPIA_VBP_COUNT, \
+					_MIPIC_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIC_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(port)		_MIPI_PORT(port, _MIPIA_VFP_COUNT, \
+					_MIPIC_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port)	_MIPI_PORT(port,	\
+		_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIC_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(port)		_MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
+					_MIPIC_DPI_CONTROL)
+#define  DPI_LP_MODE					(1 << 6)
+#define  BACKLIGHT_OFF					(1 << 5)
+#define  BACKLIGHT_ON					(1 << 4)
+#define  COLOR_MODE_OFF					(1 << 3)
+#define  COLOR_MODE_ON					(1 << 2)
+#define  TURN_ON					(1 << 1)
+#define  SHUTDOWN					(1 << 0)
+
+#define _MIPIA_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIC_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(port)		_MIPI_PORT(port, _MIPIA_DPI_DATA, \
+					_MIPIC_DPI_DATA)
+#define  COMMAND_BYTE_SHIFT				0
+#define  COMMAND_BYTE_MASK				(0x3f << 0)
+
+#define _MIPIA_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIC_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(port)		_MIPI_PORT(port, _MIPIA_INIT_COUNT, \
+					_MIPIC_INIT_COUNT)
+#define  MASTER_INIT_TIMER_SHIFT			0
+#define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port)	_MIPI_PORT(port, \
+			_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define  MAX_RETURN_PKT_SIZE_SHIFT			0
+#define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port)	_MIPI_PORT(port, \
+			_MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
+#define  DISABLE_VIDEO_BTA				(1 << 3)
+#define  IP_TG_CONFIG					(1 << 2)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE		(1 << 0)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
+#define  VIDEO_MODE_BURST				(3 << 0)
+
+#define _MIPIA_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIC_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(port)		_MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
+					_MIPIC_EOT_DISABLE)
+#define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 7)
+#define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 6)
+#define  LOW_CONTENTION_RECOVERY_DISABLE		(1 << 5)
+#define  HIGH_CONTENTION_RECOVERY_DISABLE		(1 << 4)
+#define  TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define  TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE		(1 << 2)
+#define  CLOCKSTOP					(1 << 1)
+#define  EOT_DISABLE					(1 << 0)
+
+#define _MIPIA_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIC_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(port)		_MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
+					_MIPIC_LP_BYTECLK)
+#define  LP_BYTECLK_SHIFT				0
+#define  LP_BYTECLK_MASK				(0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIC_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(port)		_MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
+					_MIPIC_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIC_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(port)		_MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
+					_MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port)		_MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
+					_MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIC_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(port)		_MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
+					_MIPIC_HS_GEN_CTRL)
+#define  LONG_PACKET_WORD_COUNT_SHIFT			8
+#define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
+#define  SHORT_PACKET_PARAM_SHIFT			8
+#define  SHORT_PACKET_PARAM_MASK			(0xffff << 8)
+#define  VIRTUAL_CHANNEL_SHIFT				6
+#define  VIRTUAL_CHANNEL_MASK				(3 << 6)
+#define  DATA_TYPE_SHIFT				0
+#define  DATA_TYPE_MASK					(0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
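+/*
+ * Illustrative sketch: a DCS short write queued on the HS generic channel
+ * would encode its header with the fields above; cmd, vc and port are
+ * placeholders, and MIPI_DSI_DCS_SHORT_WRITE comes from
+ * include/video/mipi_display.h:
+ *
+ *	u32 hdr = (cmd << SHORT_PACKET_PARAM_SHIFT) |
+ *		  (vc << VIRTUAL_CHANNEL_SHIFT) |
+ *		  MIPI_DSI_DCS_SHORT_WRITE;
+ *	I915_WRITE(MIPI_HS_GEN_CTRL(port), hdr);
+ */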
+
+#define _MIPIA_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port)	_MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
+					_MIPIC_GEN_FIFO_STAT)
+#define  DPI_FIFO_EMPTY					(1 << 28)
+#define  DBI_FIFO_EMPTY					(1 << 27)
+#define  LP_CTRL_FIFO_EMPTY				(1 << 26)
+#define  LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+#define  LP_CTRL_FIFO_FULL				(1 << 24)
+#define  HS_CTRL_FIFO_EMPTY				(1 << 18)
+#define  HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+#define  HS_CTRL_FIFO_FULL				(1 << 16)
+#define  LP_DATA_FIFO_EMPTY				(1 << 10)
+#define  LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+#define  LP_DATA_FIFO_FULL				(1 << 8)
+#define  HS_DATA_FIFO_EMPTY				(1 << 2)
+#define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+#define  HS_DATA_FIFO_FULL				(1 << 0)
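+/*
+ * Illustrative sketch: before queueing a header in MIPI_HS_GEN_CTRL the
+ * driver would typically poll the status bits above, e.g.:
+ *
+ *	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) &
+ *		      HS_CTRL_FIFO_FULL) == 0, 50))
+ *		DRM_ERROR("timeout waiting for HS ctrl FIFO\n");
+ */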
+
+#define _MIPIA_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port)	_MIPI_PORT(port, \
+			_MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
+#define  DBI_HS_LP_MODE_MASK				(1 << 0)
+#define  DBI_LP_MODE					(1 << 0)
+#define  DBI_HS_MODE					(0 << 0)
+
+#define _MIPIA_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIC_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(port)		_MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
+					_MIPIC_DPHY_PARAM)
+#define  EXIT_ZERO_COUNT_SHIFT				24
+#define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
+#define  TRAIL_COUNT_SHIFT				16
+#define  TRAIL_COUNT_MASK				(0x1f << 16)
+#define  CLK_ZERO_COUNT_SHIFT				8
+#define  CLK_ZERO_COUNT_MASK				(0xff << 8)
+#define  PREPARE_COUNT_SHIFT				0
+#define  PREPARE_COUNT_MASK				(0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIC_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(port)		_MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
+					_MIPIC_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base \
+							+ 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base \
+							+ 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port)	_MIPI_PORT(port, \
+	_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define  LP_HS_SSW_CNT_SHIFT				16
+#define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
+#define  HS_LP_PWR_SW_CNT_SHIFT				0
+#define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port)	_MIPI_PORT(port, \
+			_MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define  STOP_STATE_STALL_COUNTER_SHIFT			0
+#define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port)	_MIPI_PORT(port, \
+				_MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIC_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(port)	_MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
+					_MIPIC_INTR_EN_REG_1)
+#define  RX_CONTENTION_DETECTED				(1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL		(dev_priv->mipi_mmio_base + 0xb100)
+#define  DBI_TYPEC_ENABLE				(1 << 31)
+#define  DBI_TYPEC_WIP					(1 << 30)
+#define  DBI_TYPEC_OPTION_SHIFT				28
+#define  DBI_TYPEC_OPTION_MASK				(3 << 28)
+#define  DBI_TYPEC_FREQ_SHIFT				24
+#define  DBI_TYPEC_FREQ_MASK				(0xf << 24)
+#define  DBI_TYPEC_OVERRIDE				(1 << 8)
+#define  DBI_TYPEC_OVERRIDE_COUNTER_SHIFT		0
+#define  DBI_TYPEC_OVERRIDE_COUNTER_MASK		(0xff << 0)
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL			(dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIC_CTRL			(dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(port)			_MIPI_PORT(port, _MIPIA_CTRL, \
+					_MIPIC_CTRL)
+#define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
+#define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_1				(0 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_2				(1 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_4				(2 << 5)
+#define  READ_REQUEST_PRIORITY_SHIFT			3
+#define  READ_REQUEST_PRIORITY_MASK			(3 << 3)
+#define  READ_REQUEST_PRIORITY_LOW			(0 << 3)
+#define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
+#define  RGB_FLIP_TO_BGR				(1 << 2)
+
+#define  BXT_PIPE_SELECT_MASK				(7 << 7)
+#define  BXT_PIPE_SELECT_C				(2 << 7)
+#define  BXT_PIPE_SELECT_B				(1 << 7)
+#define  BXT_PIPE_SELECT_A				(0 << 7)
+
+#define _MIPIA_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIC_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(port)		_MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
+					_MIPIC_DATA_ADDRESS)
+#define  DATA_MEM_ADDRESS_SHIFT				5
+#define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
+#define  DATA_VALID					(1 << 0)
+
+#define _MIPIA_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIC_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(port)		_MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
+					_MIPIC_DATA_LENGTH)
+#define  DATA_LENGTH_SHIFT				0
+#define  DATA_LENGTH_MASK				(0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port)	_MIPI_PORT(port, \
+				_MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define  COMMAND_MEM_ADDRESS_SHIFT			5
+#define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
+#define  AUTO_PWG_ENABLE				(1 << 2)
+#define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
+#define  COMMAND_VALID					(1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIC_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(port)	_MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
+					_MIPIC_COMMAND_LENGTH)
+#define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
+#define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
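+/*
+ * Illustrative sketch: the four per-byte length fields pack into a single
+ * write, e.g. for hypothetical lengths len[0..3]:
+ *
+ *	u32 val = 0;
+ *	for (i = 0; i < 4; i++)
+ *		val |= len[i] << COMMAND_LENGTH_SHIFT(i);
+ *	I915_WRITE(MIPI_COMMAND_LENGTH(port), val);
+ */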
+
+#define _MIPIA_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) \
+	(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+					+ 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIC_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(port)	_MIPI_PORT(port, \
+				_MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define  READ_DATA_VALID(n)				(1 << (n))
+
+/* For UMS only (deprecated): */
+#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0		0xb020	/* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0		0xc800	/* Graphics MOCS base register */
+#define GEN9_MFX0_MOCS_0	0xc900	/* Media 0 MOCS base register */
+#define GEN9_MFX1_MOCS_0	0xca00	/* Media 1 MOCS base register */
+#define GEN9_VEBOX_MOCS_0	0xcb00	/* Video MOCS base register */
+#define GEN9_BLT_MOCS_0		0xcc00	/* Blitter MOCS base register */
+
+#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
new file mode 100644
index 0000000..2d91821
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -0,0 +1,196 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static void i915_save_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Display arbitration control */
+	if (INTEL_INFO(dev)->gen <= 4)
+		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+
+	/* LVDS state */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+
+	/* Panel power sequencer */
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+	} else if (!IS_VALLEYVIEW(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+	}
+
+	/* save FBC interval */
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+}
+
+static void i915_restore_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask = 0xffffffff;
+
+	/* Display arbitration */
+	if (INTEL_INFO(dev)->gen <= 4)
+		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+	mask = ~LVDS_PORT_EN;
+
+	/* LVDS state */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
+
+	/* Panel power sequencer */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+	} else if (!IS_VALLEYVIEW(dev)) {
+		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+	}
+
+	/* Only restore FBC state on platforms that support FBC */
+	intel_fbc_disable(dev_priv);
+
+	/* restore FBC interval */
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+		I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+
+	i915_redisable_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_save_display(dev);
+
+	if (IS_GEN4(dev))
+		pci_read_config_word(dev->pdev, GCDGMBUS,
+				     &dev_priv->regfile.saveGCDGMBUS);
+
+	/* Cache mode state */
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space */
+	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+		for (i = 0; i < 7; i++) {
+			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
+			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+		}
+		for (i = 0; i < 3; i++)
+			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+	} else if (IS_GEN2(dev_priv)) {
+		for (i = 0; i < 7; i++)
+			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
+		for (i = 0; i < 16; i++) {
+			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
+			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+		}
+		for (i = 0; i < 3; i++)
+			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_restore_fences(dev);
+
+	if (IS_GEN4(dev))
+		pci_write_config_word(dev->pdev, GCDGMBUS,
+				      dev_priv->regfile.saveGCDGMBUS);
+	i915_restore_display(dev);
+
+	/* Cache mode state */
+	if (INTEL_INFO(dev)->gen < 7)
+		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+			   0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+
+	/* Scratch space */
+	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+		for (i = 0; i < 7; i++) {
+			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
+			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+		}
+		for (i = 0; i < 3; i++)
+			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+	} else if (IS_GEN2(dev_priv)) {
+		for (i = 0; i < 7; i++)
+			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
+		for (i = 0; i < 16; i++) {
+			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
+			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+		}
+		for (i = 0; i < 3; i++)
+			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_i2c_reset(dev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 0000000..50ce9ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+#ifdef CONFIG_PM
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 raw_time; /* 32b value may overflow during fixed point math */
+	u64 units = 128ULL, div = 100000ULL;
+	u32 ret;
+
+	if (!intel_enable_rc6(dev))
+		return 0;
+
+	intel_runtime_pm_get(dev_priv);
+
+	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(dev)) {
+		units = 1;
+		div = dev_priv->czclk_freq;
+
+		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+			units <<= 8;
+	} else if (IS_BROXTON(dev)) {
+		units = 1;
+		div = 1200;		/* 833.33ns */
+	}
+
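+	/*
+	 * With the default units = 128 and div = 100000 above, a counter
+	 * that ticks every 1.28us is converted to milliseconds
+	 * (ticks * 1.28us / 1000us) using integer math only.
+	 */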
+	raw_time = I915_READ(reg) * units;
+	ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+static ssize_t
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
+}
+
+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+	&dev_attr_rc6_enable.attr,
+	&dev_attr_rc6_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+	.name = power_group_name,
+	.attrs =  rc6_attrs
+};
+
+static struct attribute *rc6p_attrs[] = {
+	&dev_attr_rc6p_residency_ms.attr,
+	&dev_attr_rc6pp_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+	.name = power_group_name,
+	.attrs =  rc6p_attrs
+};
+
+static struct attribute *media_rc6_attrs[] = {
+	&dev_attr_media_rc6_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+	.name = power_group_name,
+	.attrs =  media_rc6_attrs
+};
+#endif
+
+static int l3_access_valid(struct drm_device *dev, loff_t offset)
+{
+	if (!HAS_L3_DPF(dev))
+		return -EPERM;
+
+	if (offset % 4 != 0)
+		return -EINVAL;
+
+	if (offset >= GEN7_L3LOG_SIZE)
+		return -ENXIO;
+
+	return 0;
+}
+
+static ssize_t
+i915_l3_read(struct file *filp, struct kobject *kobj,
+	     struct bin_attribute *attr, char *buf,
+	     loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
+	struct drm_device *drm_dev = dminor->dev;
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	int slice = (int)(uintptr_t)attr->private;
+	int ret;
+
+	count = round_down(count, 4);
+
+	ret = l3_access_valid(drm_dev, offset);
+	if (ret)
+		return ret;
+
+	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
+	ret = i915_mutex_lock_interruptible(drm_dev);
+	if (ret)
+		return ret;
+
+	if (dev_priv->l3_parity.remap_info[slice])
+		memcpy(buf,
+		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       count);
+	else
+		memset(buf, 0, count);
+
+	mutex_unlock(&drm_dev->struct_mutex);
+
+	return count;
+}
+
+static ssize_t
+i915_l3_write(struct file *filp, struct kobject *kobj,
+	      struct bin_attribute *attr, char *buf,
+	      loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
+	struct drm_device *drm_dev = dminor->dev;
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct intel_context *ctx;
+	u32 *temp = NULL; /* Just here to make handling failures easy */
+	int slice = (int)(uintptr_t)attr->private;
+	int ret;
+
+	if (!HAS_HW_CONTEXTS(drm_dev))
+		return -ENXIO;
+
+	ret = l3_access_valid(drm_dev, offset);
+	if (ret)
+		return ret;
+
+	ret = i915_mutex_lock_interruptible(drm_dev);
+	if (ret)
+		return ret;
+
+	if (!dev_priv->l3_parity.remap_info[slice]) {
+		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+		if (!temp) {
+			mutex_unlock(&drm_dev->struct_mutex);
+			return -ENOMEM;
+		}
+	}
+
+	ret = i915_gpu_idle(drm_dev);
+	if (ret) {
+		kfree(temp);
+		mutex_unlock(&drm_dev->struct_mutex);
+		return ret;
+	}
+
+	/*
+	 * TODO: Ideally we really want a GPU reset here to make sure errors
+	 * aren't propagated. Since I cannot find a stable way to reset the GPU
+	 * at this point it is left as a TODO.
+	 */
+	if (temp)
+		dev_priv->l3_parity.remap_info[slice] = temp;
+
+	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
+
+	/* NB: We defer the remapping until we switch to the context */
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		ctx->remap_slice |= (1<<slice);
+
+	mutex_unlock(&drm_dev->struct_mutex);
+
+	return count;
+}
+
+static struct bin_attribute dpf_attrs = {
+	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL,
+	.private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL,
+	.private = (void *)1
+};
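+/*
+ * Illustrative note: the binary attributes above expose the L3 remap table
+ * under the card's sysfs directory, so userspace can read or update it with
+ * plain file I/O; the path below is an assumption and may vary:
+ *
+ *	dd if=/sys/class/drm/card0/l3_parity bs=4 count=1
+ */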
+
+static ssize_t gt_act_freq_mhz_show(struct device *kdev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (IS_VALLEYVIEW(dev_priv->dev)) {
+		u32 freq;
+		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+	} else {
+		u32 rpstat = I915_READ(GEN6_RPSTAT1);
+		if (IS_GEN9(dev_priv))
+			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+		else
+			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+		ret = intel_gpu_freq(dev_priv, ret);
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE,
+			"%d\n",
+			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	val = intel_freq_opcode(dev_priv, val);
+
+	if (val < dev_priv->rps.min_freq ||
+	    val > dev_priv->rps.max_freq ||
+	    val < dev_priv->rps.min_freq_softlimit) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	if (val > dev_priv->rps.rp0_freq)
+		DRM_DEBUG("User requested overclocking to %d\n",
+			  intel_gpu_freq(dev_priv, val));
+
+	dev_priv->rps.max_freq_softlimit = val;
+
+	val = clamp_t(int, dev_priv->rps.cur_freq,
+		      dev_priv->rps.min_freq_softlimit,
+		      dev_priv->rps.max_freq_softlimit);
+
+	/* We still need *_set_rps to process the new max_delay and
+	 * update the interrupt limits and PMINTRMSK even though
+	 * frequency request may be unchanged. */
+	intel_set_rps(dev, val);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	val = intel_freq_opcode(dev_priv, val);
+
+	if (val < dev_priv->rps.min_freq ||
+	    val > dev_priv->rps.max_freq ||
+	    val > dev_priv->rps.max_freq_softlimit) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.min_freq_softlimit = val;
+
+	val = clamp_t(int, dev_priv->rps.cur_freq,
+		      dev_priv->rps.min_freq_softlimit,
+		      dev_priv->rps.max_freq_softlimit);
+
+	/* We still need *_set_rps to process the new min_delay and
+	 * update the interrupt limits and PMINTRMSK even though
+	 * frequency request may be unchanged. */
+	intel_set_rps(dev, val);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return count;
+}
+
+static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
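+/*
+ * Illustrative note: once registered, these attributes let userspace query
+ * and clamp the GPU frequency through sysfs; the card path is assumed:
+ *
+ *	cat /sys/class/drm/card0/gt_cur_freq_mhz
+ *	echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
+ */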
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (attr == &dev_attr_gt_RP0_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+	else if (attr == &dev_attr_gt_RP1_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+	else if (attr == &dev_attr_gt_RPn_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+	else
+		BUG();
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+	&dev_attr_gt_act_freq_mhz.attr,
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	NULL,
+};
+
+static const struct attribute *vlv_attrs[] = {
+	&dev_attr_gt_act_freq_mhz.attr,
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	&dev_attr_vlv_rpe_freq_mhz.attr,
+	NULL,
+};
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	struct i915_error_state_file_priv error_priv;
+	struct drm_i915_error_state_buf error_str;
+	ssize_t ret_count = 0;
+	int ret;
+
+	memset(&error_priv, 0, sizeof(error_priv));
+
+	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
+	if (ret)
+		return ret;
+
+	error_priv.dev = dev;
+	i915_error_state_get(dev, &error_priv);
+
+	ret = i915_error_state_to_str(&error_str, &error_priv);
+	if (ret)
+		goto out;
+
+	ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+	memcpy(buf, error_str.buf, ret_count);
+out:
+	i915_error_state_put(&error_priv);
+	i915_error_state_buf_release(&error_str);
+
+	return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+				 struct bin_attribute *attr, char *buf,
+				 loff_t off, size_t count)
+{
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static struct bin_attribute error_state_attr = {
+	.attr.name = "error",
+	.attr.mode = S_IRUSR | S_IWUSR,
+	.size = 0,
+	.read = error_state_read,
+	.write = error_state_write,
+};
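+/*
+ * Illustrative note: the "error" node dumps the last captured GPU error
+ * state, and writing anything to it clears the record; the card path is
+ * assumed:
+ *
+ *	cat /sys/class/drm/card0/error > gpu-hang.txt
+ *	echo 1 > /sys/class/drm/card0/error
+ */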
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+	int ret;
+
+#ifdef CONFIG_PM
+	if (HAS_RC6(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&rc6_attr_group);
+		if (ret)
+			DRM_ERROR("RC6 residency sysfs setup failed\n");
+	}
+	if (HAS_RC6p(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&rc6p_attr_group);
+		if (ret)
+			DRM_ERROR("RC6p residency sysfs setup failed\n");
+	}
+	if (IS_VALLEYVIEW(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&media_rc6_attr_group);
+		if (ret)
+			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+	}
+#endif
+	if (HAS_L3_DPF(dev)) {
+		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+		if (ret)
+			DRM_ERROR("l3 parity sysfs setup failed\n");
+
+		if (NUM_L3_SLICES(dev) > 1) {
+			ret = device_create_bin_file(dev->primary->kdev,
+						     &dpf_attrs_1);
+			if (ret)
+				DRM_ERROR("l3 parity slice 1 setup failed\n");
+		}
+	}
+
+	ret = 0;
+	if (IS_VALLEYVIEW(dev))
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+	if (ret)
+		DRM_ERROR("RPS sysfs setup failed\n");
+
+	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+				    &error_state_attr);
+	if (ret)
+		DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
+	if (IS_VALLEYVIEW(dev))
+		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+	else
+		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
+	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+#endif
+}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 0000000..04fe849
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,818 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_INCLUDE_FILE i915_trace
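+
+/*
+ * Illustrative note: the tracepoints below can be enabled at runtime via
+ * tracefs; the mount point is assumed:
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/i915/enable
+ *	cat /sys/kernel/debug/tracing/trace_pipe
+ */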
+
+/* pipe updates */
+
+TRACE_EVENT(i915_pipe_update_start,
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     __field(u32, min)
+			     __field(u32, max)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+										       crtc->pipe);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   __entry->min = crtc->debug.min_vbl;
+			   __entry->max = crtc->debug.max_vbl;
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_vblank_evaded,
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     __field(u32, min)
+			     __field(u32, max)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = crtc->debug.start_vbl_count;
+			   __entry->scanline = crtc->debug.scanline_start;
+			   __entry->min = crtc->debug.min_vbl;
+			   __entry->max = crtc->debug.max_vbl;
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_end,
+	    TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
+	    TP_ARGS(crtc, frame, scanline_end),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = frame;
+			   __entry->scanline = scanline_end;
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe), __entry->frame,
+		      __entry->scanline)
+);
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->size = obj->base.size;
+			   ),
+
+	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_shrink,
+	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
+	    TP_ARGS(i915, target, flags),
+
+	    TP_STRUCT__entry(
+			     __field(int, dev)
+			     __field(unsigned long, target)
+			     __field(unsigned, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = i915->dev->primary->index;
+			   __entry->target = target;
+			   __entry->flags = flags;
+			   ),
+
+	    TP_printk("dev=%d, target=%lu, flags=%x",
+		      __entry->dev, __entry->target, __entry->flags)
+);
+
+TRACE_EVENT(i915_vma_bind,
+	    TP_PROTO(struct i915_vma *vma, unsigned flags),
+	    TP_ARGS(vma, flags),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(struct i915_address_space *, vm)
+			     __field(u64, offset)
+			     __field(u32, size)
+			     __field(unsigned, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = vma->obj;
+			   __entry->vm = vma->vm;
+			   __entry->offset = vma->node.start;
+			   __entry->size = vma->node.size;
+			   __entry->flags = flags;
+			   ),
+
+	    TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
+		      __entry->obj, __entry->offset, __entry->size,
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
+		      __entry->vm)
+);
+
+TRACE_EVENT(i915_vma_unbind,
+	    TP_PROTO(struct i915_vma *vma),
+	    TP_ARGS(vma),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(struct i915_address_space *, vm)
+			     __field(u64, offset)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = vma->obj;
+			   __entry->vm = vma->vm;
+			   __entry->offset = vma->node.start;
+			   __entry->size = vma->node.size;
+			   ),
+
+	    TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
+		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
+);
+
+#define VM_TO_TRACE_NAME(vm) \
+	(i915_is_ggtt(vm) ? "G" : \
+		      "P")
+
+DECLARE_EVENT_CLASS(i915_va,
+	TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+	TP_ARGS(vm, start, length, name),
+
+	TP_STRUCT__entry(
+		__field(struct i915_address_space *, vm)
+		__field(u64, start)
+		__field(u64, end)
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__entry->vm = vm;
+		__entry->start = start;
+		__entry->end = start + length - 1;
+		__assign_str(name, name);
+	),
+
+	TP_printk("vm=%p (%s), 0x%llx-0x%llx",
+		  __entry->vm, __get_str(name),  __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_va, i915_va_alloc,
+	     TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+	     TP_ARGS(vm, start, length, name)
+);
+
+DECLARE_EVENT_CLASS(i915_px_entry,
+	TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
+	TP_ARGS(vm, px, start, px_shift),
+
+	TP_STRUCT__entry(
+		__field(struct i915_address_space *, vm)
+		__field(u32, px)
+		__field(u64, start)
+		__field(u64, end)
+	),
+
+	TP_fast_assign(
+		__entry->vm = vm;
+		__entry->px = px;
+		__entry->start = start;
+		__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
+	),
+
+	TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
+		  __entry->vm, __entry->px, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
+	     TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+	     TP_ARGS(vm, pde, start, pde_shift)
+);
+
+DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
+		   TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
+		   TP_ARGS(vm, pdpe, start, pdpe_shift),
+
+		   TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
+			     __entry->vm, __entry->px, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
+		   TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
+		   TP_ARGS(vm, pml4e, start, pml4e_shift),
+
+		   TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
+			     __entry->vm, __entry->px, __entry->start, __entry->end)
+);
+
+/* Avoid extra math because we only support two sizes. The format is defined by
+ * bitmap_scnprintf: each 32 bits is printed as 8 hex digits followed by a comma. */
+#define TRACE_PT_SIZE(bits) \
+	((((bits) == 1024) ? 288 : 144) + 1)
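+/*
+ * Worked example of the constant above: a 1024-bit bitmap prints as
+ * 1024 / 32 = 32 groups of 8 hex digits plus a comma (9 chars each),
+ * i.e. 288 bytes, and a 512-bit bitmap as 16 * 9 = 144; the +1 leaves
+ * room for the terminating NUL.
+ */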
+
+DECLARE_EVENT_CLASS(i915_page_table_entry_update,
+	TP_PROTO(struct i915_address_space *vm, u32 pde,
+		 struct i915_page_table *pt, u32 first, u32 count, u32 bits),
+	TP_ARGS(vm, pde, pt, first, count, bits),
+
+	TP_STRUCT__entry(
+		__field(struct i915_address_space *, vm)
+		__field(u32, pde)
+		__field(u32, first)
+		__field(u32, last)
+		__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
+	),
+
+	TP_fast_assign(
+		__entry->vm = vm;
+		__entry->pde = pde;
+		__entry->first = first;
+		__entry->last = first + count - 1;
+		scnprintf(__get_str(cur_ptes),
+			  TRACE_PT_SIZE(bits),
+			  "%*pb",
+			  bits,
+			  pt->used_ptes);
+	),
+
+	TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
+		  __entry->vm, __entry->pde, __entry->last, __entry->first,
+		  __get_str(cur_ptes))
+);
+
+DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
+	TP_PROTO(struct i915_address_space *vm, u32 pde,
+		 struct i915_page_table *pt, u32 first, u32 count, u32 bits),
+	TP_ARGS(vm, pde, pt, first, count, bits)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+	    TP_ARGS(obj, old_read, old_write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, read_domains)
+			     __field(u32, write_domain)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
+			   ),
+
+	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
+		      __entry->obj,
+		      __entry->read_domains >> 16,
+		      __entry->read_domains & 0xffff,
+		      __entry->write_domain >> 16,
+		      __entry->write_domain & 0xffff)
+);
+
+TRACE_EVENT(i915_gem_object_pwrite,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_fault,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+	    TP_ARGS(obj, index, gtt, write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, index)
+			     __field(bool, gtt)
+			     __field(bool, write)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->index = index;
+			   __entry->gtt = gtt;
+			   __entry->write = write;
+			   ),
+
+	    TP_printk("obj=%p, %s index=%u %s",
+		      __entry->obj,
+		      __entry->gtt ? "GTT" : "CPU",
+		      __entry->index,
+		      __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+	     TP_PROTO(struct drm_i915_gem_object *obj),
+	     TP_ARGS(obj)
+);
+
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj)
+);
+
+TRACE_EVENT(i915_gem_evict,
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+	    TP_ARGS(dev, size, align, flags),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, size)
+			     __field(u32, align)
+			     __field(unsigned, flags)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->size = size;
+			   __entry->align = align;
+			   __entry->flags = flags;
+			  ),
+
+	    TP_printk("dev=%d, size=%d, align=%d %s",
+		      __entry->dev, __entry->size, __entry->align,
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
+);
+
+TRACE_EVENT(i915_gem_evict_everything,
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			  ),
+
+	    TP_printk("dev=%d", __entry->dev)
+);
+
+TRACE_EVENT(i915_gem_evict_vm,
+	    TP_PROTO(struct i915_address_space *vm),
+	    TP_ARGS(vm),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(struct i915_address_space *, vm)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = vm->dev->primary->index;
+			   __entry->vm = vm;
+			  ),
+
+	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+	    TP_PROTO(struct drm_i915_gem_request *to_req,
+		     struct intel_engine_cs *from,
+		     struct drm_i915_gem_request *req),
+	    TP_ARGS(to_req, from, req),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, sync_from)
+			     __field(u32, sync_to)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = from->dev->primary->index;
+			   __entry->sync_from = from->id;
+			   __entry->sync_to = to_req->ring->id;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   ),
+
+	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+		      __entry->dev,
+		      __entry->sync_from, __entry->sync_to,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_ring_dispatch,
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+	    TP_ARGS(req, flags),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->flags = flags;
+			   i915_trace_irq_get(ring, req);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
+);
+
+TRACE_EVENT(i915_gem_ring_flush,
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+	    TP_ARGS(req, invalidate, flush),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, invalidate)
+			     __field(u32, flush)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = req->ring->dev->primary->index;
+			   __entry->ring = req->ring->id;
+			   __entry->invalidate = invalidate;
+			   __entry->flush = flush;
+			   ),
+
+	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+		      __entry->dev, __entry->ring,
+		      __entry->invalidate, __entry->flush)
+);
+
+DECLARE_EVENT_CLASS(i915_gem_request,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
+);
+
+TRACE_EVENT(i915_gem_request_notify,
+	    TP_PROTO(struct intel_engine_cs *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = ring->get_seqno(ring, false);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     __field(bool, blocking)
+			     ),
+
+	    /* NB: the blocking information is racy since mutex_is_locked
+	     * doesn't check that the current thread holds the lock. The only
+	     * other option would be to pass the boolean information of whether
+	     * or not the caller was blocking down through the stack, which is
+	     * less desirable.
+	     */
+	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->blocking =
+				     mutex_is_locked(&ring->dev->struct_mutex);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+		      __entry->dev, __entry->ring,
+		      __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
+);
+
+TRACE_EVENT(i915_flip_request,
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_i915_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_i915_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT_CONDITION(i915_reg_rw,
+	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+	TP_ARGS(write, reg, val, len, trace),
+
+	TP_CONDITION(trace),
+
+	TP_STRUCT__entry(
+		__field(u64, val)
+		__field(u32, reg)
+		__field(u16, write)
+		__field(u16, len)
+		),
+
+	TP_fast_assign(
+		__entry->val = (u64)val;
+		__entry->reg = reg;
+		__entry->write = write;
+		__entry->len = len;
+		),
+
+	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		__entry->write ? "write" : "read",
+		__entry->reg, __entry->len,
+		(u32)(__entry->val & 0xffffffff),
+		(u32)(__entry->val >> 32))
+);
+
+TRACE_EVENT(intel_gpu_freq_change,
+	    TP_PROTO(u32 freq),
+	    TP_ARGS(freq),
+
+	    TP_STRUCT__entry(
+			     __field(u32, freq)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->freq = freq;
+			   ),
+
+	    TP_printk("new_freq=%u", __entry->freq)
+);
+
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled, each process using drm will allocate at least one
+ * translation table. With these traces it is possible to keep track of the
+ * allocation and of the lifetime of the tables; this can be used during
+ * testing/debug to verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also printed
+ * by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
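+/*
+ * Example (illustrative): with these events enabled, a leak check can pair
+ * up trace lines by vm pointer, e.g.
+ *   i915_ppgtt_create:  dev=0, vm=ffff88021e0c8000
+ *   i915_ppgtt_release: dev=0, vm=ffff88021e0c8000
+ * A create without a matching release at the end of a test indicates a
+ * leaked ppgtt. (The pointer value here is hypothetical.)
+ */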
+DECLARE_EVENT_CLASS(i915_ppgtt,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm),
+
+	TP_STRUCT__entry(
+			__field(struct i915_address_space *, vm)
+			__field(u32, dev)
+	),
+
+	TP_fast_assign(
+			__entry->vm = vm;
+			__entry->dev = vm->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm)
+);
+
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
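+/*
+ * Example (illustrative): these tracepoints can be enabled at runtime via
+ * tracefs, assuming it is mounted in the usual location:
+ *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_create/enable
+ *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_free/enable
+ *   cat /sys/kernel/debug/tracing/trace_pipe
+ */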
+DECLARE_EVENT_CLASS(i915_context,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx),
+
+	TP_STRUCT__entry(
+			__field(u32, dev)
+			__field(struct intel_context *, ctx)
+			__field(struct i915_address_space *, vm)
+	),
+
+	TP_fast_assign(
+			__entry->ctx = ctx;
+			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+			__entry->dev = ctx->i915->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+		  __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint allows tracking of the mm switch, which is an important point
+ * in the lifetime of the vm in the legacy submission path. This tracepoint is
+ * called only if full ppgtt is enabled.
+ */
+TRACE_EVENT(switch_mm,
+	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+	TP_ARGS(ring, to),
+
+	TP_STRUCT__entry(
+			__field(u32, ring)
+			__field(struct intel_context *, to)
+			__field(struct i915_address_space *, vm)
+			__field(u32, dev)
+	),
+
+	TP_fast_assign(
+			__entry->ring = ring->id;
+			__entry->to = to;
+			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+			__entry->dev = ring->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 0000000..f1df2bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
new file mode 100644
index 0000000..5eee75b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_drv.h"
+#include "i915_vgpu.h"
+
+/**
+ * DOC: Intel GVT-g guest support
+ *
+ * Intel GVT-g is a graphics virtualization technology which shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented with a virtual GPU (vGPU) that has features
+ * equivalent to those of the underlying physical GPU (pGPU), so the i915
+ * driver can run seamlessly in a virtual machine. This file provides vGPU
+ * specific optimizations when running in a virtual machine, to reduce the
+ * complexity of vGPU emulation and to improve the overall performance.
+ *
+ * A primary technique introduced here is so-called "address space
+ * ballooning". Intel GVT-g partitions global graphics memory among multiple
+ * VMs, so each VM can directly access a portion of the memory without the
+ * hypervisor's intervention, e.g. filling textures or queuing commands.
+ * However, with this partitioning an unmodified i915 driver would assume a
+ * smaller graphics memory starting from address ZERO, which would require
+ * the vGPU emulation module to translate graphics addresses between 'guest
+ * view' and 'host view' for all registers and command opcodes that contain
+ * a graphics memory address. To reduce this complexity, Intel GVT-g
+ * introduces "address space ballooning": the exact partitioning layout is
+ * exposed to each guest i915 driver, which then reserves the portions not
+ * allocated to it and excludes them from allocation. Thus the vGPU
+ * emulation module only needs to scan and validate graphics addresses,
+ * without the complexity of address translation.
+ *
+ */
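+
+/*
+ * Worked example (hypothetical numbers): if the host assigns this vGPU the
+ * mappable range [256MB, 512MB) and the unmappable range [1GB, 2GB) out of
+ * a 4GB GGTT, the guest balloons out four regions: [0, 256MB) and
+ * [512MB, mappable_end) in the aperture, plus [mappable_end, 1GB) and
+ * [2GB, 4GB - PAGE_SIZE) in the non-mappable part (the last page is left
+ * for the guard page). See intel_vgt_balloon() below.
+ */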
+
+/**
+ * i915_check_vgpu - detect virtual GPU
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage, to detect whether
+ * we are running on a vGPU.
+ */
+void i915_check_vgpu(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint64_t magic;
+	uint32_t version;
+
+	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+
+	if (!IS_HASWELL(dev))
+		return;
+
+	magic = readq(dev_priv->regs + vgtif_reg(magic));
+	if (magic != VGT_MAGIC)
+		return;
+
+	version = INTEL_VGT_IF_VERSION_ENCODE(
+		readw(dev_priv->regs + vgtif_reg(version_major)),
+		readw(dev_priv->regs + vgtif_reg(version_minor)));
+	if (version != INTEL_VGT_IF_VERSION) {
+		DRM_INFO("VGT interface version mismatch!\n");
+		return;
+	}
+
+	dev_priv->vgpu.active = true;
+	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+}
+
+struct _balloon_info_ {
+	/*
+	 * Up to two regions may be ballooned in each of the mappable and
+	 * unmappable graphics memory ranges. Here, indices 0/1 are for
+	 * mappable graphics memory, 2/3 for unmappable graphics memory.
+	 */
+	struct drm_mm_node space[4];
+};
+
+static struct _balloon_info_ bl_info;
+
+/**
+ * intel_vgt_deballoon - deballoon reserved graphics address chunks
+ *
+ * This function is called to deallocate the ballooned-out graphics memory,
+ * when the driver is unloaded or when ballooning fails.
+ */
+void intel_vgt_deballoon(void)
+{
+	int i;
+
+	DRM_DEBUG("VGT deballoon.\n");
+
+	for (i = 0; i < 4; i++) {
+		if (bl_info.space[i].allocated)
+			drm_mm_remove_node(&bl_info.space[i]);
+	}
+
+	memset(&bl_info, 0, sizeof(bl_info));
+}
+
+static int vgt_balloon_space(struct drm_mm *mm,
+			     struct drm_mm_node *node,
+			     unsigned long start, unsigned long end)
+{
+	unsigned long size = end - start;
+
+	if (start == end)
+		return -EINVAL;
+
+	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+		 start, end, size / 1024);
+
+	node->start = start;
+	node->size = size;
+
+	return drm_mm_reserve_node(mm, node);
+}
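+
+/*
+ * Example (illustrative): ballooning out the low aperture below our
+ * assigned range would be
+ *
+ *   vgt_balloon_space(&ggtt_vm->mm, &bl_info.space[0],
+ *                     ggtt_vm->start, mappable_base);
+ *
+ * which marks [ggtt_vm->start, mappable_base) as reserved in the GGTT
+ * drm_mm, exactly as intel_vgt_balloon() does below.
+ */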
+
+/**
+ * intel_vgt_balloon - balloon out reserved graphics address chunks
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage, to balloon out the
+ * graphics address space allocated to other vGPUs, by marking those ranges
+ * as reserved. The ballooning-related knowledge (starting address and size
+ * of the mappable/unmappable graphics memory) is described in the vgt_if
+ * structure in a reserved MMIO range.
+ *
+ * To give an example, the drawing below depicts one typical scenario after
+ * ballooning. Here vGPU1 has two pieces of its graphics address space
+ * ballooned out, one in the mappable part and one in the non-mappable part.
+ * From vGPU1's point of view, the total size is the same as that of the
+ * physical GPU, with the start address of its graphics space being zero.
+ * Yet some portions are ballooned out (the shaded parts, which are marked
+ * as reserved by the drm allocator). From the host's point of view, the
+ * graphics address space is partitioned among multiple vGPUs in different
+ * VMs.
+ *
+ *                        vGPU1 view         Host view
+ *             0 ------> +-----------+     +-----------+
+ *               ^       |///////////|     |   vGPU3   |
+ *               |       |///////////|     +-----------+
+ *               |       |///////////|     |   vGPU2   |
+ *               |       +-----------+     +-----------+
+ *        mappable GM    | available | ==> |   vGPU1   |
+ *               |       +-----------+     +-----------+
+ *               |       |///////////|     |           |
+ *               v       |///////////|     |   Host    |
+ *               +=======+===========+     +===========+
+ *               ^       |///////////|     |   vGPU3   |
+ *               |       |///////////|     +-----------+
+ *               |       |///////////|     |   vGPU2   |
+ *               |       +-----------+     +-----------+
+ *      unmappable GM    | available | ==> |   vGPU1   |
+ *               |       +-----------+     +-----------+
+ *               |       |///////////|     |           |
+ *               |       |///////////|     |   Host    |
+ *               v       |///////////|     |           |
+ * total GM size ------> +-----------+     +-----------+
+ *
+ * Returns:
+ * zero on success, non-zero if the configuration is invalid or ballooning
+ * failed
+ */
+int intel_vgt_balloon(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+
+	unsigned long mappable_base, mappable_size, mappable_end;
+	unsigned long unmappable_base, unmappable_size, unmappable_end;
+	int ret;
+
+	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
+	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
+	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
+	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+
+	mappable_end = mappable_base + mappable_size;
+	unmappable_end = unmappable_base + unmappable_size;
+
+	DRM_INFO("VGT ballooning configuration:\n");
+	DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+		 mappable_base, mappable_size / 1024);
+	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+		 unmappable_base, unmappable_size / 1024);
+
+	if (mappable_base < ggtt_vm->start ||
+	    mappable_end > dev_priv->gtt.mappable_end ||
+	    unmappable_base < dev_priv->gtt.mappable_end ||
+	    unmappable_end > ggtt_vm_end) {
+		DRM_ERROR("Invalid ballooning configuration!\n");
+		return -EINVAL;
+	}
+
+	/* Unmappable graphics memory ballooning */
+	if (unmappable_base > dev_priv->gtt.mappable_end) {
+		ret = vgt_balloon_space(&ggtt_vm->mm,
+					&bl_info.space[2],
+					dev_priv->gtt.mappable_end,
+					unmappable_base);
+
+		if (ret)
+			goto err;
+	}
+
+	/*
+	 * No need to partition out the last physical page,
+	 * because it is reserved for the guard page.
+	 */
+	if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
+		ret = vgt_balloon_space(&ggtt_vm->mm,
+					&bl_info.space[3],
+					unmappable_end,
+					ggtt_vm_end - PAGE_SIZE);
+		if (ret)
+			goto err;
+	}
+
+	/* Mappable graphics memory ballooning */
+	if (mappable_base > ggtt_vm->start) {
+		ret = vgt_balloon_space(&ggtt_vm->mm,
+					&bl_info.space[0],
+					ggtt_vm->start, mappable_base);
+
+		if (ret)
+			goto err;
+	}
+
+	if (mappable_end < dev_priv->gtt.mappable_end) {
+		ret = vgt_balloon_space(&ggtt_vm->mm,
+					&bl_info.space[1],
+					mappable_end,
+					dev_priv->gtt.mappable_end);
+
+		if (ret)
+			goto err;
+	}
+
+	DRM_INFO("VGT balloon successfully\n");
+	return 0;
+
+err:
+	DRM_ERROR("VGT balloon fail\n");
+	intel_vgt_deballoon();
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
new file mode 100644
index 0000000..21c97f4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_VGPU_H_
+#define _I915_VGPU_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE	0x78000
+#define VGT_PVINFO_SIZE	0x1000
+
+/*
+ * The following structure page is defined in GEN MMIO space
+ * for virtualization. (One page for now.)
+ */
+#define VGT_MAGIC         0x4776544776544776ULL	/* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
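+
+/*
+ * For example, INTEL_VGT_IF_VERSION_ENCODE(1, 0) yields 0x00010000, which
+ * is compared against the major/minor pair read back from the PVINFO page
+ * in i915_check_vgpu().
+ */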
+
+/*
+ * notifications from guest to vgpu device model
+ */
+enum vgt_g2v_type {
+	VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
+	VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
+	VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
+	VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
+	VGT_G2V_EXECLIST_CONTEXT_CREATE,
+	VGT_G2V_EXECLIST_CONTEXT_DESTROY,
+	VGT_G2V_MAX,
+};
+
+struct vgt_if {
+	uint64_t magic;		/* VGT_MAGIC */
+	uint16_t version_major;
+	uint16_t version_minor;
+	uint32_t vgt_id;	/* ID of vGT instance */
+	uint32_t rsv1[12];	/* pad to offset 0x40 */
+	/*
+	 *  Data structure describing the ballooning info of resources.
+	 *  Each VM can only have one contiguous region for now.
+	 *  (Scattered resources may be supported in the future.)
+	 *  (Starting at offset 0x40.)
+	 */
+	struct {
+		/* Aperture register ballooning */
+		struct {
+			uint32_t base;
+			uint32_t size;
+		} mappable_gmadr;	/* aperture */
+		/* GMADR register ballooning */
+		struct {
+			uint32_t base;
+			uint32_t size;
+		} nonmappable_gmadr;	/* non aperture */
+		/* allowed fence registers */
+		uint32_t fence_num;
+		uint32_t rsv2[3];
+	} avail_rs;		/* available/assigned resource */
+	uint32_t rsv3[0x200 - 24];	/* pad to half page */
+	/*
+	 * The bottom half page is for responses from the gfx driver to the
+	 * hypervisor.
+	 */
+	uint32_t rsv4;
+	uint32_t display_ready;	/* ready for display owner switch */
+
+	uint32_t rsv5[4];
+
+	uint32_t g2v_notify;
+	uint32_t rsv6[7];
+
+	uint32_t pdp0_lo;
+	uint32_t pdp0_hi;
+	uint32_t pdp1_lo;
+	uint32_t pdp1_hi;
+	uint32_t pdp2_lo;
+	uint32_t pdp2_hi;
+	uint32_t pdp3_lo;
+	uint32_t pdp3_hi;
+
+	uint32_t execlist_context_descriptor_lo;
+	uint32_t execlist_context_descriptor_hi;
+
+	uint32_t rsv7[0x200 - 24];	/* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+	(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
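+
+/*
+ * vgtif_reg() is effectively VGT_PVINFO_PAGE + offsetof(struct vgt_if, x).
+ * For example, vgtif_reg(magic) is 0x78000 and vgtif_reg(version_major)
+ * is 0x78008.
+ */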
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY     1  /* ready for display switch */
+
+extern void i915_check_vgpu(struct drm_device *dev);
+extern int intel_vgt_balloon(struct drm_device *dev);
+extern void intel_vgt_deballoon(void);
+
+#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 0000000..eb638a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,164 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+	acpi_handle dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+	0xd3, 0x73, 0xd8, 0x7e,
+	0xd0, 0xc2,
+	0x4f, 0x4e,
+	0xa8, 0x54,
+	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
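+
+/*
+ * The byte array above encodes the _DSM UUID
+ * 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c in the mixed-endian wire format
+ * ACPI expects (the first three fields little-endian, the rest big-endian).
+ */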
+
+static char *intel_dsm_port_name(u8 id)
+{
+	switch (id) {
+	case 0:
+		return "Reserved";
+	case 1:
+		return "Analog VGA";
+	case 2:
+		return "LVDS";
+	case 3:
+		return "Reserved";
+	case 4:
+		return "HDMI/DVI_B";
+	case 5:
+		return "HDMI/DVI_C";
+	case 6:
+		return "HDMI/DVI_D";
+	case 7:
+		return "DisplayPort_A";
+	case 8:
+		return "DisplayPort_B";
+	case 9:
+		return "DisplayPort_C";
+	case 0xa:
+		return "DisplayPort_D";
+	case 0xb:
+	case 0xc:
+	case 0xd:
+		return "Reserved";
+	case 0xe:
+		return "WiDi";
+	default:
+		return "bad type";
+	}
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+	switch (type) {
+	case 0:
+		return "unknown";
+	case 1:
+		return "No MUX, iGPU only";
+	case 2:
+		return "No MUX, dGPU only";
+	case 3:
+		return "MUXed between iGPU and dGPU";
+	default:
+		return "bad type";
+	}
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+	int i;
+	union acpi_object *pkg, *connector_count;
+
+	pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+			INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+			NULL, ACPI_TYPE_PACKAGE);
+	if (!pkg) {
+		DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+		return;
+	}
+
+	connector_count = &pkg->package.elements[0];
+	DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+		  (unsigned long long)connector_count->integer.value);
+	for (i = 1; i < pkg->package.count; i++) {
+		union acpi_object *obj = &pkg->package.elements[i];
+		union acpi_object *connector_id = &obj->package.elements[0];
+		union acpi_object *info = &obj->package.elements[1];
+		DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+			  (unsigned long long)connector_id->integer.value);
+		DRM_DEBUG_DRIVER("  port id: %s\n",
+		       intel_dsm_port_name(info->buffer.pointer[0]));
+		DRM_DEBUG_DRIVER("  display mux info: %s\n",
+		       intel_dsm_mux_type(info->buffer.pointer[1]));
+		DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
+		       intel_dsm_mux_type(info->buffer.pointer[2]));
+		DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
+		       intel_dsm_mux_type(info->buffer.pointer[3]));
+	}
+
+	ACPI_FREE(pkg);
+}
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+	acpi_handle dhandle;
+
+	dhandle = ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+			    1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
+		DRM_DEBUG_KMS("no _DSM method for intel device\n");
+		return false;
+	}
+
+	intel_dsm_priv.dhandle = dhandle;
+	intel_dsm_platform_mux_info();
+
+	return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_dsm = false;
+	int vga_count = 0;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+		has_dsm |= intel_dsm_pci_probe(pdev);
+	}
+
+	if (vga_count == 2 && has_dsm) {
+		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+		DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
+				 acpi_method_name);
+		return true;
+	}
+
+	return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+	if (!intel_dsm_detect())
+		return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
new file mode 100644
index 0000000..f1975f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+/**
+ * intel_connector_atomic_get_property - fetch connector property value
+ * @connector: connector to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers.  This entrypoint is used to fetch
+ * the current value of a driver-specific connector property.
+ */
+int
+intel_connector_atomic_get_property(struct drm_connector *connector,
+				    const struct drm_connector_state *state,
+				    struct drm_property *property,
+				    uint64_t *val)
+{
+	int i;
+
+	/*
+	 * TODO: We only have atomic modeset for planes at the moment, so the
+	 * crtc/connector code isn't quite ready yet.  Until it's ready,
+	 * continue to look up all property values in the DRM's shadow copy
+	 * in obj->properties->values[].
+	 *
+	 * When the crtc/connector state work matures, this function should
+	 * be updated to read the values out of the state structure instead.
+	 */
+	for (i = 0; i < connector->base.properties->count; i++) {
+		if (connector->base.properties->properties[i] == property) {
+			*val = connector->base.properties->values[i];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state;
+
+	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+	if (!crtc_state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+
+	crtc_state->update_pipe = false;
+
+	return &crtc_state->base;
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ * @state: crtc state object to destroy
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+			  struct drm_crtc_state *state)
+{
+	drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev: DRM device
+ * @intel_crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on staged scaling requests for
+ * a crtc and its planes. It is called from the crtc-level check path. If the
+ * request is supportable, it attaches scalers to the requested planes and
+ * crtc.
+ *
+ * This function takes into account the current scaler(s) in use by any
+ * planes that are not part of this atomic state.
+ *
+ * Returns:
+ *         0 - scalers were set up successfully
+ *         error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_device *dev,
+	struct intel_crtc *intel_crtc,
+	struct intel_crtc_state *crtc_state)
+{
+	struct drm_plane *plane = NULL;
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *plane_state = NULL;
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct drm_atomic_state *drm_state = crtc_state->base.state;
+	int num_scalers_need;
+	int i, j;
+
+	num_scalers_need = hweight32(scaler_state->scaler_users);
+
+	/*
+	 * High level flow:
+	 * - staged scaler requests are already in scaler_state->scaler_users
+	 * - check whether staged scaling requests can be supported
+	 * - add planes using scalers that aren't in current transaction
+	 * - assign scalers to requested users
+	 * - as part of plane commit, scalers will be committed
+	 *   (i.e., either attached or detached) to respective planes in hw
+	 * - as part of crtc_commit, scaler will be either attached or detached
+	 *   to crtc in hw
+	 */
+
+	/* fail if required scalers > available scalers */
+	if (num_scalers_need > intel_crtc->num_scalers) {
+		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
+			num_scalers_need, intel_crtc->num_scalers);
+		return -EINVAL;
+	}
+
+	/* walk through scaler_users bits and start assigning scalers */
+	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+		int *scaler_id;
+		const char *name;
+		int idx;
+
+		/* skip if scaler not required */
+		if (!(scaler_state->scaler_users & (1 << i)))
+			continue;
+
+		if (i == SKL_CRTC_INDEX) {
+			name = "CRTC";
+			idx = intel_crtc->base.base.id;
+
+			/* panel fitter case: assign as a crtc scaler */
+			scaler_id = &scaler_state->scaler_id;
+		} else {
+			name = "PLANE";
+
+			/* plane scaler case: assign as a plane scaler */
+			/* find the plane that set the bit as scaler_user */
+			plane = drm_state->planes[i];
+
+			/*
+			 * to enable/disable HQ mode, add planes that are using a
+			 * scaler into this transaction
+			 */
+			if (!plane) {
+				struct drm_plane_state *state;
+				plane = drm_plane_from_index(dev, i);
+				state = drm_atomic_get_plane_state(drm_state, plane);
+				if (IS_ERR(state)) {
+					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
+						plane->base.id);
+					return PTR_ERR(state);
+				}
+
+				/*
+				 * the plane is added after plane checks are run,
+				 * but since this plane is unchanged just do the
+				 * minimum required validation.
+				 */
+				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+					intel_crtc->atomic.wait_for_flips = true;
+				crtc_state->base.planes_changed = true;
+			}
+
+			intel_plane = to_intel_plane(plane);
+			idx = plane->base.id;
+
+			/* plane on different crtc cannot be a scaler user of this crtc */
+			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+				continue;
+
+			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+			scaler_id = &plane_state->scaler_id;
+		}
+
+		if (*scaler_id < 0) {
+			/* find a free scaler */
+			for (j = 0; j < intel_crtc->num_scalers; j++) {
+				if (!scaler_state->scalers[j].in_use) {
+					scaler_state->scalers[j].in_use = 1;
+					*scaler_id = j;
+					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+						intel_crtc->pipe, *scaler_id, name, idx);
+					break;
+				}
+			}
+		}
+
+		if (WARN_ON(*scaler_id < 0)) {
+			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
+			continue;
+		}
+
+		/* set scaler mode */
+		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
+			/*
+			 * when only 1 scaler is in use on either pipe A or B,
+			 * scaler 0 operates in high quality (HQ) mode.
+			 * In this case use scaler 0 to take advantage of HQ mode
+			 */
+			*scaler_id = 0;
+			scaler_state->scalers[0].in_use = 1;
+			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
+			scaler_state->scalers[1].in_use = 0;
+		} else {
+			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
+		}
+	}
+
+	return 0;
+}
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll_config *shared_dpll)
+{
+	enum intel_dpll_id i;
+
+	/* Copy shared dpll state */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		shared_dpll[i] = pll->config;
+	}
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+	if (!state->dpll_set) {
+		state->dpll_set = true;
+
+		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+						  state->shared_dpll);
+	}
+
+	return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+	drm_atomic_state_default_clear(&state->base);
+	state->dpll_set = false;
+}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
new file mode 100644
index 0000000..a119806
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic plane helpers
+ *
+ * The functions here are used by the atomic plane helper functions to
+ * implement legacy plane updates (i.e., drm_plane->update_plane() and
+ * drm_plane->disable_plane()).  This allows plane updates to use the
+ * atomic state infrastructure and perform plane updates as separate
+ * prepare/check/commit/cleanup steps.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+/**
+ * intel_create_plane_state - create plane state object
+ * @plane: drm plane
+ *
+ * Allocates a fresh plane state for the given plane and sets some of
+ * the state values to sensible initial values.
+ *
+ * Returns: A newly allocated plane state, or NULL on failure
+ */
+struct intel_plane_state *
+intel_create_plane_state(struct drm_plane *plane)
+{
+	struct intel_plane_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	state->base.plane = plane;
+	state->base.rotation = BIT(DRM_ROTATE_0);
+	state->ckey.flags = I915_SET_COLORKEY_NONE;
+
+	return state;
+}
+
+/**
+ * intel_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * Intel-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+intel_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state;
+	struct intel_plane_state *intel_state;
+
+	intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+
+	if (!intel_state)
+		return NULL;
+
+	state = &intel_state->base;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, state);
+
+	return state;
+}
+
+/**
+ * intel_plane_destroy_state - destroy plane state
+ * @plane: drm plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and Intel-specific) for the
+ * specified plane.
+ */
+void
+intel_plane_destroy_state(struct drm_plane *plane,
+			  struct drm_plane_state *state)
+{
+	drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static int intel_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_plane_state *intel_state = to_intel_plane_state(state);
+	struct drm_crtc_state *drm_crtc_state;
+	int ret;
+
+	crtc = crtc ? crtc : plane->state->crtc;
+	intel_crtc = to_intel_crtc(crtc);
+
+	/*
+	 * Both crtc and plane->crtc could be NULL if we're updating a
+	 * property while the plane is disabled.  We don't actually have
+	 * anything driver-specific we need to test in that case, so
+	 * just return success.
+	 */
+	if (!crtc)
+		return 0;
+
+	drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!drm_crtc_state))
+		return -EINVAL;
+
+	crtc_state = to_intel_crtc_state(drm_crtc_state);
+
+	/*
+	 * The original src/dest coordinates are stored in state->base, but
+	 * we want to keep another copy internal to our driver that we can
+	 * clip/modify ourselves.
+	 */
+	intel_state->src.x1 = state->src_x;
+	intel_state->src.y1 = state->src_y;
+	intel_state->src.x2 = state->src_x + state->src_w;
+	intel_state->src.y2 = state->src_y + state->src_h;
+	intel_state->dst.x1 = state->crtc_x;
+	intel_state->dst.y1 = state->crtc_y;
+	intel_state->dst.x2 = state->crtc_x + state->crtc_w;
+	intel_state->dst.y2 = state->crtc_y + state->crtc_h;
+
+	/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
+	intel_state->clip.x1 = 0;
+	intel_state->clip.y1 = 0;
+	intel_state->clip.x2 =
+		crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+	intel_state->clip.y2 =
+		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+
+	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+			DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * 90/270 is not allowed with RGB64 16:16:16:16,
+		 * RGB 16-bit 5:6:5, and Indexed 8-bit.
+		 * TBD: Add RGB64 case once it's added to the supported
+		 * format list.
+		 */
+		switch (state->fb->pixel_format) {
+		case DRM_FORMAT_C8:
+		case DRM_FORMAT_RGB565:
+			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+					drm_get_format_name(state->fb->pixel_format));
+			return -EINVAL;
+
+		default:
+			break;
+		}
+	}
+
+	intel_state->visible = false;
+	ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+	if (ret)
+		return ret;
+
+	return intel_plane_atomic_calc_changes(&crtc_state->base, state);
+}
+
+static void intel_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_plane_state *intel_state =
+		to_intel_plane_state(plane->state);
+
+	intel_plane->commit_plane(plane, intel_state);
+}
+
+const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+	.prepare_fb = intel_prepare_plane_fb,
+	.cleanup_fb = intel_cleanup_plane_fb,
+	.atomic_check = intel_plane_atomic_check,
+	.atomic_update = intel_plane_atomic_update,
+};
+
+/**
+ * intel_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers.  This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+int
+intel_plane_atomic_get_property(struct drm_plane *plane,
+				const struct drm_plane_state *state,
+				struct drm_property *property,
+				uint64_t *val)
+{
+	DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+	return -EINVAL;
+}
+
+/**
+ * intel_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+intel_plane_atomic_set_property(struct drm_plane *plane,
+				struct drm_plane_state *state,
+				struct drm_property *property,
+				uint64_t val)
+{
+	DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+	return -EINVAL;
+}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 0000000..4dccd9b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/component.h>
+#include <drm/i915_component.h>
+#include "intel_drv.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
+ *
+ * The codec and controller sequences could be done either in parallel or
+ * serially, but generally the ELDV/PD change in the codec sequence indicates
+ * to the audio driver that the controller sequence should start. Indeed,
+ * most of the
+ * co-operation between the graphics and audio drivers is handled via audio
+ * related registers. (The notable exception is the power management, not
+ * covered here.)
+ *
+ * The struct i915_audio_component is used for interaction between the
+ * graphics and audio drivers. The struct i915_audio_component_ops *ops in
+ * it is defined in the graphics driver and called from the audio driver.
+ * The struct i915_audio_component_audio_ops *audio_ops is called from the
+ * i915 driver.
+ */
+
+static const struct {
+	int clock;
+	u32 config;
+} hdmi_audio_clock[] = {
+	{ 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+	{ 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+	{ 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+	{ 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+	{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* HDMI N/CTS table */
+#define TMDS_297M 297000
+#define TMDS_296M 296703
+static const struct {
+	int sample_rate;
+	int clock;
+	int n;
+	int cts;
+} aud_ncts[] = {
+	{ 44100, TMDS_296M, 4459, 234375 },
+	{ 44100, TMDS_297M, 4704, 247500 },
+	{ 48000, TMDS_296M, 5824, 281250 },
+	{ 48000, TMDS_297M, 5120, 247500 },
+	{ 32000, TMDS_296M, 5824, 421875 },
+	{ 32000, TMDS_297M, 3072, 222750 },
+	{ 88200, TMDS_296M, 8918, 234375 },
+	{ 88200, TMDS_297M, 9408, 247500 },
+	{ 96000, TMDS_296M, 11648, 281250 },
+	{ 96000, TMDS_297M, 10240, 247500 },
+	{ 176400, TMDS_296M, 17836, 234375 },
+	{ 176400, TMDS_297M, 18816, 247500 },
+	{ 192000, TMDS_296M, 23296, 281250 },
+	{ 192000, TMDS_297M, 20480, 247500 },
+};
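+
+/*
+ * The table above follows the standard HDMI audio clock regeneration
+ * relation 128 * sample_rate = TMDS_clock * N / CTS. For example, for
+ * 48 kHz over a 297 MHz link: 297000000 * 5120 / 247500 = 6144000
+ * = 128 * 48000.
+ */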
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+		if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+			      adjusted_mode->crtc_clock);
+		i = 1;
+	}
+
+	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+		      hdmi_audio_clock[i].clock,
+		      hdmi_audio_clock[i].config);
+
+	return hdmi_audio_clock[i].config;
+}
+
+static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
+		if ((rate == aud_ncts[i].sample_rate) &&
+		    (mode->clock == aud_ncts[i].clock))
+			return aud_ncts[i].n;
+	}
+	return 0;
+}
+
+static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
+{
+	int n_low, n_up;
+	uint32_t tmp = val;
+
+	n_low = n & 0xfff;
+	n_up = (n >> 12) & 0xff;
+	tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
+	tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
+			(n_low << AUD_CONFIG_LOWER_N_SHIFT) |
+			AUD_CONFIG_N_PROG_ENABLE);
+	return tmp;
+}
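+
+/*
+ * Example: for N = 5120 (0x1400) the low 12 bits (0x400) go into the
+ * AUD_CONFIG lower-N field and the upper 8 bits (0x1) into the upper-N
+ * field, with AUD_CONFIG_N_PROG_ENABLE set so the programmed N overrides
+ * the automatically derived one.
+ */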
+
+/* check whether N/CTS/M needs to be set manually */
+static bool audio_rate_need_prog(struct intel_crtc *crtc,
+				 const struct drm_display_mode *mode)
+{
+	return (mode->clock == TMDS_297M || mode->clock == TMDS_296M) &&
+		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+			       int reg_eldv, uint32_t bits_eldv,
+			       int reg_elda, uint32_t bits_elda,
+			       int reg_edid)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t tmp;
+	int i;
+
+	tmp = I915_READ(reg_eldv);
+	tmp &= bits_eldv;
+
+	if (!tmp)
+		return false;
+
+	tmp = I915_READ(reg_elda);
+	tmp &= ~bits_elda;
+	I915_WRITE(reg_elda, tmp);
+
+	for (i = 0; i < drm_eld_size(eld) / 4; i++)
+		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+			return false;
+
+	return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	uint32_t eldv, tmp;
+
+	DRM_DEBUG_KMS("Disable audio codec\n");
+
+	tmp = I915_READ(G4X_AUD_VID_DID);
+	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+		eldv = G4X_ELDV_DEVCL_DEVBLC;
+	else
+		eldv = G4X_ELDV_DEVCTG;
+
+	/* Invalidate ELD */
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp &= ~eldv;
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t tmp;
+	int len, i;
+
+	DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+	tmp = I915_READ(G4X_AUD_VID_DID);
+	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+		eldv = G4X_ELDV_DEVCL_DEVBLC;
+	else
+		eldv = G4X_ELDV_DEVCTG;
+
+	if (intel_eld_uptodate(connector,
+			       G4X_AUD_CNTL_ST, eldv,
+			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+			       G4X_HDMIW_HDMIEDID))
+		return;
+
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+	len = (tmp >> 9) & 0x1f;		/* ELD buffer size */
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+	len = min(drm_eld_size(eld) / 4, len);
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp |= eldv;
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t tmp;
+
+	DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+	mutex_lock(&dev_priv->av_mutex);
+
+	/* Disable timestamps */
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	/* Invalidate ELD */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp &= ~AUDIO_ELD_VALID(pipe);
+	tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	mutex_unlock(&dev_priv->av_mutex);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	const uint8_t *eld = connector->eld;
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+	enum port port = intel_dig_port->port;
+	uint32_t tmp;
+	int len, i;
+	int n, rate;
+
+	DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+		      pipe_name(pipe), drm_eld_size(eld));
+
+	mutex_lock(&dev_priv->av_mutex);
+
+	/* Enable audio presence detect, invalidate ELD */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+	tmp &= ~AUDIO_ELD_VALID(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	/*
+	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
+	 * disabled during the mode set. The proper fix would be to push the
+	 * rest of the setup into a vblank work item, queued here, but the
+	 * infrastructure is not there yet.
+	 */
+
+	/* Reset ELD write address */
+	tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+	tmp &= ~IBX_ELD_ADDRESS_MASK;
+	I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+	/* Up to 84 bytes of hw ELD buffer */
+	len = min(drm_eld_size(eld), 84);
+	for (i = 0; i < len / 4; i++)
+		I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+	/* ELD valid */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp |= AUDIO_ELD_VALID(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	/* Enable timestamps */
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	else
+		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
+		if (!acomp) {
+			rate = 0;
+		} else if (port >= PORT_A && port <= PORT_E) {
+			rate = acomp->aud_sample_rate[port];
+		} else {
+			DRM_ERROR("invalid port: %d\n", port);
+			rate = 0;
+		}
+		n = audio_config_get_n(adjusted_mode, rate);
+		if (n != 0)
+			tmp = audio_config_setup_n_reg(n, tmp);
+		else
+			DRM_DEBUG_KMS("no suitable N value is found\n");
+	}
+
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	mutex_unlock(&dev_priv->av_mutex);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+	enum port port = intel_dig_port->port;
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t tmp, eldv;
+	int aud_config;
+	int aud_cntrl_st2;
+
+	DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+		      port_name(port), pipe_name(pipe));
+
+	if (WARN_ON(port == PORT_A))
+		return;
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+	} else {
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+	}
+
+	/* Disable timestamps */
+	tmp = I915_READ(aud_config);
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	I915_WRITE(aud_config, tmp);
+
+	eldv = IBX_ELD_VALID(port);
+
+	/* Invalidate ELD */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+	enum port port = intel_dig_port->port;
+	enum pipe pipe = intel_crtc->pipe;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t tmp;
+	int len, i;
+	int hdmiw_hdmiedid;
+	int aud_config;
+	int aud_cntl_st;
+	int aud_cntrl_st2;
+
+	DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+		      port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+	if (WARN_ON(port == PORT_A))
+		return;
+
+	/*
+	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
+	 * disabled during the mode set. The proper fix would be to push the
+	 * rest of the setup into a vblank work item, queued here, but the
+	 * infrastructure is not there yet.
+	 */
+
+	if (HAS_PCH_IBX(connector->dev)) {
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(connector->dev)) {
+		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+	} else {
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+	}
+
+	eldv = IBX_ELD_VALID(port);
+
+	/* Invalidate ELD */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Reset ELD write address */
+	tmp = I915_READ(aud_cntl_st);
+	tmp &= ~IBX_ELD_ADDRESS_MASK;
+	I915_WRITE(aud_cntl_st, tmp);
+
+	/* Up to 84 bytes of hw ELD buffer */
+	len = min(drm_eld_size(eld), 84);
+	for (i = 0; i < len / 4; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	/* ELD valid */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Enable timestamps */
+	tmp = I915_READ(aud_config);
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	else
+		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+	I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after link training has completed.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
+
+	connector = drm_select_eld(encoder);
+	if (!connector)
+		return;
+
+	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+			 connector->base.id,
+			 connector->name,
+			 connector->encoder->base.id,
+			 connector->encoder->name);
+
+	/* ELD Conn_Type */
+	connector->eld[5] &= ~(3 << 2);
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		connector->eld[5] |= (1 << 2);
+
+	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+	if (dev_priv->display.audio_codec_enable)
+		dev_priv->display.audio_codec_enable(connector, intel_encoder,
+						     adjusted_mode);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @intel_encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
+
+	if (dev_priv->display.audio_codec_disable)
+		dev_priv->display.audio_codec_disable(intel_encoder);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_G4X(dev)) {
+		dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+	} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+		dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+	}
+}
+
+static void i915_audio_component_get_power(struct device *dev)
+{
+	intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+static void i915_audio_component_put_power(struct device *dev)
+{
+	intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+}
+
+static void i915_audio_component_codec_wake_override(struct device *dev,
+						     bool enable)
+{
+	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	u32 tmp;
+
+	if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	/*
+	 * Enable/disable generation of the codec wake signal, overriding the
+	 * internal logic that generates the codec wake to the controller.
+	 */
+	tmp = I915_READ(HSW_AUD_CHICKENBIT);
+	tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
+	I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+	usleep_range(1000, 1500);
+
+	if (enable) {
+		tmp = I915_READ(HSW_AUD_CHICKENBIT);
+		tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
+		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+		usleep_range(1000, 1500);
+	}
+}
+
+/* Get CDCLK in kHz */
+static int i915_audio_component_get_cdclk_freq(struct device *dev)
+{
+	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	int ret;
+
+	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+		return -ENODEV;
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+	ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
+	return ret;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *dev,
+						int port, int rate)
+{
+	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_device *drm_dev = dev_priv->dev;
+	struct intel_encoder *intel_encoder;
+	struct intel_digital_port *intel_dig_port;
+	struct intel_crtc *crtc;
+	struct drm_display_mode *mode;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	enum pipe pipe = INVALID_PIPE;
+	u32 tmp;
+	int n;
+
+	/* HSW, BDW and SKL need this fix */
+	if (!IS_SKYLAKE(dev_priv) &&
+	    !IS_BROADWELL(dev_priv) &&
+	    !IS_HASWELL(dev_priv))
+		return 0;
+
+	mutex_lock(&dev_priv->av_mutex);
+	/* 1. get the pipe */
+	for_each_intel_encoder(drm_dev, intel_encoder) {
+		if (intel_encoder->type != INTEL_OUTPUT_HDMI)
+			continue;
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		if (port == intel_dig_port->port) {
+			crtc = to_intel_crtc(intel_encoder->base.crtc);
+			if (!crtc) {
+				DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__);
+				continue;
+			}
+			pipe = crtc->pipe;
+			break;
+		}
+	}
+
+	if (pipe == INVALID_PIPE) {
+		DRM_DEBUG_KMS("no pipe for port %c\n", port_name(port));
+		mutex_unlock(&dev_priv->av_mutex);
+		return -ENODEV;
+	}
+	DRM_DEBUG_KMS("pipe %c connects port %c\n",
+		      pipe_name(pipe), port_name(port));
+	mode = &crtc->config->base.adjusted_mode;
+
+	/* port must be valid now, otherwise the pipe will be invalid */
+	acomp->aud_sample_rate[port] = rate;
+
+	/* 2. check whether to set the N/CTS/M manually or not */
+	if (!audio_rate_need_prog(crtc, mode)) {
+		tmp = I915_READ(HSW_AUD_CFG(pipe));
+		tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+		I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+		mutex_unlock(&dev_priv->av_mutex);
+		return 0;
+	}
+
+	n = audio_config_get_n(mode, rate);
+	if (n == 0) {
+		DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
+			      port_name(port));
+		tmp = I915_READ(HSW_AUD_CFG(pipe));
+		tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+		I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+		mutex_unlock(&dev_priv->av_mutex);
+		return 0;
+	}
+
+	/* 3. set the N/CTS/M */
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp = audio_config_setup_n_reg(n, tmp);
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	mutex_unlock(&dev_priv->av_mutex);
+	return 0;
+}
+
+static const struct i915_audio_component_ops i915_audio_component_ops = {
+	.owner		= THIS_MODULE,
+	.get_power	= i915_audio_component_get_power,
+	.put_power	= i915_audio_component_put_power,
+	.codec_wake_override = i915_audio_component_codec_wake_override,
+	.get_cdclk_freq	= i915_audio_component_get_cdclk_freq,
+	.sync_audio_rate = i915_audio_component_sync_audio_rate,
+};
+
+static int i915_audio_component_bind(struct device *i915_dev,
+				     struct device *hda_dev, void *data)
+{
+	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	int i;
+
+	if (WARN_ON(acomp->ops || acomp->dev))
+		return -EEXIST;
+
+	drm_modeset_lock_all(dev_priv->dev);
+	acomp->ops = &i915_audio_component_ops;
+	acomp->dev = i915_dev;
+	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
+	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
+		acomp->aud_sample_rate[i] = 0;
+	dev_priv->audio_component = acomp;
+	drm_modeset_unlock_all(dev_priv->dev);
+
+	return 0;
+}
+
+static void i915_audio_component_unbind(struct device *i915_dev,
+					struct device *hda_dev, void *data)
+{
+	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+
+	drm_modeset_lock_all(dev_priv->dev);
+	acomp->ops = NULL;
+	acomp->dev = NULL;
+	dev_priv->audio_component = NULL;
+	drm_modeset_unlock_all(dev_priv->dev);
+}
+
+static const struct component_ops i915_audio_component_bind_ops = {
+	.bind	= i915_audio_component_bind,
+	.unbind	= i915_audio_component_unbind,
+};
+
+/**
+ * i915_audio_component_init - initialize and register the audio component
+ * @dev_priv: i915 device instance
+ *
+ * This will register with the component framework a child component which
+ * will bind dynamically to the snd_hda_intel driver's corresponding master
+ * component when the latter is registered. During binding the child
+ * initializes an instance of struct i915_audio_component which it receives
+ * from the master. The master can then start to use the interface defined by
+ * this struct. Each side can break the binding at any point by deregistering
+ * its own component after which each side's component unbind callback is
+ * called.
+ *
+ * We ignore any error during registration and continue with reduced
+ * functionality (i.e. without HDMI audio).
+ */
+void i915_audio_component_init(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	if (ret < 0) {
+		DRM_ERROR("failed to add audio component (%d)\n", ret);
+		/* continue with reduced functionality */
+		return;
+	}
+
+	dev_priv->audio_component_registered = true;
+}
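+
+/*
+ * Illustrative sketch (not part of this file) of how a master side such
+ * as snd_hda_intel binds against this component via the component
+ * framework; the names prefixed "my_" are hypothetical:
+ *
+ *	struct component_match *match = NULL;
+ *
+ *	component_match_add(hda_dev, &match, my_compare_i915, NULL);
+ *	component_master_add_with_match(hda_dev, &my_master_ops, match);
+ */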
+
+/**
+ * i915_audio_component_cleanup - deregister the audio component
+ * @dev_priv: i915 device instance
+ *
+ * Deregisters the audio component, breaking any existing binding to the
+ * corresponding snd_hda_intel driver's master component.
+ */
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->audio_component_registered)
+		return;
+
+	component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	dev_priv->audio_component_registered = false;
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
new file mode 100644
index 0000000..0a2ac3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -0,0 +1,1399 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <linux/dmi.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_bios.h"
+
+#define	SLAVE_ADDR1	0x70
+#define	SLAVE_ADDR2	0x72
+
+static int panel_type;
+
+static const void *
+find_section(const void *_bdb, int section_id)
+{
+	const struct bdb_header *bdb = _bdb;
+	const u8 *base = _bdb;
+	int index = 0;
+	u32 total, current_size;
+	u8 current_id;
+
+	/* skip to first section */
+	index += bdb->header_size;
+	total = bdb->bdb_size;
+
+	/* walk the sections looking for section_id */
+	while (index + 3 < total) {
+		current_id = *(base + index);
+		index++;
+
+		current_size = *((const u16 *)(base + index));
+		index += 2;
+
+		/* The MIPI Sequence Block v3+ has a separate size field. */
+		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+			current_size = *((const u32 *)(base + index + 1));
+
+		if (index + current_size > total)
+			return NULL;
+
+		if (current_id == section_id)
+			return base + index;
+
+		index += current_size;
+	}
+
+	return NULL;
+}
+
+/*
+ * The 2 bytes immediately preceding a block's payload hold its size;
+ * see the BDB block layout notes in intel_bios.h.
+ */
+static u16
+get_blocksize(const void *p)
+{
+	const u16 *block_ptr = (const u16 *)((const char *)p - 2);
+
+	return *block_ptr;
+}
+
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+			const struct lvds_dvo_timing *dvo_timing)
+{
+	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+		dvo_timing->hactive_lo;
+	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+		dvo_timing->hsync_pulse_width;
+	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+		dvo_timing->vactive_lo;
+	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+		dvo_timing->vsync_off;
+	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+		dvo_timing->vsync_pulse_width;
+	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+	panel_fixed_mode->clock = dvo_timing->clock * 10;
+	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+	if (dvo_timing->hsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dvo_timing->vsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+	/* Some VBTs have bogus h/vtotal values */
+	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+	drm_mode_set_name(panel_fixed_mode);
+}
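+
+/*
+ * Worked example for the arithmetic above (illustrative values only):
+ * hactive_hi = 0x05 and hactive_lo = 0x00 give hdisplay =
+ * (0x05 << 8) | 0x00 = 1280, and dvo_timing->clock = 7125 gives a
+ * 71250 kHz (71.25 MHz) pixel clock.
+ */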
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+		    int index)
+{
+	/*
+	 * The size of fp_timing varies on different platforms, so
+	 * calculate the DVO timing's relative offset within an LVDS
+	 * data entry to get at the DVO timing entry.
+	 */
+
+	int lfp_data_size =
+		lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+	int dvo_timing_offset =
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+	char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+	return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/*
+ * Get the lvds_fp_timing entry.
+ * This function may return NULL if the corresponding entry is invalid.
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+		   const struct bdb_lvds_lfp_data *data,
+		   const struct bdb_lvds_lfp_data_ptrs *ptrs,
+		   int index)
+{
+	size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+	u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+	size_t ofs;
+
+	if (index >= ARRAY_SIZE(ptrs->ptr))
+		return NULL;
+	ofs = ptrs->ptr[index].fp_timing_offset;
+	if (ofs < data_ofs ||
+	    ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+		return NULL;
+	return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+		     const struct bdb_header *bdb)
+{
+	const struct bdb_lvds_options *lvds_options;
+	const struct bdb_lvds_lfp_data *lvds_lfp_data;
+	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+	const struct lvds_dvo_timing *panel_dvo_timing;
+	const struct lvds_fp_timing *fp_timing;
+	struct drm_display_mode *panel_fixed_mode;
+	int drrs_mode;
+
+	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+	if (!lvds_options)
+		return;
+
+	dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
+	if (lvds_options->panel_type == 0xff)
+		return;
+
+	panel_type = lvds_options->panel_type;
+
+	drrs_mode = (lvds_options->dps_panel_type_bits
+				>> (panel_type * 2)) & MODE_MASK;
+	/*
+	 * The VBT encodes static DRRS as 0 and seamless DRRS as 2;
+	 * adjust vbt.drrs_type to match enum drrs_support_type.
+	 */
+	switch (drrs_mode) {
+	case 0:
+		dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+		DRM_DEBUG_KMS("DRRS supported mode is static\n");
+		break;
+	case 2:
+		dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+		DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+		break;
+	default:
+		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+		DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+		break;
+	}
+
+	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+	if (!lvds_lfp_data)
+		return;
+
+	lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+	if (!lvds_lfp_data_ptrs)
+		return;
+
+	dev_priv->vbt.lvds_vbt = 1;
+
+	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+					       lvds_lfp_data_ptrs,
+					       lvds_options->panel_type);
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+
+	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+				       lvds_lfp_data_ptrs,
+				       lvds_options->panel_type);
+	if (fp_timing) {
+		/* check the resolution, just to be sure */
+		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
+			dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+				      dev_priv->vbt.bios_lvds_val);
+		}
+	}
+}
+
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv,
+		    const struct bdb_header *bdb)
+{
+	const struct bdb_lfp_backlight_data *backlight_data;
+	const struct bdb_lfp_backlight_data_entry *entry;
+
+	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+	if (!backlight_data)
+		return;
+
+	if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+		DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+			      backlight_data->entry_size);
+		return;
+	}
+
+	entry = &backlight_data->data[panel_type];
+
+	dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+	if (!dev_priv->vbt.backlight.present) {
+		DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
+			      entry->type);
+		return;
+	}
+
+	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+		      "active %s, min brightness %u, level %u\n",
+		      dev_priv->vbt.backlight.pwm_freq_hz,
+		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+		      dev_priv->vbt.backlight.min_brightness,
+		      backlight_data->level[panel_type]);
+}
+
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+		      const struct bdb_header *bdb)
+{
+	const struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+	int index;
+
+	index = i915.vbt_sdvo_panel_type;
+	if (index == -2) {
+		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+		return;
+	}
+
+	if (index == -1) {
+		const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+		sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+		if (!sdvo_lvds_options)
+			return;
+
+		index = sdvo_lvds_options->panel_type;
+	}
+
+	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+	if (!dvo_timing)
+		return;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+
+	dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
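+/*
+ * SSC reference clock frequencies below are in kHz (cf. the
+ * "Set default to SSC at %d kHz" debug print in init_vbt_defaults()).
+ */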
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+				    bool alternate)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		return alternate ? 66667 : 48000;
+	case 3:
+	case 4:
+		return alternate ? 100000 : 96000;
+	default:
+		return alternate ? 100000 : 120000;
+	}
+}
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+		       const struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	const struct bdb_general_features *general;
+
+	general = find_section(bdb, BDB_GENERAL_FEATURES);
+	if (general) {
+		dev_priv->vbt.int_tv_support = general->int_tv_support;
+		dev_priv->vbt.int_crt_support = general->int_crt_support;
+		dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
+		dev_priv->vbt.lvds_ssc_freq =
+			intel_bios_ssc_frequency(dev, general->ssc_freq);
+		dev_priv->vbt.display_clock_mode = general->display_clock_mode;
+		dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+			      dev_priv->vbt.int_tv_support,
+			      dev_priv->vbt.int_crt_support,
+			      dev_priv->vbt.lvds_use_ssc,
+			      dev_priv->vbt.lvds_ssc_freq,
+			      dev_priv->vbt.display_clock_mode,
+			      dev_priv->vbt.fdi_rx_polarity_inverted);
+	}
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+			  const struct bdb_header *bdb)
+{
+	const struct bdb_general_definitions *general;
+
+	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (general) {
+		u16 block_size = get_blocksize(general);
+		if (block_size >= sizeof(*general)) {
+			int bus_pin = general->crt_ddc_gmbus_pin;
+			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+			if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
+				dev_priv->vbt.crt_ddc_pin = bus_pin;
+		} else {
+			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+				      block_size);
+		}
+	}
+}
+
+static const union child_device_config *
+child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
+{
+	return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+			  const struct bdb_header *bdb)
+{
+	struct sdvo_device_mapping *p_mapping;
+	const struct bdb_general_definitions *p_defs;
+	const struct old_child_dev_config *child; /* legacy */
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block found, unable to construct SDVO mapping.\n");
+		return;
+	}
+
+	/*
+	 * Only parse SDVO mappings when the general definitions block child
+	 * device size matches that of the *legacy* child device config
+	 * struct. Thus, SDVO mapping will be skipped for newer VBT.
+	 */
+	if (p_defs->child_dev_size != sizeof(*child)) {
+		DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+		p_defs->child_dev_size;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		child = &child_device_ptr(p_defs, i)->old;
+		if (!child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		if (child->slave_addr != SLAVE_ADDR1 &&
+		    child->slave_addr != SLAVE_ADDR2) {
+			/*
+			 * If the slave address is neither 0x70 nor 0x72,
+			 * it is not a SDVO device. Skip it.
+			 */
+			continue;
+		}
+		if (child->dvo_port != DEVICE_PORT_DVOB &&
+		    child->dvo_port != DEVICE_PORT_DVOC) {
+			/* skip the incorrect SDVO port */
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+			continue;
+		}
+		DRM_DEBUG_KMS("Found SDVO device with slave addr %2x on"
+			      " %s port\n",
+			      child->slave_addr,
+			      (child->dvo_port == DEVICE_PORT_DVOB) ?
+			      "SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
+		if (!p_mapping->initialized) {
+			p_mapping->dvo_port = child->dvo_port;
+			p_mapping->slave_addr = child->slave_addr;
+			p_mapping->dvo_wiring = child->dvo_wiring;
+			p_mapping->ddc_pin = child->ddc_pin;
+			p_mapping->i2c_pin = child->i2c_pin;
+			p_mapping->initialized = 1;
+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+				      p_mapping->dvo_port,
+				      p_mapping->slave_addr,
+				      p_mapping->dvo_wiring,
+				      p_mapping->ddc_pin,
+				      p_mapping->i2c_pin);
+		} else {
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+				      "two SDVO devices.\n");
+		}
+		if (child->slave2_addr) {
+			/*
+			 * Maybe this is an SDVO device with multiple inputs,
+			 * and the mapping info was not added.
+			 */
+			DRM_DEBUG_KMS("slave2_addr exists; maybe this is an"
+				      " SDVO device with multiple inputs.\n");
+		}
+		count++;
+	}
+
+	if (!count)
+		DRM_DEBUG_KMS("No SDVO device info found in VBT\n");
+}
+
+static void
+parse_driver_features(struct drm_i915_private *dev_priv,
+		      const struct bdb_header *bdb)
+{
+	const struct bdb_driver_features *driver;
+
+	driver = find_section(bdb, BDB_DRIVER_FEATURES);
+	if (!driver)
+		return;
+
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->vbt.edp_support = 1;
+
+	if (driver->dual_frequency)
+		dev_priv->render_reclock_avail = true;
+
+	DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+	/*
+	 * If DRRS is not supported, drrs_type has to be set to 0, because
+	 * the VBT encodes static DRRS as 0 and represents "DRRS not
+	 * supported" by driver->drrs_enabled = false.
+	 */
+	if (!driver->drrs_enabled)
+		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+{
+	const struct bdb_edp *edp;
+	const struct edp_power_seq *edp_pps;
+	const struct edp_link_params *edp_link_params;
+
+	edp = find_section(bdb, BDB_EDP);
+	if (!edp) {
+		if (dev_priv->vbt.edp_support)
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+		return;
+	}
+
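+	/*
+	 * Each panel type occupies a 2-bit field in color_depth; e.g.
+	 * (illustrative) with panel_type == 2, bits 5:4 select the eDP bpp
+	 * below.
+	 */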
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->vbt.edp_bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->vbt.edp_bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->vbt.edp_bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->vbt.edp_pps = *edp_pps;
+
+	switch (edp_link_params->rate) {
+	case EDP_RATE_1_62:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+		break;
+	case EDP_RATE_2_7:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+			      edp_link_params->rate);
+		break;
+	}
+
+	switch (edp_link_params->lanes) {
+	case EDP_LANE_1:
+		dev_priv->vbt.edp_lanes = 1;
+		break;
+	case EDP_LANE_2:
+		dev_priv->vbt.edp_lanes = 2;
+		break;
+	case EDP_LANE_4:
+		dev_priv->vbt.edp_lanes = 4;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+			      edp_link_params->lanes);
+		break;
+	}
+
+	switch (edp_link_params->preemphasis) {
+	case EDP_PREEMPHASIS_NONE:
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+		break;
+	case EDP_PREEMPHASIS_3_5dB:
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+		break;
+	case EDP_PREEMPHASIS_6dB:
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+		break;
+	case EDP_PREEMPHASIS_9_5dB:
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+			      edp_link_params->preemphasis);
+		break;
+	}
+
+	switch (edp_link_params->vswing) {
+	case EDP_VSWING_0_4V:
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+		break;
+	case EDP_VSWING_0_6V:
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+		break;
+	case EDP_VSWING_0_8V:
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+		break;
+	case EDP_VSWING_1_2V:
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+			      edp_link_params->vswing);
+		break;
+	}
+
+	if (bdb->version >= 173) {
+		uint8_t vswing;
+
+		/* Don't read from VBT if the module parameter has a valid value */
+		if (i915.edp_vswing) {
+			dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+		} else {
+			vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+			dev_priv->edp_low_vswing = vswing == 0;
+		}
+	}
+}
+
+static void
+parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+{
+	const struct bdb_psr *psr;
+	const struct psr_table *psr_table;
+
+	psr = find_section(bdb, BDB_PSR);
+	if (!psr) {
+		DRM_DEBUG_KMS("No PSR BDB found.\n");
+		return;
+	}
+
+	psr_table = &psr->psr_table[panel_type];
+
+	dev_priv->vbt.psr.full_link = psr_table->full_link;
+	dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+	/* Allowed VBT values go from 0 to 15 */
+	dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+		psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+	switch (psr_table->lines_to_wait) {
+	case 0:
+		dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+		break;
+	case 1:
+		dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+		break;
+	case 2:
+		dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+		break;
+	case 3:
+		dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
+			      psr_table->lines_to_wait);
+		break;
+	}
+
+	dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
+	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+}
+
+static u8 *goto_next_sequence(u8 *data, int *size)
+{
+	u16 len;
+	int tmp = *size;
+
+	if (--tmp < 0)
+		return NULL;
+
+	/* go to the first element */
+	data++;
+	while (1) {
+		switch (*data) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			/*
+			 * Skip this element's header: the elem id, command
+			 * flag, data type and 2-byte payload length.
+			 */
+			tmp -= 5;
+			if (tmp < 0)
+				return NULL;
+
+			data += 3;
+			len = *((u16 *)data);
+
+			tmp -= len;
+			if (tmp < 0)
+				return NULL;
+
+			/* skip by len */
+			data = data + 2 + len;
+			break;
+		case MIPI_SEQ_ELEM_DELAY:
+			/* skip the elem id plus the 4-byte delay value */
+			tmp -= 5;
+			if (tmp < 0)
+				return NULL;
+
+			data += 5;
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			tmp -= 3;
+			if (tmp < 0)
+				return NULL;
+
+			data += 3;
+			break;
+		default:
+			DRM_ERROR("Unknown element\n");
+			return NULL;
+		}
+
+		/* end of sequence? */
+		if (*data == 0)
+			break;
+	}
+
+	/* go to the next sequence or the end-of-block byte */
+	if (--tmp < 0)
+		return NULL;
+
+	data++;
+
+	/* update amount of data left for the sequence block to be parsed */
+	*size = tmp;
+	return data;
+}
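+
+/*
+ * Element encodings handled above, as implied by the size arithmetic
+ * (v1/v2 sequence blocks):
+ *
+ *	SEND_PKT: <elem id> <command flag> <data type> <u16 len> <payload>
+ *	DELAY:    <elem id> <4-byte delay>
+ *	GPIO:     <elem id> <2 bytes of payload>
+ *
+ * An element id of 0x00 terminates the sequence.
+ */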
+
+static void
+parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+{
+	const struct bdb_mipi_config *start;
+	const struct bdb_mipi_sequence *sequence;
+	const struct mipi_config *config;
+	const struct mipi_pps_data *pps;
+	u8 *data;
+	const u8 *seq_data;
+	int i, panel_id, seq_size;
+	u16 block_size;
+
+	/* parse MIPI blocks only if LFP type is MIPI */
+	if (!dev_priv->vbt.has_mipi)
+		return;
+
+	/* Initialize this to undefined indicating no generic MIPI support */
+	dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+	/*
+	 * Block #40 is already parsed and panel_fixed_mode is stored in
+	 * dev_priv->lfp_lvds_vbt_mode; reuse it when needed.
+	 */
+
+	/*
+	 * Parse block #52, using the panel index from the already-parsed
+	 * panel_type.
+	 */
+	start = find_section(bdb, BDB_MIPI_CONFIG);
+	if (!start) {
+		DRM_DEBUG_KMS("No MIPI config BDB found\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
+			 panel_type);
+
+	/*
+	 * Get hold of the correct configuration block and PPS data, using
+	 * panel_type as the index.
+	 */
+	config = &start->config[panel_type];
+	pps = &start->pps[panel_type];
+
+	/* Store the full data for now; trim once we know what is needed. */
+	dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.config)
+		return;
+
+	dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.pps) {
+		kfree(dev_priv->vbt.dsi.config);
+		return;
+	}
+
+	/* We have mandatory mipi config blocks. Initialize as generic panel */
+	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+
+	/* Check if we have sequence block as well */
+	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+	if (!sequence) {
+		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+		return;
+	}
+
+	/* Fail gracefully for forward incompatible sequence block. */
+	if (sequence->version >= 3) {
+		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+
+	block_size = get_blocksize(sequence);
+
+	/*
+	 * parse the sequence block for individual sequences
+	 */
+	dev_priv->vbt.dsi.seq_version = sequence->version;
+
+	seq_data = &sequence->data[0];
+
+	/*
+	 * The sequence block is variable length, so we need to walk it to
+	 * find the sequence data for our specific panel id.
+	 */
+	for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
+		panel_id = *seq_data;
+		seq_size = *((u16 *) (seq_data + 1));
+		if (panel_id == panel_type)
+			break;
+
+		/* skip this sequence, including its 3-byte header */
+		seq_data = seq_data + 3 + seq_size;
+		if ((seq_data - &sequence->data[0]) > block_size) {
+			DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
+			return;
+		}
+	}
+
+	if (i == MAX_MIPI_CONFIGURATIONS) {
+		DRM_ERROR("Sequence block detected but no valid configuration\n");
+		return;
+	}
+
+	/*
+	 * Check that the found sequence is completely within the sequence
+	 * block; just being paranoid.
+	 */
+	if (seq_size > block_size) {
+		DRM_ERROR("Corrupted sequence/size, bailing out\n");
+		return;
+	}
+
+	/* skip the panel id (1 byte) and seq size (2 bytes) */
+	dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.data)
+		return;
+
+	/*
+	 * Loop over the sequence data and split it into individual
+	 * sequences; there are only 5 sequence types as of now.
+	 */
+	data = dev_priv->vbt.dsi.data;
+	dev_priv->vbt.dsi.size = seq_size;
+
+	/* two consecutive 0x00 indicate end of all sequences */
+	while (1) {
+		int seq_id = *data;
+		if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
+			dev_priv->vbt.dsi.sequence[seq_id] = data;
+			DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
+		} else {
+			DRM_ERROR("undefined sequence\n");
+			goto err;
+		}
+
+		/* partial parsing to skip elements */
+		data = goto_next_sequence(data, &seq_size);
+
+		if (data == NULL) {
+			DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+			goto err;
+		}
+
+		if (*data == 0)
+			break; /* end of sequence reached */
+	}
+
+	DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+	return;
+err:
+	kfree(dev_priv->vbt.dsi.data);
+	dev_priv->vbt.dsi.data = NULL;
+
+	/*
+	 * Error during parsing, so clear all sequence pointers set by the
+	 * partial parse.
+	 */
+	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
+}
+
+static u8 translate_iboost(u8 val)
+{
+	static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+	if (val >= ARRAY_SIZE(mapping)) {
+		DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+		return 0;
+	}
+	return mapping[val];
+}
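+
+/* E.g. (from the mapping above) a VBT I_boost value of 2 maps to level 7. */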
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+			   const struct bdb_header *bdb)
+{
+	union child_device_config *it, *child = NULL;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	uint8_t hdmi_level_shift;
+	int i, j;
+	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+	uint8_t aux_channel, ddc_pin;
+	/* Each DDI port can have more than one value on the "DVO Port" field,
+	 * so look for all the possible values for each port and abort if more
+	 * than one is found. */
+	int dvo_ports[][3] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+		{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+	};
+
+	/* Find the child device to use, abort if more than one found. */
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		it = dev_priv->vbt.child_dev + i;
+
+		for (j = 0; j < 3; j++) {
+			if (dvo_ports[port][j] == -1)
+				break;
+
+			if (it->common.dvo_port == dvo_ports[port][j]) {
+				if (child) {
+					DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+						      port_name(port));
+					return;
+				}
+				child = it;
+			}
+		}
+	}
+	if (!child)
+		return;
+
+	aux_channel = child->raw[25];
+	ddc_pin = child->common.ddc_pin;
+
+	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+	is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+	if (port == PORT_A && is_dvi) {
+		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+			      is_hdmi ? "/HDMI" : "");
+		is_dvi = false;
+		is_hdmi = false;
+	}
+
+	info->supports_dvi = is_dvi;
+	info->supports_hdmi = is_hdmi;
+	info->supports_dp = is_dp;
+
+	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+	if (is_edp && is_dvi)
+		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+			      port_name(port));
+	if (is_crt && port != PORT_E)
+		DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+	if (is_crt && (is_dvi || is_dp))
+		DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+			      port_name(port));
+	if (is_dvi && (port == PORT_A || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
+	if (!is_dvi && !is_dp && !is_crt)
+		DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+			      port_name(port));
+	if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+	if (is_dvi) {
+		if (port == PORT_E) {
+			info->alternate_ddc_pin = ddc_pin;
+			/*
+			 * If DDI E shares its DDC pin with another port,
+			 * DVI/HDMI can't exist on the shared port: both
+			 * would use the same DDC pin and the system couldn't
+			 * communicate with them separately.
+			 */
+			if (ddc_pin == DDC_PIN_B) {
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_C) {
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_D) {
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+			}
+		} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+		else if (ddc_pin == DDC_PIN_C && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+		else if (ddc_pin == DDC_PIN_D && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+	}
+
+	if (is_dp) {
+		if (port == PORT_E) {
+			info->alternate_aux_channel = aux_channel;
+			/*
+			 * If DDI E shares its AUX channel with another port,
+			 * DP can't exist on the shared port: both would use
+			 * the same AUX channel and the system couldn't
+			 * communicate with them separately.
+			 */
+			if (aux_channel == DP_AUX_A)
+				dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+			else if (aux_channel == DP_AUX_B)
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+			else if (aux_channel == DP_AUX_C)
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+			else if (aux_channel == DP_AUX_D)
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+		} else if (aux_channel == DP_AUX_A && port != PORT_A)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+		else if (aux_channel == DP_AUX_B && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+		else if (aux_channel == DP_AUX_C && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+		else if (aux_channel == DP_AUX_D && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+	}
+
+	if (bdb->version >= 158) {
+		/* The VBT HDMI level shift values match the table we have. */
+		hdmi_level_shift = child->raw[7] & 0xF;
+		DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+			      port_name(port),
+			      hdmi_level_shift);
+		info->hdmi_level_shift = hdmi_level_shift;
+	}
+
+	/* Parse the I_boost config for SKL and above */
+	if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+		info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+			      port_name(port), info->dp_boost_level);
+		info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+			      port_name(port), info->hdmi_boost_level);
+	}
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+			    const struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum port port;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return;
+
+	if (bdb->version < 155)
+		return;
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++)
+		parse_ddi_port(dev_priv, port, bdb);
+}
+
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+		     const struct bdb_header *bdb)
+{
+	const struct bdb_general_definitions *p_defs;
+	const union child_device_config *p_child;
+	union child_device_config *child_dev_ptr;
+	int i, child_device_num, count;
+	u8 expected_size;
+	u16 block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block found, no devices defined.\n");
+		return;
+	}
+	if (bdb->version < 195) {
+		expected_size = sizeof(struct old_child_dev_config);
+	} else if (bdb->version == 195) {
+		expected_size = 37;
+	} else if (bdb->version <= 197) {
+		expected_size = 38;
+	} else {
+		expected_size = 38;
+		BUILD_BUG_ON(sizeof(*p_child) < 38);
+		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+	}
+
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_ERROR("Child device config size %u is too small.\n",
+			  p_defs->child_dev_size);
+		return;
+	}
+
+	/* Flag an error for unexpected size, but continue anyway. */
+	if (p_defs->child_dev_size != expected_size)
+		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+			  p_defs->child_dev_size, expected_size, bdb->version);
+
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				p_defs->child_dev_size;
+	count = 0;
+	/* get the number of child device that is present */
+	for (i = 0; i < child_device_num; i++) {
+		p_child = child_device_ptr(p_defs, i);
+		if (!p_child->common.device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		count++;
+	}
+	if (!count) {
+		DRM_DEBUG_KMS("no child devices parsed from VBT\n");
+		return;
+	}
+	dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+	if (!dev_priv->vbt.child_dev) {
+		DRM_DEBUG_KMS("No memory space for child device\n");
+		return;
+	}
+
+	dev_priv->vbt.child_dev_num = count;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = child_device_ptr(p_defs, i);
+		if (!p_child->common.device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+
+		if (p_child->common.dvo_port >= DVO_PORT_MIPIA &&
+		    p_child->common.dvo_port <= DVO_PORT_MIPID &&
+		    p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
+			DRM_DEBUG_KMS("Found MIPI as LFP\n");
+			dev_priv->vbt.has_mipi = 1;
+			dev_priv->vbt.dsi.port = p_child->common.dvo_port;
+		}
+
+		child_dev_ptr = dev_priv->vbt.child_dev + count;
+		count++;
+
+		/*
+		 * Copy as much of the child device as we know about (sizeof)
+		 * and as is available (child_dev_size). Accessing the data
+		 * must depend on the VBT version.
+		 */
+		memcpy(child_dev_ptr, p_child,
+		       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
+	}
+}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum port port;
+
+	dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+
+	/* Default to having backlight */
+	dev_priv->vbt.backlight.present = true;
+
+	/* LFP panel data */
+	dev_priv->vbt.lvds_dither = 1;
+	dev_priv->vbt.lvds_vbt = 0;
+
+	/* SDVO panel data */
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+
+	/* general features */
+	dev_priv->vbt.int_tv_support = 1;
+	dev_priv->vbt.int_crt_support = 1;
+
+	/* Default to using SSC */
+	dev_priv->vbt.lvds_use_ssc = 1;
+	/*
+	 * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+	 * clock for LVDS.
+	 */
+	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+			!HAS_PCH_SPLIT(dev));
+	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+		struct ddi_vbt_port_info *info =
+			&dev_priv->vbt.ddi_port_info[port];
+
+		info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
+
+		info->supports_dvi = (port != PORT_A && port != PORT_E);
+		info->supports_hdmi = info->supports_dvi;
+		info->supports_dp = (port != PORT_E);
+	}
+}
+
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+		      "VBIOS ROM for %s\n",
+		      id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+	{
+		.callback = intel_no_opregion_vbt_callback,
+		.ident = "ThinkCentre A57",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+		},
+	},
+	{ }
+};
+
+static const struct bdb_header *validate_vbt(const void *base,
+					     size_t size,
+					     const void *_vbt,
+					     const char *source)
+{
+	size_t offset = _vbt - base;
+	const struct vbt_header *vbt = _vbt;
+	const struct bdb_header *bdb;
+
+	if (offset + sizeof(struct vbt_header) > size) {
+		DRM_DEBUG_DRIVER("VBT header incomplete\n");
+		return NULL;
+	}
+
+	if (memcmp(vbt->signature, "$VBT", 4)) {
+		DRM_DEBUG_DRIVER("VBT invalid signature\n");
+		return NULL;
+	}
+
+	offset += vbt->bdb_offset;
+	if (offset + sizeof(struct bdb_header) > size) {
+		DRM_DEBUG_DRIVER("BDB header incomplete\n");
+		return NULL;
+	}
+
+	bdb = base + offset;
+	if (offset + bdb->bdb_size > size) {
+		DRM_DEBUG_DRIVER("BDB incomplete\n");
+		return NULL;
+	}
+
+	/* signature is not NUL-terminated; bound the read with a precision */
+	DRM_DEBUG_KMS("Using VBT from %s: %.20s\n",
+		      source, vbt->signature);
+	return bdb;
+}
+
+static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
+{
+	const struct bdb_header *bdb = NULL;
+	size_t i;
+
+	/* Scour memory looking for the VBT signature. */
+	for (i = 0; i + 4 < size; i++) {
+		if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
+			/*
+			 * This is the one place where we explicitly discard the
+			 * address space (__iomem) of the BIOS/VBT. From now on
+			 * everything is based on 'base', and treated as regular
+			 * memory.
+			 */
+			void *_bios = (void __force *) bios;
+
+			bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
+			break;
+		}
+	}
+
+	return bdb;
+}
+
+/**
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int
+intel_parse_bios(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	const struct bdb_header *bdb = NULL;
+	u8 __iomem *bios = NULL;
+
+	if (HAS_PCH_NOP(dev))
+		return -ENODEV;
+
+	init_vbt_defaults(dev_priv);
+
+	/* XXX Should this validation be moved to intel_opregion.c? */
+	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
+		bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
+				   dev_priv->opregion.vbt, "OpRegion");
+
+	if (bdb == NULL) {
+		size_t size;
+
+		bios = pci_map_rom(pdev, &size);
+		if (!bios)
+			return -1;
+
+		bdb = find_vbt(bios, size);
+		if (!bdb) {
+			pci_unmap_rom(pdev, bios);
+			return -1;
+		}
+	}
+
+	/* Grab useful general definitions */
+	parse_general_features(dev_priv, bdb);
+	parse_general_definitions(dev_priv, bdb);
+	parse_lfp_panel_data(dev_priv, bdb);
+	parse_lfp_backlight(dev_priv, bdb);
+	parse_sdvo_panel_data(dev_priv, bdb);
+	parse_sdvo_device_mapping(dev_priv, bdb);
+	parse_device_mapping(dev_priv, bdb);
+	parse_driver_features(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
+	parse_psr(dev_priv, bdb);
+	parse_mipi(dev_priv, bdb);
+	parse_ddi_ports(dev_priv, bdb);
+
+	if (bios)
+		pci_unmap_rom(pdev, bios);
+
+	return 0;
+}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv:	i915 device instance
+ * @port:	port to check
+ *
+ * Return true if the device on @port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+	static const struct {
+		u16 dp, hdmi;
+	} port_mapping[] = {
+		[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+		[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+		[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+	};
+	int i;
+
+	/* FIXME maybe deal with port A as well? */
+	if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+		return false;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		const union child_device_config *p_child =
+			&dev_priv->vbt.child_dev[i];
+		if ((p_child->common.dvo_port == port_mapping[port].dp ||
+		     p_child->common.dvo_port == port_mapping[port].hdmi) &&
+		    (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+						    DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+			return true;
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
new file mode 100644
index 0000000..7ec8c9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -0,0 +1,972 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with "$VBT" */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 vbt_size;			/**< in bytes */
+	u8 vbt_checksum;
+	u8 reserved0;
+	u32 bdb_offset;			/**< from beginning of VBT */
+	u32 aim_offset[4];		/**< from beginning of VBT */
+} __packed;
+
+struct bdb_header {
+	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 bdb_size;			/**< in bytes */
+} __packed;
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+	u8 type; /* 0 == desktop, 1 == mobile */
+	u8 relstage;
+	u8 chipset;
+	u8 lvds_present:1;
+	u8 tv_present:1;
+	u8 rsvd2:6; /* finish byte */
+	u8 rsvd3[4];
+	u8 signon[155];
+	u8 copyright[61];
+	u16 code_segment;
+	u8 dos_boot_mode;
+	u8 bandwidth_percent;
+	u8 rsvd4; /* popup memory size */
+	u8 resize_pci_bios;
+	u8 rsvd5; /* is crt already on ddc2 */
+} __packed;
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
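+/*
+ * For example (illustrative bytes, little-endian size): a block with
+ * ID 40 (BDB_LVDS_OPTIONS) and a 2-byte payload is encoded as
+ * 28 02 00 <2 payload bytes>.
+ */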
+#define BDB_GENERAL_FEATURES	  1
+#define BDB_GENERAL_DEFINITIONS	  2
+#define BDB_OLD_TOGGLE_LIST	  3
+#define BDB_MODE_SUPPORT_LIST	  4
+#define BDB_GENERIC_MODE_TABLE	  5
+#define BDB_EXT_MMIO_REGS	  6
+#define BDB_SWF_IO		  7
+#define BDB_SWF_MMIO		  8
+#define BDB_PSR			  9
+#define BDB_MODE_REMOVAL_TABLE	 10
+#define BDB_CHILD_DEVICE_TABLE	 11
+#define BDB_DRIVER_FEATURES	 12
+#define BDB_DRIVER_PERSISTENCE	 13
+#define BDB_EXT_TABLE_PTRS	 14
+#define BDB_DOT_CLOCK_OVERRIDE	 15
+#define BDB_DISPLAY_SELECT	 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION	 18
+#define BDB_DISPLAY_REMOVE	 19
+#define BDB_OEM_CUSTOM		 20
+#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS	 22
+#define BDB_SDVO_PANEL_DTDS	 23
+#define BDB_SDVO_LVDS_PNP_IDS	 24
+#define BDB_SDVO_LVDS_POWER_SEQ	 25
+#define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
+#define BDB_LVDS_OPTIONS	 40
+#define BDB_LVDS_LFP_DATA_PTRS	 41
+#define BDB_LVDS_LFP_DATA	 42
+#define BDB_LVDS_BACKLIGHT	 43
+#define BDB_LVDS_POWER		 44
+#define BDB_MIPI_CONFIG		 52
+#define BDB_MIPI_SEQUENCE	 53
+#define BDB_SKIP		254 /* VBIOS private block, ignore */
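+
+/*
+ * Block traversal sketch referenced above (hedged; 'bdb' points at the BDB
+ * header, base = (const u8 *)bdb, 'section_id' is the wanted BDB_* ID, and
+ * bounds checking is kept minimal for brevity):
+ *
+ *	index = bdb->header_size;
+ *	while (index + 3 <= bdb->bdb_size) {
+ *		u8 id = base[index];
+ *		u16 size = base[index + 1] | (base[index + 2] << 8);
+ *		if (id == section_id)
+ *			return &base[index + 3];
+ *		index += 3 + size;
+ *	}
+ */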
+
+struct bdb_general_features {
+	/* bits 1 */
+	u8 panel_fitting:2;
+	u8 flexaim:1;
+	u8 msg_enable:1;
+	u8 clear_screen:3;
+	u8 color_flip:1;
+
+	/* bits 2 */
+	u8 download_ext_vbt:1;
+	u8 enable_ssc:1;
+	u8 ssc_freq:1;
+	u8 enable_lfp_on_override:1;
+	u8 disable_ssc_ddt:1;
+	u8 rsvd7:1;
+	u8 display_clock_mode:1;
+	u8 rsvd8:1; /* finish byte */
+
+	/* bits 3 */
+	u8 disable_smooth_vision:1;
+	u8 single_dvi:1;
+	u8 rsvd9:1;
+	u8 fdi_rx_polarity_inverted:1;
+	u8 rsvd10:4; /* finish byte */
+
+	/* bits 4 */
+	u8 legacy_monitor_detect;
+
+	/* bits 5 */
+	u8 int_crt_support:1;
+	u8 int_tv_support:1;
+	u8 int_efp_support:1;
+	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
+	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
+	u8 rsvd11:3; /* finish byte */
+} __packed;
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE	0x00
+#define DEVICE_TYPE_CRT		0x01
+#define DEVICE_TYPE_TV		0x09
+#define DEVICE_TYPE_EFP		0x12
+#define DEVICE_TYPE_LFP		0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS		0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
+#define DEVICE_TYPE_TV_COMPOSITE	0x0209
+#define DEVICE_TYPE_TV_MACROVISION	0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
+#define DEVICE_TYPE_TV_SCART		0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
+#define DEVICE_TYPE_EFP_DVI_I		0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
+#define DEVICE_TYPE_LFP_PANELLINK	0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2
+
+#define DEVICE_CFG_NONE		0x00
+#define DEVICE_CFG_12BIT_DVOB	0x01
+#define DEVICE_CFG_12BIT_DVOC	0x02
+#define DEVICE_CFG_24BIT_DVOBC	0x09
+#define DEVICE_CFG_24BIT_DVOCB	0x0a
+#define DEVICE_CFG_DUAL_DVOB	0x11
+#define DEVICE_CFG_DUAL_DVOC	0x12
+#define DEVICE_CFG_DUAL_DVOBC	0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a
+
+#define DEVICE_WIRE_NONE	0x00
+#define DEVICE_WIRE_DVOB	0x01
+#define DEVICE_WIRE_DVOC	0x02
+#define DEVICE_WIRE_DVOBC	0x03
+#define DEVICE_WIRE_DVOBB	0x05
+#define DEVICE_WIRE_DVOCC	0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB	0x01
+#define DEVICE_PORT_DVOC	0x02
+
+/*
+ * We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. Do not change; we rely on its size.
+ */
+struct old_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8  device_id[10]; /* ascii string */
+	u16 addin_offset;
+	u8  dvo_port; /* See DEVICE_PORT_* above */
+	u8  i2c_pin;
+	u8  slave_addr;
+	u8  ddc_pin;
+	u16 edid_ptr;
+	u8  dvo_cfg; /* See DEVICE_CFG_* above */
+	u8  dvo2_port;
+	u8  i2c2_pin;
+	u8  slave2_addr;
+	u8  ddc2_pin;
+	u8  capabilities;
+	u8  dvo_wiring; /* See DEVICE_WIRE_* above */
+	u8  dvo2_wiring;
+	u16 extended_type;
+	u8  dvo_function;
+} __packed;
+
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
+struct common_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8 not_common1[12];
+	u8 dvo_port;
+	u8 not_common2[2];
+	u8 ddc_pin;
+	u16 edid_ptr;
+	u8 obsolete;
+	u8 flags_1;
+	u8 not_common3[13];
+	u8 iboost_level;
+} __packed;
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+	/* This one is safe to be used anywhere, but the code should still check
+	 * the BDB version. */
+	u8 raw[33];
+	/* This one should only be kept for legacy code. */
+	struct old_child_dev_config old;
+	/* This one should also be safe to use anywhere, even without version
+	 * checks. */
+	struct common_child_dev_config common;
+} __packed;
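+
+/*
+ * A hedged usage sketch (assuming 'child' points at one entry): fields in
+ * .common are stable across BDB versions, so new code reads through it,
+ * while .old stays reserved for legacy paths:
+ *
+ *	u8 port = child->common.dvo_port;
+ *	u16 type = child->common.device_type;
+ */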
+
+struct bdb_general_definitions {
+	/* DDC GPIO */
+	u8 crt_ddc_gmbus_pin;
+
+	/* DPMS bits */
+	u8 dpms_acpi:1;
+	u8 skip_boot_crt_detect:1;
+	u8 dpms_aim:1;
+	u8 rsvd1:5; /* finish byte */
+
+	/* boot device bits */
+	u8 boot_display[2];
+	u8 child_dev_size;
+
+	/*
+	 * Device info:
+	 * If TV is present, it'll be at devices[0].
+	 * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it can be as few
+	 * as 4 if both TV and LVDS are missing.
+	 * The device count depends on the size of the general definitions
+	 * block and is obtained with the following formula (see the
+	 * iteration sketch after this struct):
+	 * number = (block_size - sizeof(bdb_general_definitions)) /
+	 *	     defs->child_dev_size;
+	 */
+	uint8_t devices[0];
+} __packed;
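+
+/*
+ * Iteration sketch for the formula above (hedged; 'block_size' is the size
+ * taken from the block header and handle_child() is a hypothetical
+ * consumer):
+ *
+ *	count = (block_size - sizeof(*defs)) / defs->child_dev_size;
+ *	for (i = 0; i < count; i++)
+ *		handle_child((void *)&defs->devices[i * defs->child_dev_size]);
+ */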
+
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK		0x3
+
+struct bdb_lvds_options {
+	u8 panel_type;
+	u8 rsvd1;
+	/* LVDS capabilities, stored in a dword */
+	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
+	u8 rsvd4;
+	/* LVDS Panel channel bits stored here */
+	u32 lvds_panel_channel_bits;
+	/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+	u16 ssc_bits;
+	u16 ssc_freq;
+	u16 ssc_ddt;
+	/* Panel color depth defined here */
+	u16 panel_color_depth;
+	/* LVDS panel type bits stored here */
+	u32 dps_panel_type_bits;
+	/* LVDS backlight control type bits stored here */
+	u32 blt_control_type_bits;
+} __packed;
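+
+/*
+ * Extraction sketch for MODE_MASK above (a hedged example: the per-panel
+ * fields pack 2 bits per panel, indexed by 'panel_type' from this block):
+ *
+ *	drrs_mode = (opts->dps_panel_type_bits >> (panel_type * 2)) & MODE_MASK;
+ */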
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+	u16 fp_timing_offset; /* offsets are from start of bdb */
+	u8 fp_table_size;
+	u16 dvo_timing_offset;
+	u8 dvo_table_size;
+	u16 panel_pnp_id_offset;
+	u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+	struct bdb_lvds_lfp_data_ptr ptr[16];
+} __packed;
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+	u16 x_res;
+	u16 y_res;
+	u32 lvds_reg;
+	u32 lvds_reg_val;
+	u32 pp_on_reg;
+	u32 pp_on_reg_val;
+	u32 pp_off_reg;
+	u32 pp_off_reg_val;
+	u32 pp_cycle_reg;
+	u32 pp_cycle_reg_val;
+	u32 pfit_reg;
+	u32 pfit_reg_val;
+	u16 terminator;
+} __packed;
+
+struct lvds_dvo_timing {
+	u16 clock;		/**< In 10khz */
+	u8 hactive_lo;
+	u8 hblank_lo;
+	u8 hblank_hi:4;
+	u8 hactive_hi:4;
+	u8 vactive_lo;
+	u8 vblank_lo;
+	u8 vblank_hi:4;
+	u8 vactive_hi:4;
+	u8 hsync_off_lo;
+	u8 hsync_pulse_width;
+	u8 vsync_pulse_width:4;
+	u8 vsync_off:4;
+	u8 rsvd0:6;
+	u8 hsync_off_hi:2;
+	u8 h_image;
+	u8 v_image;
+	u8 max_hv;
+	u8 h_border;
+	u8 v_border;
+	u8 rsvd1:3;
+	u8 digital:2;
+	u8 vsync_positive:1;
+	u8 hsync_positive:1;
+	u8 rsvd2:1;
+} __packed;
+
+struct lvds_pnp_id {
+	u16 mfg_name;
+	u16 product_code;
+	u32 serial;
+	u8 mfg_week;
+	u8 mfg_year;
+} __packed;
+
+struct bdb_lvds_lfp_data_entry {
+	struct lvds_fp_timing fp_timing;
+	struct lvds_dvo_timing dvo_timing;
+	struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+	struct bdb_lvds_lfp_data_entry data[16];
+} __packed;
+
+#define BDB_BACKLIGHT_TYPE_NONE	0
+#define BDB_BACKLIGHT_TYPE_PWM	2
+
+struct bdb_lfp_backlight_data_entry {
+	u8 type:2;
+	u8 active_low_pwm:1;
+	u8 obsolete1:5;
+	u16 pwm_freq_hz;
+	u8 min_brightness;
+	u8 obsolete2;
+	u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+	u8 entry_size;
+	struct bdb_lfp_backlight_data_entry data[16];
+	u8 level[16];
+} __packed;
+
+struct aimdb_header {
+	char signature[16];
+	char oem_device[20];
+	u16 aimdb_version;
+	u16 aimdb_header_size;
+	u16 aimdb_size;
+} __packed;
+
+struct aimdb_block {
+	u8 aimdb_id;
+	u16 aimdb_size;
+} __packed;
+
+struct vch_panel_data {
+	u16 fp_timing_offset;
+	u8 fp_timing_size;
+	u16 dvo_timing_offset;
+	u8 dvo_timing_size;
+	u16 text_fitting_offset;
+	u8 text_fitting_size;
+	u16 graphics_fitting_offset;
+	u8 graphics_fitting_size;
+} __packed;
+
+struct vch_bdb_22 {
+	struct aimdb_block aimdb_block;
+	struct vch_panel_data panels[16];
+} __packed;
+
+struct bdb_sdvo_lvds_options {
+	u8 panel_backlight;
+	u8 h40_set_panel_type;
+	u8 panel_type;
+	u8 ssc_clk_freq;
+	u16 als_low_trip;
+	u16 als_high_trip;
+	u8 sclalarcoeff_tab_row_num;
+	u8 sclalarcoeff_tab_row_size;
+	u8 coefficient[8];
+	u8 panel_misc_bits_1;
+	u8 panel_misc_bits_2;
+	u8 panel_misc_bits_3;
+	u8 panel_misc_bits_4;
+} __packed;
+
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
+struct bdb_driver_features {
+	u8 boot_dev_algorithm:1;
+	u8 block_display_switch:1;
+	u8 allow_display_switch:1;
+	u8 hotplug_dvo:1;
+	u8 dual_view_zoom:1;
+	u8 int15h_hook:1;
+	u8 sprite_in_clone:1;
+	u8 primary_lfp_id:1;
+
+	u16 boot_mode_x;
+	u16 boot_mode_y;
+	u8 boot_mode_bpp;
+	u8 boot_mode_refresh;
+
+	u16 enable_lfp_primary:1;
+	u16 selective_mode_pruning:1;
+	u16 dual_frequency:1;
+	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+	u16 nt_clone_support:1;
+	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+	u16 cui_aspect_scaling:1;
+	u16 preserve_aspect_ratio:1;
+	u16 sdvo_device_power_down:1;
+	u16 crt_hotplug:1;
+	u16 lvds_config:2;
+	u16 tv_hotplug:1;
+	u16 hdmi_config:2;
+
+	u8 static_display:1;
+	u8 reserved2:7;
+	u16 legacy_crt_max_x;
+	u16 legacy_crt_max_y;
+	u8 legacy_crt_max_refresh;
+
+	u8 hdmi_termination;
+	u8 custom_vbt_version;
+	/* Driver features data block */
+	u16 rmpm_enabled:1;
+	u16 s2ddt_enabled:1;
+	u16 dpst_enabled:1;
+	u16 bltclt_enabled:1;
+	u16 adb_enabled:1;
+	u16 drrs_enabled:1;
+	u16 grs_enabled:1;
+	u16 gpmt_enabled:1;
+	u16 tbt_enabled:1;
+	u16 psr_enabled:1;
+	u16 ips_enabled:1;
+	u16 reserved3:4;
+	u16 pc_feature_valid:1;
+} __packed;
+
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __packed;
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	struct edp_link_params link_params[16];
+	u32 sdrrs_msa_timing_delay;
+
+	/* ith bit indicates enabled/disabled for (i+1)th panel */
+	u16 edp_s3d_feature;
+	u16 edp_t3_optimization;
+	u64 edp_vswing_preemph;		/* v173 */
+} __packed;
+
+struct psr_table {
+	/* Feature bits */
+	u8 full_link:1;
+	u8 require_aux_to_wakeup:1;
+	u8 feature_bits_rsvd:6;
+
+	/* Wait times */
+	u8 idle_frames:4;
+	u8 lines_to_wait:3;
+	u8 wait_times_rsvd:1;
+
+	/* TP wake up time in multiple of 100 */
+	u16 tp1_wakeup_time;
+	u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+	struct psr_table psr_table[16];
+} __packed;
+
+int intel_parse_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE		(0x0<<3)
+#define   GR18_HK_LFP_STRETCH	(0x1<<3)
+#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
+#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
+#define   GR18_HK_PFIT		(0x8<<3)
+#define   GR18_HK_APM_CHANGE	(0xa<<3)
+#define   GR18_HK_MULTIPLE	(0xc<<3)
+#define GR18_USER_INT_EN	(1<<2)
+#define GR18_A0000_FLUSH_EN	(1<<1)
+#define GR18_SMM_EN		(1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT	16
+#define SWF00_XRES_SHIFT	0
+#define SWF00_RES_MASK		0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT	8
+#define SWF01_TV1_FORMAT_SHIFT	0
+#define SWF01_TV_FORMAT_MASK	0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
+#define SWF10_GTT_OVERRIDE_EN	(1<<28)
+#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE	0x0
+#define   SWF10_TOGGLE_LIST_1	0x1
+#define   SWF10_TOGGLE_LIST_2	0x2
+#define   SWF10_TOGGLE_LIST_3	0x3
+#define   SWF10_TOGGLE_LIST_4	0x4
+#define SWF10_PANNING_EN	(1<<23)
+#define SWF10_DRIVER_LOADED	(1<<22)
+#define SWF10_EXTENDED_DESKTOP	(1<<21)
+#define SWF10_EXCLUSIVE_MODE	(1<<20)
+#define SWF10_OVERLAY_EN	(1<<19)
+#define SWF10_PLANEB_HOLDOFF	(1<<18)
+#define SWF10_PLANEA_HOLDOFF	(1<<17)
+#define SWF10_VGA_HOLDOFF	(1<<16)
+#define SWF10_ACTIVE_DISP_MASK	0xffff
+#define   SWF10_PIPEB_LFP2	(1<<15)
+#define   SWF10_PIPEB_EFP2	(1<<14)
+#define   SWF10_PIPEB_TV2	(1<<13)
+#define   SWF10_PIPEB_CRT2	(1<<12)
+#define   SWF10_PIPEB_LFP	(1<<11)
+#define   SWF10_PIPEB_EFP	(1<<10)
+#define   SWF10_PIPEB_TV	(1<<9)
+#define   SWF10_PIPEB_CRT	(1<<8)
+#define   SWF10_PIPEA_LFP2	(1<<7)
+#define   SWF10_PIPEA_EFP2	(1<<6)
+#define   SWF10_PIPEA_TV2	(1<<5)
+#define   SWF10_PIPEA_CRT2	(1<<4)
+#define   SWF10_PIPEA_LFP	(1<<3)
+#define   SWF10_PIPEA_EFP	(1<<2)
+#define   SWF10_PIPEA_TV	(1<<1)
+#define   SWF10_PIPEA_CRT	(1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT	16
+#define SWF11_SV_TEST_EN	(1<<15)
+#define SWF11_IS_AGP		(1<<14)
+#define SWF11_DISPLAY_HOLDOFF	(1<<13)
+#define SWF11_DPMS_REDUCED	(1<<12)
+#define SWF11_IS_VBE_MODE	(1<<11)
+#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK		0x07
+#define   SWF11_DPMS_OFF	(1<<2)
+#define   SWF11_DPMS_SUSPEND	(1<<1)
+#define   SWF11_DPMS_STANDBY	(1<<0)
+#define   SWF11_DPMS_ON		0
+
+#define SWF14_GFX_PFIT_EN	(1<<31)
+#define SWF14_TEXT_PFIT_EN	(1<<30)
+#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN		(1<<28)
+#define SWF14_DISPLAY_HOLDOFF	(1<<27)
+#define SWF14_DISP_DETECT_EN	(1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS	(1<<24)
+#define SWF14_OS_TYPE_WIN9X	(1<<23)
+#define SWF14_OS_TYPE_WINNT	(1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK	0x00070000
+#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
+#define   SWF14_PM_ACPI		(0x3 << 16)
+#define   SWF14_PM_APM_12	(0x2 << 16)
+#define   SWF14_PM_APM_11	(0x1 << 16)
+#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
+          /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+          /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
+          /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE	0x4
+#define   SWF14_APM_SUSPEND	0x3
+#define   SWF14_APM_STANDBY	0x1
+#define   SWF14_APM_RESTORE	0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+#define  DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
+#define  DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
+#define  DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
+#define  DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
+#define  DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
+#define  DEVICE_TYPE_MIPI_OUTPUT	(1 << 10)
+#define  DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
+#define  DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
+#define  DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
+#define  DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
+#define  DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
+#define  DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
+#define  DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
+#define  DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
+#define  DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
+	 DEVICE_TYPE_MIPI_OUTPUT | \
+	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
+	 DEVICE_TYPE_DUAL_CHANNEL | \
+	 DEVICE_TYPE_LVDS_SINGALING | \
+	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+	 DEVICE_TYPE_VIDEO_SIGNALING | \
+	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+	 DEVICE_TYPE_ANALOG_OUTPUT)
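+
+/*
+ * A sketch of the intended comparison (hedged; 'device_type' is the child
+ * device's device_type field): mask both sides down to the bits above
+ * before comparing:
+ *
+ *	is_edp = (device_type & DEVICE_TYPE_eDP_BITS) ==
+ *		 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
+ */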
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA	0
+#define DVO_PORT_HDMIB	1
+#define DVO_PORT_HDMIC	2
+#define DVO_PORT_HDMID	3
+#define DVO_PORT_LVDS	4
+#define DVO_PORT_TV	5
+#define DVO_PORT_CRT	6
+#define DVO_PORT_DPB	7
+#define DVO_PORT_DPC	8
+#define DVO_PORT_DPD	9
+#define DVO_PORT_DPA	10
+#define DVO_PORT_DPE	11
+#define DVO_PORT_HDMIE	12
+#define DVO_PORT_MIPIA	21
+#define DVO_PORT_MIPIB	22
+#define DVO_PORT_MIPIC	23
+#define DVO_PORT_MIPID	24
+
+/* Block 52 contains MIPI panel info.
+ * There are 6 such entries; the index into the correct entry is based on
+ * the panel_index in block #40 (LFP options). A lookup sketch follows the
+ * define below.
+ */
+#define MAX_MIPI_CONFIGURATIONS	6
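+
+/*
+ * Lookup sketch referenced above (hedged; 'panel_type' comes from the
+ * block #40 LFP options and 'bdb_mipi' points at the block 52 payload):
+ *
+ *	if (panel_type < MAX_MIPI_CONFIGURATIONS)
+ *		config = &bdb_mipi->config[panel_type];
+ */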
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID	0
+#define MIPI_DSI_GENERIC_PANEL_ID	1
+
+/*
+ * PMIC vs SoC backlight support is specified in the pwm_blc
+ * field of the mipi_config block below.
+ */
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+
+struct mipi_config {
+	u16 panel_id;
+
+	/* General Params */
+	u32 enable_dithering:1;
+	u32 rsvd1:1;
+	u32 is_bridge:1;
+
+	u32 panel_arch_type:2;
+	u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE	0x1
+#define NON_BURST_SYNC_EVENTS	0x2
+#define BURST_MODE		0x3
+	u32 video_transfer_mode:2;
+
+	u32 cabc_supported:1;
+	u32 pwm_blc:1;
+
+	/* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565			0x1
+#define PIXEL_FORMAT_RGB666			0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED	0x3
+#define PIXEL_FORMAT_RGB888			0x4
+	u32 videomode_color_format:4;
+
+	/* Bit 15:14 */
+#define ENABLE_ROTATION_0	0x0
+#define ENABLE_ROTATION_90	0x1
+#define ENABLE_ROTATION_180	0x2
+#define ENABLE_ROTATION_270	0x3
+	u32 rotation:2;
+	u32 bta_enabled:1;
+	u32 rsvd2:15;
+
+	/* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED	0
+#define DUAL_LINK_FRONT_BACK	1
+#define DUAL_LINK_PIXEL_ALT	2
+	u16 dual_link:2;
+	u16 lane_cnt:2;
+	u16 pixel_overlap:3;
+	u16 rsvd3:9;
+
+	u16 rsvd4;
+
+	u8 rsvd5;
+	u32 target_burst_mode_freq;
+	u32 dsi_ddr_clk;
+	u32 bridge_ref_clk;
+
+#define  BYTE_CLK_SEL_20MHZ		0
+#define  BYTE_CLK_SEL_10MHZ		1
+#define  BYTE_CLK_SEL_5MHZ		2
+	u8 byte_clk_sel:2;
+
+	u8 rsvd6:6;
+
+	/* DPHY Flags */
+	u16 dphy_param_valid:1;
+	u16 eot_pkt_disabled:1;
+	u16 enable_clk_stop:1;
+	u16 rsvd7:13;
+
+	u32 hs_tx_timeout;
+	u32 lp_rx_timeout;
+	u32 turn_around_timeout;
+	u32 device_reset_timer;
+	u32 master_init_timer;
+	u32 dbi_bw_timer;
+	u32 lp_byte_clk_val;
+
+	/*  4 byte Dphy Params */
+	u32 prepare_cnt:6;
+	u32 rsvd8:2;
+	u32 clk_zero_cnt:8;
+	u32 trail_cnt:5;
+	u32 rsvd9:3;
+	u32 exit_zero_cnt:6;
+	u32 rsvd10:2;
+
+	u32 clk_lane_switch_cnt;
+	u32 hl_switch_cnt;
+
+	u32 rsvd11[6];
+
+	/* timings based on dphy spec */
+	u8 tclk_miss;
+	u8 tclk_post;
+	u8 rsvd12;
+	u8 tclk_pre;
+	u8 tclk_prepare;
+	u8 tclk_settle;
+	u8 tclk_term_enable;
+	u8 tclk_trail;
+	u16 tclk_prepare_clkzero;
+	u8 rsvd13;
+	u8 td_term_enable;
+	u8 teot;
+	u8 ths_exit;
+	u8 ths_prepare;
+	u16 ths_prepare_hszero;
+	u8 rsvd14;
+	u8 ths_settle;
+	u8 ths_skip;
+	u8 ths_trail;
+	u8 tinit;
+	u8 tlpx;
+	u8 rsvd15[3];
+
+	/* GPIOs */
+	u8 panel_enable;
+	u8 bl_enable;
+	u8 pwm_enable;
+	u8 reset_r_n;
+	u8 pwr_down_r;
+	u8 stdby_r_n;
+} __packed;
+
+/* Block 52 contains the MIPI configuration block:
+ * 6 * mipi_config, followed by 6 of the pps data
+ * blocks below.
+ *
+ * All delays have a unit of 100us.
+ */
+struct mipi_pps_data {
+	u16 panel_on_delay;
+	u16 bl_enable_delay;
+	u16 bl_disable_delay;
+	u16 panel_off_delay;
+	u16 panel_power_cycle_delay;
+} __packed;
+
+struct bdb_mipi_config {
+	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+} __packed;
+
+/* Block 53 contains the MIPI sequences needed by the panel
+ * for enabling it. This block is variable in size, with a
+ * maximum of 6 such blocks.
+ */
+struct bdb_mipi_sequence {
+	u8 version;
+	u8 data[0];
+} __packed;
+
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+	MIPI_SEQ_UNDEFINED = 0,
+	MIPI_SEQ_ASSERT_RESET,
+	MIPI_SEQ_INIT_OTP,
+	MIPI_SEQ_DISPLAY_ON,
+	MIPI_SEQ_DISPLAY_OFF,
+	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+	MIPI_SEQ_ELEM_UNDEFINED = 0,
+	MIPI_SEQ_ELEM_SEND_PKT,
+	MIPI_SEQ_ELEM_DELAY,
+	MIPI_SEQ_ELEM_GPIO,
+	MIPI_SEQ_ELEM_STATUS,
+	MIPI_SEQ_ELEM_MAX
+};
+
+enum mipi_gpio_pin_index {
+	MIPI_GPIO_UNDEFINED = 0,
+	MIPI_GPIO_PANEL_ENABLE,
+	MIPI_GPIO_BL_ENABLE,
+	MIPI_GPIO_PWM_ENABLE,
+	MIPI_GPIO_RESET_N,
+	MIPI_GPIO_PWR_DOWN_R,
+	MIPI_GPIO_STDBY_RST_N,
+	MIPI_GPIO_MAX
+};
+
+#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
new file mode 100644
index 0000000..63fea6a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -0,0 +1,881 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 |		\
+			   ADPA_CRT_HOTPLUG_WARMUP_10MS |		\
+			   ADPA_CRT_HOTPLUG_SAMPLE_4S |			\
+			   ADPA_CRT_HOTPLUG_VOLTAGE_50 |		\
+			   ADPA_CRT_HOTPLUG_VOLREF_325MV |		\
+			   ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+	struct intel_encoder base;
+	/* DPMS state is stored in the connector, which we need in the
+	 * encoder's enable/disable callbacks */
+	struct intel_connector *connector;
+	bool force_hotplug_required;
+	u32 adpa_reg;
+};
+
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_crt, base);
+}
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+	return intel_encoder_to_crt(intel_attached_encoder(connector));
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (!(tmp & ADPA_DAC_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	int dotclock;
+
+	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+			       struct intel_crtc_state *pipe_config)
+{
+	intel_ddi_get_config(encoder, pipe_config);
+
+	pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+					      DRM_MODE_FLAG_NHSYNC |
+					      DRM_MODE_FLAG_PVSYNC |
+					      DRM_MODE_FLAG_NVSYNC);
+	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	u32 adpa;
+
+	if (INTEL_INFO(dev)->gen >= 5)
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+	/* For CPT allow 3 pipe config, for others just use A or B */
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
+		adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+	else if (crtc->pipe == 0)
+		adpa |= ADPA_PIPE_A_SELECT;
+	else
+		adpa |= ADPA_PIPE_B_SELECT;
+
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(BCLRPAT(crtc->pipe), 0);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		adpa |= ADPA_DAC_ENABLE;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	}
+
+	I915_WRITE(crt->adpa_reg, adpa);
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void pch_disable_crt(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_crt(struct intel_encoder *encoder)
+{
+	intel_disable_crt(encoder);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	int max_clock = 0;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+
+	if (IS_GEN2(dev))
+		max_clock = 350000;
+	else
+		max_clock = 400000;
+	if (mode->clock > max_clock)
+		return MODE_CLOCK_HIGH;
+
+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (HAS_PCH_SPLIT(dev))
+		pipe_config->has_pch_encoder = true;
+
+	/* LPT FDI RX only supports 8bpc. */
+	if (HAS_PCH_LPT(dev)) {
+		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+			return false;
+		}
+
+		pipe_config->pipe_bpp = 24;
+	}
+
+	/* FDI must always be 2.7 GHz */
+	if (HAS_DDI(dev)) {
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+		pipe_config->port_clock = 135000 * 2;
+
+		pipe_config->dpll_hw_state.wrpll = 0;
+		pipe_config->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+	}
+
+	return true;
+}
+
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 adpa;
+	bool ret;
+
+	/* The first time through, trigger an explicit detection cycle */
+	if (crt->force_hotplug_required) {
+		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		u32 save_adpa;
+
+		crt->force_hotplug_required = 0;
+
+		save_adpa = adpa = I915_READ(crt->adpa_reg);
+		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+		if (turn_off_dac)
+			adpa &= ~ADPA_DAC_ENABLE;
+
+		I915_WRITE(crt->adpa_reg, adpa);
+
+		if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+		if (turn_off_dac) {
+			I915_WRITE(crt->adpa_reg, save_adpa);
+			POSTING_READ(crt->adpa_reg);
+		}
+	}
+
+	/* Check the status to see if both blue and green are on now */
+	adpa = I915_READ(crt->adpa_reg);
+	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+		ret = true;
+	else
+		ret = false;
+	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+	return ret;
+}
+
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 adpa;
+	bool ret;
+	u32 save_adpa;
+
+	save_adpa = adpa = I915_READ(crt->adpa_reg);
+	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+	I915_WRITE(crt->adpa_reg, adpa);
+
+	if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+		     1000)) {
+		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+		I915_WRITE(crt->adpa_reg, save_adpa);
+	}
+
+	/* Check the status to see if both blue and green are on now */
+	adpa = I915_READ(crt->adpa_reg);
+	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+		ret = true;
+	else
+		ret = false;
+
+	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+	return ret;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 stat;
+	bool ret = false;
+	int i, tries = 0;
+
+	if (HAS_PCH_SPLIT(dev))
+		return intel_ironlake_crt_detect_hotplug(connector);
+
+	if (IS_VALLEYVIEW(dev))
+		return valleyview_crt_detect_hotplug(connector);
+
+	/*
+	 * On 4 series desktop, the CRT detect sequence needs to be done
+	 * twice to get a reliable result.
+	 */
+
+	if (IS_G4X(dev) && !IS_GM45(dev))
+		tries = 2;
+	else
+		tries = 1;
+
+	for (i = 0; i < tries ; i++) {
+		/* turn on the FORCE_DETECT */
+		i915_hotplug_interrupt_update(dev_priv,
+					      CRT_HOTPLUG_FORCE_DETECT,
+					      CRT_HOTPLUG_FORCE_DETECT);
+		/* wait for FORCE_DETECT to go off */
+		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+			      CRT_HOTPLUG_FORCE_DETECT) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+	}
+
+	stat = I915_READ(PORT_HOTPLUG_STAT);
+	if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+		ret = true;
+
+	/* clear the interrupt we just generated, if any */
+	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+
+	return ret;
+}
+
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+				struct i2c_adapter *i2c)
+{
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
+
+	return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+				struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret;
+
+	edid = intel_crt_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+	struct edid *edid;
+	struct i2c_adapter *i2c;
+	bool ret = false;
+
+	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+	edid = intel_crt_get_edid(connector, i2c);
+
+	if (edid) {
+		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			ret = true;
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+		}
+	} else {
+		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+	}
+
+	kfree(edid);
+
+	return ret;
+}
+
+static enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt)
+{
+	struct drm_device *dev = crt->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
+	uint32_t save_bclrpat;
+	uint32_t save_vtotal;
+	uint32_t vtotal, vactive;
+	uint32_t vsample;
+	uint32_t vblank, vblank_start, vblank_end;
+	uint32_t dsl;
+	uint32_t bclrpat_reg;
+	uint32_t vtotal_reg;
+	uint32_t vblank_reg;
+	uint32_t vsync_reg;
+	uint32_t pipeconf_reg;
+	uint32_t pipe_dsl_reg;
+	uint8_t	st00;
+	enum drm_connector_status status;
+
+	DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+	bclrpat_reg = BCLRPAT(pipe);
+	vtotal_reg = VTOTAL(pipe);
+	vblank_reg = VBLANK(pipe);
+	vsync_reg = VSYNC(pipe);
+	pipeconf_reg = PIPECONF(pipe);
+	pipe_dsl_reg = PIPEDSL(pipe);
+
+	save_bclrpat = I915_READ(bclrpat_reg);
+	save_vtotal = I915_READ(vtotal_reg);
+	vblank = I915_READ(vblank_reg);
+
+	vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+	vactive = (save_vtotal & 0x7ff) + 1;
+
+	vblank_start = (vblank & 0xfff) + 1;
+	vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+	/* Set the border color to purple. */
+	I915_WRITE(bclrpat_reg, 0x500050);
+
+	if (!IS_GEN2(dev)) {
+		uint32_t pipeconf = I915_READ(pipeconf_reg);
+		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+		POSTING_READ(pipeconf_reg);
+		/* Wait for the next vblank to substitute
+		 * the border color for the color info */
+		intel_wait_for_vblank(dev, pipe);
+		st00 = I915_READ8(VGA_MSR_WRITE);
+		status = ((st00 & (1 << 4)) != 0) ?
+			connector_status_connected :
+			connector_status_disconnected;
+
+		I915_WRITE(pipeconf_reg, pipeconf);
+	} else {
+		bool restore_vblank = false;
+		int count, detect;
+
+		/*
+		 * If there isn't any border, add some.
+		 * Yes, this will flicker.
+		 */
+		if (vblank_start <= vactive && vblank_end >= vtotal) {
+			uint32_t vsync = I915_READ(vsync_reg);
+			uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+			vblank_start = vsync_start;
+			I915_WRITE(vblank_reg,
+				   (vblank_start - 1) |
+				   ((vblank_end - 1) << 16));
+			restore_vblank = true;
+		}
+		/* sample in the vertical border, selecting the larger one */
+		if (vblank_start - vactive >= vtotal - vblank_end)
+			vsample = (vblank_start + vactive) >> 1;
+		else
+			vsample = (vtotal + vblank_end) >> 1;
+
+		/*
+		 * Wait for the border to be displayed
+		 */
+		while (I915_READ(pipe_dsl_reg) >= vactive)
+			;
+		while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+			;
+		/*
+		 * Watch ST00 for an entire scanline
+		 */
+		detect = 0;
+		count = 0;
+		do {
+			count++;
+			/* Read the ST00 VGA status register */
+			st00 = I915_READ8(VGA_MSR_WRITE);
+			if (st00 & (1 << 4))
+				detect++;
+		} while ((I915_READ(pipe_dsl_reg) == dsl));
+
+		/* restore vblank if necessary */
+		if (restore_vblank)
+			I915_WRITE(vblank_reg, vblank);
+		/*
+		 * If more than 3/4 of the scanline detected a monitor,
+		 * then it is assumed to be present. This works even on i830,
+		 * where there isn't any way to force the border color across
+		 * the screen
+		 */
+		status = detect * 4 > count * 3 ?
+			 connector_status_connected :
+			 connector_status_disconnected;
+	}
+
+	/* Restore previous settings */
+	I915_WRITE(bclrpat_reg, save_bclrpat);
+
+	return status;
+}
+
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_encoder *intel_encoder = &crt->base;
+	enum intel_display_power_domain power_domain;
+	enum drm_connector_status status;
+	struct intel_load_detect_pipe tmp;
+	struct drm_modeset_acquire_ctx ctx;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+		      connector->base.id, connector->name,
+		      force);
+
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		/* We cannot rely on the HPD pin always being correctly wired
+		 * up, for example many KVMs do not pass it through, so we
+		 * only trust a positive assertion that the monitor is
+		 * connected.
+		 */
+		if (intel_crt_detect_hotplug(connector)) {
+			DRM_DEBUG_KMS("CRT detected via hotplug\n");
+			status = connector_status_connected;
+			goto out;
+		} else
+			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+	}
+
+	if (intel_crt_detect_ddc(connector)) {
+		status = connector_status_connected;
+		goto out;
+	}
+
+	/* Load detection is broken on HPD capable machines. Whoever wants a
+	 * broken monitor (without edid) to work behind a broken kvm (that fails
+	 * to have the right resistors for HP detection) needs to fix this up.
+	 * For now just bail out. */
+	if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+		status = connector_status_disconnected;
+		goto out;
+	}
+
+	if (!force) {
+		status = connector->status;
+		goto out;
+	}
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+	/* for pre-945g platforms use load detect */
+	if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
+		if (intel_crt_detect_ddc(connector))
+			status = connector_status_connected;
+		else if (INTEL_INFO(dev)->gen < 4)
+			status = intel_crt_load_detect(crt);
+		else
+			status = connector_status_unknown;
+		intel_release_load_detect_pipe(connector, &tmp, &ctx);
+	} else
+		status = connector_status_unknown;
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+out:
+	intel_display_power_put(dev_priv, power_domain);
+	return status;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_encoder *intel_encoder = &crt->base;
+	enum intel_display_power_domain power_domain;
+	int ret;
+	struct i2c_adapter *i2c;
+
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
+	if (ret || !IS_G4X(dev))
+		goto out;
+
+	/* Try to probe digital port for output in DVI-I -> VGA mode. */
+	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
+}
+
+static int intel_crt_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t value)
+{
+	return 0;
+}
+
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		u32 adpa;
+
+		adpa = I915_READ(crt->adpa_reg);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(crt->adpa_reg, adpa);
+		POSTING_READ(crt->adpa_reg);
+
+		DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
+		crt->force_hotplug_required = 1;
+	}
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_crt_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = intel_crt_destroy,
+	.set_property = intel_crt_set_property,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_get_property = intel_connector_atomic_get_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+	.mode_valid = intel_crt_mode_valid,
+	.get_modes = intel_crt_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+	{
+		.callback = intel_no_crt_dmi_callback,
+		.ident = "ACER ZGB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+		},
+	},
+	{
+		.callback = intel_no_crt_dmi_callback,
+		.ident = "DELL XPS 8700",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+		},
+	},
+	{ }
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct intel_crt *crt;
+	struct intel_connector *intel_connector;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Skip machines without VGA that falsely report hotplug events */
+	if (dmi_check_system(intel_no_crt))
+		return;
+
+	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+	if (!crt)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(crt);
+		return;
+	}
+
+	connector = &intel_connector->base;
+	crt->connector = intel_connector;
+	drm_connector_init(dev, &intel_connector->base,
+			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+			 DRM_MODE_ENCODER_DAC);
+
+	intel_connector_attach_encoder(intel_connector, &crt->base);
+
+	crt->base.type = INTEL_OUTPUT_ANALOG;
+	crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
+	if (IS_I830(dev))
+		crt->base.crtc_mask = (1 << 0);
+	else
+		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+	if (IS_GEN2(dev))
+		connector->interlace_allowed = 0;
+	else
+		connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		crt->adpa_reg = VLV_ADPA;
+	else
+		crt->adpa_reg = ADPA;
+
+	crt->base.compute_config = intel_crt_compute_config;
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
+		crt->base.disable = pch_disable_crt;
+		crt->base.post_disable = pch_post_disable_crt;
+	} else {
+		crt->base.disable = intel_disable_crt;
+	}
+	crt->base.enable = intel_enable_crt;
+	if (I915_HAS_HOTPLUG(dev))
+		crt->base.hpd_pin = HPD_CRT;
+	if (HAS_DDI(dev)) {
+		crt->base.get_config = hsw_crt_get_config;
+		crt->base.get_hw_state = intel_ddi_get_hw_state;
+	} else {
+		crt->base.get_config = intel_crt_get_config;
+		crt->base.get_hw_state = intel_crt_get_hw_state;
+	}
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+	drm_connector_register(connector);
+
+	if (!I915_HAS_HOTPLUG(dev))
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	/*
+	 * Configure the automatic hotplug detection stuff
+	 */
+	crt->force_hotplug_required = 0;
+
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity and link reversal bits or not, instead of relying on the
+	 * BIOS.
+	 */
+	if (HAS_PCH_LPT(dev)) {
+		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+				 FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+		dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+	}
+
+	intel_crt_reset(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
new file mode 100644
index 0000000..fc28c51
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+/**
+ * DOC: csr support for dmc
+ *
+ * Display Context Save and Restore (CSR) firmware support was added from
+ * gen9 onwards to drive the newly added DMC (Display Microcontroller) in
+ * the display engine. The DMC saves and restores the display engine state
+ * when it enters a low-power state and comes back to normal.
+ *
+ * The firmware loading status will be one of the states below:
+ * FW_UNINITIALIZED, FW_LOADED, FW_FAILED.
+ *
+ * Once the firmware is written into the registers, the status moves from
+ * FW_UNINITIALIZED to FW_LOADED; on any erroneous condition the status
+ * moves to FW_FAILED.
+ */
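+
+/*
+ * A hedged consumer sketch: code that depends on the DMC being ready would
+ * gate on the loading status exposed by intel_csr_load_status_get() below:
+ *
+ *	if (intel_csr_load_status_get(dev_priv) != FW_LOADED)
+ *		return;
+ */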
+
+#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+
+MODULE_FIRMWARE(I915_CSR_SKL);
+MODULE_FIRMWARE(I915_CSR_BXT);
+
+/*
+ * SKL CSR registers for DC5 and DC6
+ */
+#define CSR_PROGRAM(i)			(0x80000 + (i) * 4)
+#define CSR_SSP_BASE_ADDR_GEN9		0x00002FC0
+#define CSR_HTP_ADDR_SKL		0x00500034
+#define CSR_SSP_BASE			0x8F074
+#define CSR_HTP_SKL			0x8F004
+#define CSR_LAST_WRITE			0x8F034
+#define CSR_LAST_WRITE_VALUE		0xc003b400
+/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define CSR_MAX_FW_SIZE			0x2FFF
+#define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
+#define CSR_MMIO_START_RANGE		0x80000
+#define CSR_MMIO_END_RANGE		0x8FFFF
+
+struct intel_css_header {
+	/* 0x09 for DMC */
+	uint32_t module_type;
+
+	/* Includes the DMC specific header in dwords */
+	uint32_t header_len;
+
+	/* always 0x10000 */
+	uint32_t header_ver;
+
+	/* Not used */
+	uint32_t module_id;
+
+	/* Not used */
+	uint32_t module_vendor;
+
+	/* in YYYYMMDD format */
+	uint32_t date;
+
+	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
+	uint32_t size;
+
+	/* Not used */
+	uint32_t key_size;
+
+	/* Not used */
+	uint32_t modulus_size;
+
+	/* Not used */
+	uint32_t exponent_size;
+
+	/* Not used */
+	uint32_t reserved1[12];
+
+	/* Major Minor */
+	uint32_t version;
+
+	/* Not used */
+	uint32_t reserved2[8];
+
+	/* Not used */
+	uint32_t kernel_header_info;
+} __packed;
+
+struct intel_fw_info {
+	uint16_t reserved1;
+
+	/* Stepping (A, B, C, ..., *). * is a wildcard */
+	char stepping;
+
+	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
+	char substepping;
+
+	uint32_t offset;
+	uint32_t reserved2;
+} __packed;
+
+struct intel_package_header {
+	/* DMC container header length in dwords */
+	unsigned char header_len;
+
+	/* always 0x01 */
+	unsigned char header_ver;
+
+	unsigned char reserved[10];
+
+	/* Number of valid entries in the FWInfo array below */
+	uint32_t num_entries;
+
+	struct intel_fw_info fw_info[20];
+} __packed;
+
+struct intel_dmc_header {
+	/* always 0x40403E3E */
+	uint32_t signature;
+
+	/* DMC binary header length */
+	unsigned char header_len;
+
+	/* 0x01 */
+	unsigned char header_ver;
+
+	/* Reserved */
+	uint16_t dmcc_ver;
+
+	/* Major, Minor */
+	uint32_t	project;
+
+	/* Firmware program size (excluding header) in dwords */
+	uint32_t	fw_size;
+
+	/* Major Minor version */
+	uint32_t fw_version;
+
+	/* Number of valid MMIO cycles present. */
+	uint32_t mmio_count;
+
+	/* MMIO address */
+	uint32_t mmioaddr[8];
+
+	/* MMIO data */
+	uint32_t mmiodata[8];
+
+	/* FW filename  */
+	unsigned char dfile[32];
+
+	uint32_t reserved1[2];
+} __packed;
+
+struct stepping_info {
+	char stepping;
+	char substepping;
+};
+
+static const struct stepping_info skl_stepping_info[] = {
+		{'A', '0'}, {'B', '0'}, {'C', '0'},
+		{'D', '0'}, {'E', '0'}, {'F', '0'},
+		{'G', '0'}, {'H', '0'}, {'I', '0'},
+		{'J', '0'}, {'K', '0'}
+};
+
+static const struct stepping_info bxt_stepping_info[] = {
+	{'A', '0'}, {'A', '1'}, {'A', '2'},
+	{'B', '0'}, {'B', '1'}, {'B', '2'}
+};
+
+static char intel_get_stepping(struct drm_device *dev)
+{
+	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+			ARRAY_SIZE(skl_stepping_info)))
+		return skl_stepping_info[dev->pdev->revision].stepping;
+	else if (IS_BROXTON(dev) && (dev->pdev->revision <
+				ARRAY_SIZE(bxt_stepping_info)))
+		return bxt_stepping_info[dev->pdev->revision].stepping;
+	else
+		return -ENODATA;
+}
+
+static char intel_get_substepping(struct drm_device *dev)
+{
+	if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+			ARRAY_SIZE(skl_stepping_info)))
+		return skl_stepping_info[dev->pdev->revision].substepping;
+	else if (IS_BROXTON(dev) && (dev->pdev->revision <
+			ARRAY_SIZE(bxt_stepping_info)))
+		return bxt_stepping_info[dev->pdev->revision].substepping;
+	else
+		return -ENODATA;
+}
+
+/**
+ * intel_csr_load_status_get() - to get firmware loading status.
+ * @dev_priv: i915 device.
+ *
+ * This function helps to get the firmware loading status.
+ *
+ * Return: Firmware loading status.
+ */
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
+{
+	enum csr_state state;
+
+	mutex_lock(&dev_priv->csr_lock);
+	state = dev_priv->csr.state;
+	mutex_unlock(&dev_priv->csr_lock);
+
+	return state;
+}
+
+/**
+ * intel_csr_load_status_set() - help to set firmware loading status.
+ * @dev_priv: i915 device.
+ * @state: enumeration of firmware loading status.
+ *
+ * Set the firmware loading status.
+ */
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+			enum csr_state state)
+{
+	mutex_lock(&dev_priv->csr_lock);
+	dev_priv->csr.state = state;
+	mutex_unlock(&dev_priv->csr_lock);
+}
+
+/**
+ * intel_csr_load_program() - write the firmware from memory to register.
+ * @dev: drm device.
+ *
+ * CSR firmware is read from a .bin file and kept in internal memory once.
+ * Every time the display comes back from a low-power state this function is
+ * called to copy the firmware from internal memory to the registers.
+ */
+void intel_csr_load_program(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 *payload = dev_priv->csr.dmc_payload;
+	uint32_t i, fw_size;
+
+	if (!IS_GEN9(dev)) {
+		DRM_ERROR("No CSR support available for this platform\n");
+		return;
+	}
+
+	/*
+	 * FIXME: Firmware gets lost on S3/S4, but not when entering system
+	 * standby or suspend-to-idle (which is just like forced runtime pm).
+	 * Unfortunately the ACPI subsystem doesn't yet give us a way to
+	 * differentiate this, hence figure it out with this hack.
+	 */
+	if (I915_READ(CSR_PROGRAM(0)))
+		return;
+
+	mutex_lock(&dev_priv->csr_lock);
+	fw_size = dev_priv->csr.dmc_fw_size;
+	for (i = 0; i < fw_size; i++)
+		I915_WRITE(CSR_PROGRAM(i), payload[i]);
+
+	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
+		I915_WRITE(dev_priv->csr.mmioaddr[i],
+			dev_priv->csr.mmiodata[i]);
+	}
+
+	dev_priv->csr.state = FW_LOADED;
+	mutex_unlock(&dev_priv->csr_lock);
+}
+
+static void finish_csr_load(const struct firmware *fw, void *context)
+{
+	struct drm_i915_private *dev_priv = context;
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_css_header *css_header;
+	struct intel_package_header *package_header;
+	struct intel_dmc_header *dmc_header;
+	struct intel_csr *csr = &dev_priv->csr;
+	char stepping = intel_get_stepping(dev);
+	char substepping = intel_get_substepping(dev);
+	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+	uint32_t i;
+	uint32_t *dmc_payload;
+	bool fw_loaded = false;
+
+	if (!fw) {
+		i915_firmware_load_error_print(csr->fw_path, 0);
+		goto out;
+	}
+
+	if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
+		DRM_ERROR("Unknown stepping info, firmware loading failed\n");
+		goto out;
+	}
+
+	/* Extract CSS Header information */
+	css_header = (struct intel_css_header *)fw->data;
+	if (sizeof(struct intel_css_header) !=
+		(css_header->header_len * 4)) {
+		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+			(css_header->header_len * 4));
+		goto out;
+	}
+	readcount += sizeof(struct intel_css_header);
+
+	/* Extract package header information */
+	package_header = (struct intel_package_header *)
+					&fw->data[readcount];
+	if (sizeof(struct intel_package_header) !=
+		(package_header->header_len * 4)) {
+		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+			(package_header->header_len * 4));
+		goto out;
+	}
+	readcount += sizeof(struct intel_package_header);
+
+	/* Search the fw_info table for dmc_offset to find the firmware binary. */
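+	/*
+	 * Match priority: an entry matching our stepping, with either an
+	 * exact or a wildcard substepping, wins immediately; a full '*'/'*'
+	 * wildcard entry is only recorded as a fallback.
+	 */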
+	for (i = 0; i < package_header->num_entries; i++) {
+		if (package_header->fw_info[i].substepping == '*' &&
+			stepping == package_header->fw_info[i].stepping) {
+			dmc_offset = package_header->fw_info[i].offset;
+			break;
+		} else if (stepping == package_header->fw_info[i].stepping &&
+			substepping == package_header->fw_info[i].substepping) {
+			dmc_offset = package_header->fw_info[i].offset;
+			break;
+		} else if (package_header->fw_info[i].stepping == '*' &&
+			package_header->fw_info[i].substepping == '*')
+			dmc_offset = package_header->fw_info[i].offset;
+	}
+	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
+		DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+		goto out;
+	}
+	readcount += dmc_offset;
+
+	/* Extract dmc_header information. */
+	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
+	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
+		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+				(dmc_header->header_len));
+		goto out;
+	}
+	readcount += sizeof(struct intel_dmc_header);
+
+	/* Cache the dmc header info. */
+	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
+		DRM_ERROR("Firmware has wrong mmio count %u\n",
+						dmc_header->mmio_count);
+		goto out;
+	}
+	csr->mmio_count = dmc_header->mmio_count;
+	for (i = 0; i < dmc_header->mmio_count; i++) {
+		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
+			dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
+			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
+						dmc_header->mmioaddr[i]);
+			goto out;
+		}
+		csr->mmioaddr[i] = dmc_header->mmioaddr[i];
+		csr->mmiodata[i] = dmc_header->mmiodata[i];
+	}
+
+	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
+	nbytes = dmc_header->fw_size * 4;
+	if (nbytes > CSR_MAX_FW_SIZE) {
+		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+		goto out;
+	}
+	csr->dmc_fw_size = dmc_header->fw_size;
+
+	csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
+	if (!csr->dmc_payload) {
+		DRM_ERROR("Memory allocation failed for dmc payload\n");
+		goto out;
+	}
+
+	dmc_payload = csr->dmc_payload;
+	memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+	/* load csr program during system boot, as needed for DC states */
+	intel_csr_load_program(dev);
+	fw_loaded = true;
+
+	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
+out:
+	if (fw_loaded)
+		intel_runtime_pm_put(dev_priv);
+	else
+		intel_csr_load_status_set(dev_priv, FW_FAILED);
+
+	release_firmware(fw);
+}
+
+/**
+ * intel_csr_ucode_init() - initialize the firmware loading.
+ * @dev: drm device.
+ *
+ * This function is called when the display driver loads. It reads the
+ * firmware from a .bin file and copies it into internal memory.
+ */
+void intel_csr_ucode_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_csr *csr = &dev_priv->csr;
+	int ret;
+
+	if (!HAS_CSR(dev))
+		return;
+
+	if (IS_SKYLAKE(dev))
+		csr->fw_path = I915_CSR_SKL;
+	else if (IS_BROXTON(dev_priv))
+		csr->fw_path = I915_CSR_BXT;
+	else {
+		DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
+		intel_csr_load_status_set(dev_priv, FW_FAILED);
+		return;
+	}
+
+	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
+	/*
+	 * Obtain a runtime pm reference until the CSR is loaded, to avoid
+	 * entering runtime suspend in the meantime.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
+	/* CSR supported for platform, load firmware */
+	ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
+				&dev_priv->dev->pdev->dev,
+				GFP_KERNEL, dev_priv,
+				finish_csr_load);
+	if (ret) {
+		i915_firmware_load_error_print(csr->fw_path, ret);
+		intel_csr_load_status_set(dev_priv, FW_FAILED);
+	}
+}
+
+/**
+ * intel_csr_ucode_fini() - unload the CSR firmware.
+ * @dev: drm device.
+ *
+ * Firmware unloading includes freeing the internal memory and resetting the
+ * firmware loading status.
+ */
+void intel_csr_ucode_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_CSR(dev))
+		return;
+
+	intel_csr_load_status_set(dev_priv, FW_FAILED);
+	kfree(dev_priv->csr.dmc_payload);
+}
+
+void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+	WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+		  "CSR is not loaded.\n");
+	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+		  "CSR program storage start is NULL\n");
+	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base not set up\n");
+	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP not set up\n");
+}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 0000000..3c6b076
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,3329 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+struct ddi_buf_trans {
+	u32 trans1;	/* balance leg enable, de-emph level */
+	u32 trans2;	/* vref sel, vswing */
+	u8 i_boost;	/* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
+};
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
+static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },
+	{ 0x00D75FFF, 0x0005000A, 0x0 },
+	{ 0x00C30FFF, 0x00040006, 0x0 },
+	{ 0x80AAAFFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },
+	{ 0x00D75FFF, 0x000C0004, 0x0 },
+	{ 0x80C30FFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x00040006, 0x0 },
+	{ 0x80D75FFF, 0x000B0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000F000A, 0x0 },
+	{ 0x00C30FFF, 0x00060006, 0x0 },
+	{ 0x00AAAFFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x000F000A, 0x0 },
+	{ 0x00D75FFF, 0x00160004, 0x0 },
+	{ 0x00C30FFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x00060006, 0x0 },
+	{ 0x00D75FFF, 0x001E0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
+					/* Idx	NT mV d	T mV d	db	*/
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00E79FFF, 0x000E000C, 0x0 },/* 1:	400	500	2	*/
+	{ 0x00D75FFF, 0x0005000A, 0x0 },/* 2:	400	600	3.5	*/
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },/* 3:	600	600	0	*/
+	{ 0x00E79FFF, 0x001D0007, 0x0 },/* 4:	600	750	2	*/
+	{ 0x00D75FFF, 0x000C0004, 0x0 },/* 5:	600	900	3.5	*/
+	{ 0x00FFFFFF, 0x00040006, 0x0 },/* 6:	800	800	0	*/
+	{ 0x80E79FFF, 0x00030002, 0x0 },/* 7:	800	1000	2	*/
+	{ 0x00FFFFFF, 0x00140005, 0x0 },/* 8:	850	850	0	*/
+	{ 0x00FFFFFF, 0x000C0004, 0x0 },/* 9:	900	900	0	*/
+	{ 0x00FFFFFF, 0x001C0003, 0x0 },/* 10:	950	950	0	*/
+	{ 0x80FFFFFF, 0x00030002, 0x0 },/* 11:	1000	1000	0	*/
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
+	{ 0x00FFFFFF, 0x00000012, 0x0 },
+	{ 0x00EBAFFF, 0x00020011, 0x0 },
+	{ 0x00C71FFF, 0x0006000F, 0x0 },
+	{ 0x00AAAFFF, 0x000E000A, 0x0 },
+	{ 0x00FFFFFF, 0x00020011, 0x0 },
+	{ 0x00DB6FFF, 0x0005000F, 0x0 },
+	{ 0x00BEEFFF, 0x000A000C, 0x0 },
+	{ 0x00FFFFFF, 0x0005000F, 0x0 },
+	{ 0x00DB6FFF, 0x000A000C, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000E000A, 0x0 },
+	{ 0x00BEFFFF, 0x00140006, 0x0 },
+	{ 0x80B2CFFF, 0x001B0002, 0x0 },
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },
+	{ 0x00DB6FFF, 0x00160005, 0x0 },
+	{ 0x80C71FFF, 0x001A0002, 0x0 },
+	{ 0x00F7DFFF, 0x00180004, 0x0 },
+	{ 0x80D75FFF, 0x001B0002, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
+	{ 0x00FFFFFF, 0x0001000E, 0x0 },
+	{ 0x00D75FFF, 0x0004000A, 0x0 },
+	{ 0x00C30FFF, 0x00070006, 0x0 },
+	{ 0x00AAAFFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x0004000A, 0x0 },
+	{ 0x00D75FFF, 0x00090004, 0x0 },
+	{ 0x00C30FFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x00070006, 0x0 },
+	{ 0x00D75FFF, 0x000C0000, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
+					/* Idx	NT mV d	T mV df	db	*/
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00D75FFF, 0x000E000A, 0x0 },/* 1:	400	600	3.5	*/
+	{ 0x00BEFFFF, 0x00140006, 0x0 },/* 2:	400	800	6	*/
+	{ 0x00FFFFFF, 0x0009000D, 0x0 },/* 3:	450	450	0	*/
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },/* 4:	600	600	0	*/
+	{ 0x00D7FFFF, 0x00140006, 0x0 },/* 5:	600	800	2.5	*/
+	{ 0x80CB2FFF, 0x001B0002, 0x0 },/* 6:	600	1000	4.5	*/
+	{ 0x00FFFFFF, 0x00140006, 0x0 },/* 7:	800	800	0	*/
+	{ 0x80E79FFF, 0x001B0002, 0x0 },/* 8:	800	1000	2	*/
+	{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:	1000	1000	0	*/
+};
+
+/* Skylake H and S */
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+	{ 0x00002016, 0x000000A0, 0x0 },
+	{ 0x00005012, 0x0000009B, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x0000009B, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x000000DF, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+	{ 0x0000201B, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x0000201B, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+	{ 0x00000018, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00000018, 0x000000AB, 0x0 },
+	{ 0x00007013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00002016, 0x000000AB, 0x0 },
+	{ 0x00005013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000AB, 0x0 },
+	{ 0x00007011, 0x000000A4, 0x0 },
+	{ 0x00009010, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x000000AA, 0x0 },
+	{ 0x00006013, 0x000000A4, 0x0 },
+	{ 0x00007011, 0x0000009D, 0x0 },
+	{ 0x00000018, 0x000000A0, 0x0 },
+	{ 0x00006012, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake U, H and S */
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+	{ 0x00000018, 0x000000AC, 0x0 },
+	{ 0x00005012, 0x0000009D, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00000018, 0x00000098, 0x0 },
+	{ 0x00004013, 0x00000088, 0x0 },
+	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
+	{ 0x00003015, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x000000DF, 0x0 },
+	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x00000018, 0x000000A4, 0x0 },
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00004013, 0x00000080, 0x0 },
+	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
+	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
+	{ 0x00000018, 0x000000C7, 0x0 },
+};
+
+struct bxt_ddi_buf_trans {
+	u32 margin;	/* swing value */
+	u32 scale;	/* scale value */
+	u32 enable;	/* scale enable */
+	u32 deemphasis;
+	bool default_index; /* true if the entry represents the default value */
+};
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
+					/* Idx	NT mV diff	db  */
+	{ 52,  0x9A, 0, 128, true  },	/* 0:	400		0   */
+	{ 78,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 104, 0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 154, 0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 116, 0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 154, 0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 154, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, false },	/* 9:	1200		0   */
+};
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
+					/* Idx	NT mV diff	db  */
+	{ 26, 0, 0, 128, false },	/* 0:	200		0   */
+	{ 38, 0, 0, 112, false },	/* 1:	200		1.5 */
+	{ 48, 0, 0, 96,  false },	/* 2:	200		4   */
+	{ 54, 0, 0, 69,  false },	/* 3:	200		6   */
+	{ 32, 0, 0, 128, false },	/* 4:	250		0   */
+	{ 48, 0, 0, 104, false },	/* 5:	250		1.5 */
+	{ 54, 0, 0, 85,  false },	/* 6:	250		4   */
+	{ 43, 0, 0, 128, false },	/* 7:	300		0   */
+	{ 54, 0, 0, 101, false },	/* 8:	300		1.5 */
+	{ 48, 0, 0, 128, false },	/* 9:	300		0   */
+};
+
+/* BSpec has 2 recommended values - entries 0 and 8.
+ * Using the entry with higher vswing.
+ */
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
+					/* Idx	NT mV diff	db  */
+	{ 52,  0x9A, 0, 128, false },	/* 0:	400		0   */
+	{ 52,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 52,  0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 42,  0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 77,  0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 77,  0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 102, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
+};
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type);
+
+static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
+				 struct intel_digital_port **dig_port,
+				 enum port *port)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_DP_MST:
+		*dig_port = enc_to_mst(encoder)->primary;
+		*port = (*dig_port)->port;
+		break;
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_EDP:
+	case INTEL_OUTPUT_HDMI:
+	case INTEL_OUTPUT_UNKNOWN:
+		*dig_port = enc_to_dig_port(encoder);
+		*port = (*dig_port)->port;
+		break;
+	case INTEL_OUTPUT_ANALOG:
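+		/* The analog CRT output is routed over FDI, which uses DDI E. */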
+		*dig_port = NULL;
+		*port = PORT_E;
+		break;
+	default:
+		WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
+		break;
+	}
+}
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+	struct intel_digital_port *dig_port;
+	enum port port;
+
+	ddi_get_encoder_port(intel_encoder, &dig_port, &port);
+
+	return port;
+}
+
+static bool
+intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
+{
+	return intel_dig_port->hdmi.hdmi_reg;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+							int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+	} else if (IS_SKL_ULT(dev)) {
+		ddi_translations = skl_u_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+	} else {
+		ddi_translations = skl_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+							 int *n_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_y_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_y_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+		}
+	} else if (IS_SKL_ULT(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_u_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_u_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		}
+	} else {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+		       int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+	} else {
+		ddi_translations = skl_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+	}
+
+	return ddi_translations;
+}
+
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those.
+ */
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+				      bool supports_hdmi)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 iboost_bit = 0;
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+	    size;
+	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	const struct ddi_buf_trans *ddi_translations_fdi;
+	const struct ddi_buf_trans *ddi_translations_dp;
+	const struct ddi_buf_trans *ddi_translations_edp;
+	const struct ddi_buf_trans *ddi_translations_hdmi;
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_BROXTON(dev)) {
+		if (!supports_hdmi)
+			return;
+
+		/* Vswing programming for HDMI */
+		bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+					INTEL_OUTPUT_HDMI);
+		return;
+	} else if (IS_SKYLAKE(dev)) {
+		ddi_translations_fdi = NULL;
+		ddi_translations_dp =
+				skl_get_buf_trans_dp(dev, &n_dp_entries);
+		ddi_translations_edp =
+				skl_get_buf_trans_edp(dev, &n_edp_entries);
+		ddi_translations_hdmi =
+				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = 1<<31;
+	} else if (IS_BROADWELL(dev)) {
+		ddi_translations_fdi = bdw_ddi_translations_fdi;
+		ddi_translations_dp = bdw_ddi_translations_dp;
+
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations_edp = bdw_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = bdw_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		}
+
+		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	} else if (IS_HASWELL(dev)) {
+		ddi_translations_fdi = hsw_ddi_translations_fdi;
+		ddi_translations_dp = hsw_ddi_translations_dp;
+		ddi_translations_edp = hsw_ddi_translations_dp;
+		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		hdmi_default_entry = 6;
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		ddi_translations_edp = bdw_ddi_translations_dp;
+		ddi_translations_fdi = bdw_ddi_translations_fdi;
+		ddi_translations_dp = bdw_ddi_translations_dp;
+		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	}
+
+	switch (port) {
+	case PORT_A:
+		ddi_translations = ddi_translations_edp;
+		size = n_edp_entries;
+		break;
+	case PORT_B:
+	case PORT_C:
+		ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
+		break;
+	case PORT_D:
+		if (intel_dp_is_edp(dev, PORT_D)) {
+			ddi_translations = ddi_translations_edp;
+			size = n_edp_entries;
+		} else {
+			ddi_translations = ddi_translations_dp;
+			size = n_dp_entries;
+		}
+		break;
+	case PORT_E:
+		if (ddi_translations_fdi)
+			ddi_translations = ddi_translations_fdi;
+		else
+			ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
+		break;
+	default:
+		BUG();
+	}
+
+	for (i = 0; i < size; i++) {
+		I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+			   ddi_translations[i].trans1 | iboost_bit);
+		I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+			   ddi_translations[i].trans2);
+	}
+
+	if (!supports_hdmi)
+		return;
+
+	/* Choose a good default if VBT is badly populated */
+	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+	    hdmi_level >= n_hdmi_entries)
+		hdmi_level = hdmi_default_entry;
+
+	/* After the loop above i == size, so the HDMI value lands in the
+	 * slot following the DP/eDP entries (entry 9 on HSW/BDW): */
+	I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
+	I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+		   ddi_translations_hdmi[hdmi_level].trans2);
+}
+
+/* Program DDI buffer translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+	struct intel_encoder *intel_encoder;
+	bool visited[I915_MAX_PORTS] = { 0, };
+
+	if (!HAS_DDI(dev))
+		return;
+
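+	/* Several encoders can share one port; program each port only once. */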
+	for_each_intel_encoder(dev, intel_encoder) {
+		struct intel_digital_port *intel_dig_port;
+		enum port port;
+		bool supports_hdmi;
+
+		if (intel_encoder->type == INTEL_OUTPUT_DSI)
+			continue;
+
+		ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
+		if (visited[port])
+			continue;
+
+		supports_hdmi = intel_dig_port &&
+				intel_dig_port_supports_hdmi(intel_dig_port);
+
+		intel_prepare_ddi_buffers(dev, port, supports_hdmi);
+		visited[port] = true;
+	}
+}
+
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	uint32_t reg = DDI_BUF_CTL(port);
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		udelay(1);
+		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+			return;
+	}
+	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 temp, i, rx_ctl_val;
+
+	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+	 * mode set "sequence for CRT port" document:
+	 * - TP1 to TP2 time with the default value
+	 * - FDI delay to 90h
+	 *
+	 * WaFDIAutoLinkSetTimingOverrride:hsw
+	 */
+	I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
+				  FDI_RX_PWRDN_LANE0_VAL(2) |
+				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	/* Enable the PCH Receiver FDI PLL */
+	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+		     FDI_RX_PLL_ENABLE |
+		     FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+	POSTING_READ(FDI_RX_CTL(PIPE_A));
+	udelay(220);
+
+	/* Switch from Rawclk to PCDclk */
+	rx_ctl_val |= FDI_PCDCLK;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+
+	/* Configure Port Clock Select */
+	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
+	WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+
+	/* Start the training, iterating through available voltage and
+	 * emphasis settings and testing each value twice. */
+	for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
+		/* Configure DP_TP_CTL with auto-training */
+		I915_WRITE(DP_TP_CTL(PORT_E),
+					DP_TP_CTL_FDI_AUTOTRAIN |
+					DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+					DP_TP_CTL_LINK_TRAIN_PAT1 |
+					DP_TP_CTL_ENABLE);
+
+		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+		 * DDI E does not support port reversal; the functionality is
+		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+		 * port reversal bit */
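+		/* Each translation entry is tried twice, hence i / 2. */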
+		I915_WRITE(DDI_BUF_CTL(PORT_E),
+			   DDI_BUF_CTL_ENABLE |
+			   ((intel_crtc->config->fdi_lanes - 1) << 1) |
+			   DDI_BUF_TRANS_SELECT(i / 2));
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+		udelay(600);
+
+		/* Program PCH FDI Receiver TU */
+		I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+
+		/* Enable PCH FDI Receiver with auto-training */
+		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+		POSTING_READ(FDI_RX_CTL(PIPE_A));
+
+		/* Wait for FDI receiver lane calibration */
+		udelay(30);
+
+		/* Unset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(FDI_RX_MISC(PIPE_A));
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
+		POSTING_READ(FDI_RX_MISC(PIPE_A));
+
+		/* Wait for FDI auto training time */
+		udelay(5);
+
+		temp = I915_READ(DP_TP_STATUS(PORT_E));
+		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+
+			/* Enable normal pixel sending for FDI */
+			I915_WRITE(DP_TP_CTL(PORT_E),
+				   DP_TP_CTL_FDI_AUTOTRAIN |
+				   DP_TP_CTL_LINK_TRAIN_NORMAL |
+				   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+				   DP_TP_CTL_ENABLE);
+
+			return;
+		}
+
+		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+		temp = I915_READ(DP_TP_CTL(PORT_E));
+		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(PORT_E), temp);
+		POSTING_READ(DP_TP_CTL(PORT_E));
+
+		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+		rx_ctl_val &= ~FDI_RX_ENABLE;
+		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+		POSTING_READ(FDI_RX_CTL(PIPE_A));
+
+		/* Reset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(FDI_RX_MISC(PIPE_A));
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+		I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
+		POSTING_READ(FDI_RX_MISC(PIPE_A));
+	}
+
+	DRM_ERROR("FDI link training failed!\n");
+}
+
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+
+	intel_dp->DP = intel_dig_port->saved_port_bits |
+		DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
+	intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder, *ret = NULL;
+	int num_encoders = 0;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		ret = intel_encoder;
+		num_encoders++;
+	}
+
+	if (num_encoders != 1)
+		WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
+		     pipe_name(intel_crtc->pipe));
+
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_encoder *ret = NULL;
+	struct drm_atomic_state *state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int num_encoders = 0;
+	int i;
+
+	state = crtc_state->base.state;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		ret = to_intel_encoder(connector_state->best_encoder);
+		num_encoders++;
+	}
+
+	WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+	     pipe_name(crtc->pipe));
+
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+#define abs_diff(a, b) ({			\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	(void) (&__a == &__b);			\
+	__a > __b ? (__a - __b) : (__b - __a); })
+
+struct hsw_wrpll_rnp {
+	unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+	unsigned budget;
+
+	switch (clock) {
+	case 25175000:
+	case 25200000:
+	case 27000000:
+	case 27027000:
+	case 37762500:
+	case 37800000:
+	case 40500000:
+	case 40541000:
+	case 54000000:
+	case 54054000:
+	case 59341000:
+	case 59400000:
+	case 72000000:
+	case 74176000:
+	case 74250000:
+	case 81000000:
+	case 81081000:
+	case 89012000:
+	case 89100000:
+	case 108000000:
+	case 108108000:
+	case 111264000:
+	case 111375000:
+	case 148352000:
+	case 148500000:
+	case 162000000:
+	case 162162000:
+	case 222525000:
+	case 222750000:
+	case 296703000:
+	case 297000000:
+		budget = 0;
+		break;
+	case 233500000:
+	case 245250000:
+	case 247750000:
+	case 253250000:
+	case 298000000:
+		budget = 1500;
+		break;
+	case 169128000:
+	case 169500000:
+	case 179500000:
+	case 202000000:
+		budget = 2000;
+		break;
+	case 256250000:
+	case 262500000:
+	case 270000000:
+	case 272500000:
+	case 273750000:
+	case 280750000:
+	case 281250000:
+	case 286000000:
+	case 291750000:
+		budget = 4000;
+		break;
+	case 267250000:
+	case 268500000:
+		budget = 5000;
+		break;
+	default:
+		budget = 1000;
+		break;
+	}
+
+	return budget;
+}
+
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+				 unsigned r2, unsigned n2, unsigned p,
+				 struct hsw_wrpll_rnp *best)
+{
+	uint64_t a, b, c, d, diff, diff_best;
+
+	/* No best (r,n,p) yet */
+	if (best->p == 0) {
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+		return;
+	}
+
+	/*
+	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+	 * freq2k.
+	 *
+	 * delta = 1e6 *
+	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+	 *	   freq2k;
+	 *
+	 * and we would like delta <= budget.
+	 *
+	 * If the discrepancy is above the PPM-based budget, always prefer to
+	 * improve upon the previous solution.  However, if you're within the
+	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+	 */
+	a = freq2k * budget * p * r2;
+	b = freq2k * budget * best->p * best->r2;
+	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+	diff_best = abs_diff(freq2k * best->p * best->r2,
+			     LC_FREQ_2K * best->n2);
+	c = 1000000 * diff;
+	d = 1000000 * diff_best;
+
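+	/*
+	 * a >= c is the cross-multiplied form of "the candidate's deviation
+	 * is within the ppm budget", and b >= d the same for the current
+	 * best; this avoids divisions in the comparison.
+	 */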
+	if (a < c && b < d) {
+		/* If both are above the budget, pick the closer */
+		if (best->p * best->r2 * diff < p * r2 * diff_best) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	} else if (a >= c && b < d) {
+		/* The candidate is within the budget but the current best
+		 * is not, so take the candidate. */
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+	} else if (a >= c && b >= d) {
+		/* Both are below the limit, so pick the higher n2/(r2*r2) */
+		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	}
+	/* Otherwise a < c && b >= d, do nothing */
+}
+
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
+{
+	int refclk = LC_FREQ;
+	int n, p, r;
+	u32 wrpll;
+
+	wrpll = I915_READ(reg);
+	switch (wrpll & WRPLL_PLL_REF_MASK) {
+	case WRPLL_PLL_SSC:
+	case WRPLL_PLL_NON_SSC:
+		/*
+		 * We could calculate spread here, but our checking
+		 * code only cares about 5% accuracy, and spread is a max of
+		 * 0.5% downspread.
+		 */
+		refclk = 135;
+		break;
+	case WRPLL_PLL_LCPLL:
+		refclk = LC_FREQ;
+		break;
+	default:
+		WARN(1, "bad wrpll refclk\n");
+		return 0;
+	}
+
+	r = wrpll & WRPLL_DIVIDER_REF_MASK;
+	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+	/* Convert to KHz, p & r have a fixed point portion */
+	return (refclk * n * 100) / (p * r);
+}
+
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			       uint32_t dpll)
+{
+	uint32_t cfgcr1_reg, cfgcr2_reg;
+	uint32_t cfgcr1_val, cfgcr2_val;
+	uint32_t p0, p1, p2, dco_freq;
+
+	cfgcr1_reg = DPLL_CFGCR1(dpll);
+	cfgcr2_reg = DPLL_CFGCR2(dpll);
+
+	cfgcr1_val = I915_READ(cfgcr1_reg);
+	cfgcr2_val = I915_READ(cfgcr2_reg);
+
+	p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+	p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+	if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+		p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+	else
+		p1 = 1;
+
+	switch (p0) {
+	case DPLL_CFGCR2_PDIV_1:
+		p0 = 1;
+		break;
+	case DPLL_CFGCR2_PDIV_2:
+		p0 = 2;
+		break;
+	case DPLL_CFGCR2_PDIV_3:
+		p0 = 3;
+		break;
+	case DPLL_CFGCR2_PDIV_7:
+		p0 = 7;
+		break;
+	}
+
+	switch (p2) {
+	case DPLL_CFGCR2_KDIV_5:
+		p2 = 5;
+		break;
+	case DPLL_CFGCR2_KDIV_2:
+		p2 = 2;
+		break;
+	case DPLL_CFGCR2_KDIV_3:
+		p2 = 3;
+		break;
+	case DPLL_CFGCR2_KDIV_1:
+		p2 = 1;
+		break;
+	}
+
+	dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+	dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+		1000) / 0x8000;
+
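+	/* The AFE clock is 5x the link clock (see skl_ddi_calculate_wrpll). */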
+	return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+	int dotclock;
+
+	if (pipe_config->has_pch_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->fdi_m_n);
+	else if (pipe_config->has_dp_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->dp_m_n);
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
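+		/* 12 bpc HDMI deep color clocks the link at 3/2 the pixel rate. */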
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	int link_clock = 0;
+	uint32_t dpll_ctl1, dpll;
+
+	dpll = pipe_config->ddi_pll_sel;
+
+	dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+		link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+	} else {
+		link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
+		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
+
+		switch (link_clock) {
+		case DPLL_CTRL1_LINK_RATE_810:
+			link_clock = 81000;
+			break;
+		case DPLL_CTRL1_LINK_RATE_1080:
+			link_clock = 108000;
+			break;
+		case DPLL_CTRL1_LINK_RATE_1350:
+			link_clock = 135000;
+			break;
+		case DPLL_CTRL1_LINK_RATE_1620:
+			link_clock = 162000;
+			break;
+		case DPLL_CTRL1_LINK_RATE_2160:
+			link_clock = 216000;
+			break;
+		case DPLL_CTRL1_LINK_RATE_2700:
+			link_clock = 270000;
+			break;
+		default:
+			WARN(1, "Unsupported link rate\n");
+			break;
+		}
+		link_clock *= 2;
+	}
+
+	pipe_config->port_clock = link_clock;
+
+	ddi_dotclock_get(pipe_config);
+}
+
+static void hsw_ddi_clock_get(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	int link_clock = 0;
+	u32 val, pll;
+
+	val = pipe_config->ddi_pll_sel;
+	switch (val & PORT_CLK_SEL_MASK) {
+	case PORT_CLK_SEL_LCPLL_810:
+		link_clock = 81000;
+		break;
+	case PORT_CLK_SEL_LCPLL_1350:
+		link_clock = 135000;
+		break;
+	case PORT_CLK_SEL_LCPLL_2700:
+		link_clock = 270000;
+		break;
+	case PORT_CLK_SEL_WRPLL1:
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		break;
+	case PORT_CLK_SEL_SPLL:
+		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+		if (pll == SPLL_PLL_FREQ_810MHz)
+			link_clock = 81000;
+		else if (pll == SPLL_PLL_FREQ_1350MHz)
+			link_clock = 135000;
+		else if (pll == SPLL_PLL_FREQ_2700MHz)
+			link_clock = 270000;
+		else {
+			WARN(1, "bad spll freq\n");
+			return;
+		}
+		break;
+	default:
+		WARN(1, "bad port clock sel\n");
+		return;
+	}
+
+	pipe_config->port_clock = link_clock * 2;
+
+	ddi_dotclock_get(pipe_config);
+}
+
+static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
+				enum intel_dpll_id dpll)
+{
+	struct intel_shared_dpll *pll;
+	struct intel_dpll_hw_state *state;
+	intel_clock_t clock;
+
+	/* For DDI ports we always use a shared PLL. */
+	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+		return 0;
+
+	pll = &dev_priv->shared_dplls[dpll];
+	state = &pll->config.hw_state;
+
+	clock.m1 = 2;
+	clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+	return chv_calc_dpll_params(100000, &clock);
+}
+
+static void bxt_ddi_clock_get(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	uint32_t dpll = port;
+
+	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
+
+	ddi_dotclock_get(pipe_config);
+}
+
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+			 struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (INTEL_INFO(dev)->gen <= 8)
+		hsw_ddi_clock_get(encoder, pipe_config);
+	else if (IS_SKYLAKE(dev))
+		skl_ddi_clock_get(encoder, pipe_config);
+	else if (IS_BROXTON(dev))
+		bxt_ddi_clock_get(encoder, pipe_config);
+}
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+	uint64_t freq2k;
+	unsigned p, n2, r2;
+	struct hsw_wrpll_rnp best = { 0, 0, 0 };
+	unsigned budget;
+
+	freq2k = clock / 100;
+
+	budget = hsw_wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for 540 MHz pixel clock: bypass the WR PLL
+	 * entirely and pass the LC PLL through directly. */
+	if (freq2k == 5400000) {
+		*n2_out = 2;
+		*p_out = 1;
+		*r2_out = 2;
+		return;
+	}
+
+	/*
+	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+	 * the WR PLL.
+	 *
+	 * We want R so that REF_MIN <= Ref <= REF_MAX.
+	 * Injecting R2 = 2 * R gives:
+	 *   REF_MAX * r2 > LC_FREQ * 2 and
+	 *   REF_MIN * r2 < LC_FREQ * 2
+	 *
+	 * Which means the desired boundaries for r2 are:
+	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+	 */
+	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+	     r2 <= LC_FREQ * 2 / REF_MIN;
+	     r2++) {
+
+		/*
+		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+		 *
+		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+		 *   VCO_MAX * r2 > n2 * LC_FREQ and
+		 *   VCO_MIN * r2 < n2 * LC_FREQ
+		 *
+		 * Which means the desired boundaries for n2 are:
+		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+		 */
+		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+		     n2 <= VCO_MAX * r2 / LC_FREQ;
+		     n2++) {
+
+			for (p = P_MIN; p <= P_MAX; p += P_INC)
+				hsw_wrpll_update_rnp(freq2k, budget,
+						     r2, n2, p, &best);
+		}
+	}
+
+	*n2_out = best.n2;
+	*p_out = best.p;
+	*r2_out = best.r2;
+}
+
+static bool
+hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
+		   struct intel_crtc_state *crtc_state,
+		   struct intel_encoder *intel_encoder)
+{
+	int clock = crtc_state->port_clock;
+
+	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+		struct intel_shared_dpll *pll;
+		uint32_t val;
+		unsigned p, n2, r2;
+
+		hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+		      WRPLL_DIVIDER_POST(p);
+
+		memset(&crtc_state->dpll_hw_state, 0,
+		       sizeof(crtc_state->dpll_hw_state));
+
+		crtc_state->dpll_hw_state.wrpll = val;
+
+		pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(intel_crtc->pipe));
+			return false;
+		}
+
+		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
+	}
+
+	return true;
+}
+
+struct skl_wrpll_context {
+	uint64_t min_deviation;		/* current minimal deviation */
+	uint64_t central_freq;		/* chosen central freq */
+	uint64_t dco_freq;		/* chosen dco freq */
+	unsigned int p;			/* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6%  of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION	100
+#define SKL_DCO_MAX_NDEVIATION	600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+				  uint64_t central_freq,
+				  uint64_t dco_freq,
+				  unsigned int divider)
+{
+	uint64_t deviation;
+
+	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+			      central_freq);
+
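+	/* deviation is in units of 0.01%; the limits are +1% (100) and -6% (600). */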
+	/* positive deviation */
+	if (dco_freq >= central_freq) {
+		if (deviation < SKL_DCO_MAX_PDEVIATION &&
+		    deviation < ctx->min_deviation) {
+			ctx->min_deviation = deviation;
+			ctx->central_freq = central_freq;
+			ctx->dco_freq = dco_freq;
+			ctx->p = divider;
+		}
+	/* negative deviation */
+	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+		   deviation < ctx->min_deviation) {
+		ctx->min_deviation = deviation;
+		ctx->central_freq = central_freq;
+		ctx->dco_freq = dco_freq;
+		ctx->p = divider;
+	}
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+				      unsigned int *p0 /* out */,
+				      unsigned int *p1 /* out */,
+				      unsigned int *p2 /* out */)
+{
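+	/* Split p into p0 * p1 * p2, e.g. p = 20 -> half = 10 -> 2 * 5 * 2. */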
+	/* even dividers */
+	if (p % 2 == 0) {
+		unsigned int half = p / 2;
+
+		if (half == 1 || half == 2 || half == 3 || half == 5) {
+			*p0 = 2;
+			*p1 = 1;
+			*p2 = half;
+		} else if (half % 2 == 0) {
+			*p0 = 2;
+			*p1 = half / 2;
+			*p2 = 2;
+		} else if (half % 3 == 0) {
+			*p0 = 3;
+			*p1 = half / 3;
+			*p2 = 2;
+		} else if (half % 7 == 0) {
+			*p0 = 7;
+			*p1 = half / 7;
+			*p2 = 2;
+		}
+	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = p / 3;
+	} else if (p == 5 || p == 7) {
+		*p0 = p;
+		*p1 = 1;
+		*p2 = 1;
+	} else if (p == 15) {
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = 5;
+	} else if (p == 21) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 3;
+	} else if (p == 35) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 5;
+	}
+}
+
+struct skl_wrpll_params {
+	uint32_t        dco_fraction;
+	uint32_t        dco_integer;
+	uint32_t        qdiv_ratio;
+	uint32_t        qdiv_mode;
+	uint32_t        kdiv;
+	uint32_t        pdiv;
+	uint32_t        central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+				      uint64_t afe_clock,
+				      uint64_t central_freq,
+				      uint32_t p0, uint32_t p1, uint32_t p2)
+{
+	uint64_t dco_freq;
+
+	switch (central_freq) {
+	case 9600000000ULL:
+		params->central_freq = 0;
+		break;
+	case 9000000000ULL:
+		params->central_freq = 1;
+		break;
+	case 8400000000ULL:
+		params->central_freq = 3;
+	}
+
+	switch (p0) {
+	case 1:
+		params->pdiv = 0;
+		break;
+	case 2:
+		params->pdiv = 1;
+		break;
+	case 3:
+		params->pdiv = 2;
+		break;
+	case 7:
+		params->pdiv = 4;
+		break;
+	default:
+		WARN(1, "Incorrect PDiv\n");
+	}
+
+	switch (p2) {
+	case 5:
+		params->kdiv = 0;
+		break;
+	case 2:
+		params->kdiv = 1;
+		break;
+	case 3:
+		params->kdiv = 2;
+		break;
+	case 1:
+		params->kdiv = 3;
+		break;
+	default:
+		WARN(1, "Incorrect KDiv\n");
+	}
+
+	params->qdiv_ratio = p1;
+	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+	dco_freq = p0 * p1 * p2 * afe_clock;
+
+	/*
+	 * Intermediate values are in Hz.
+	 * Divide by MHz to match BSpec.
+	 */
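+	/* The DCO fraction is 15-bit fixed point, hence the 0x8000 scaling. */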
+	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+	params->dco_fraction =
+		div_u64((div_u64(dco_freq, 24) -
+			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+			struct skl_wrpll_params *wrpll_params)
+{
+	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+	uint64_t dco_central_freq[3] = {8400000000ULL,
+					9000000000ULL,
+					9600000000ULL};
+	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+					     24, 28, 30, 32, 36, 40, 42, 44,
+					     48, 52, 54, 56, 60, 64, 66, 68,
+					     70, 72, 76, 78, 80, 84, 88, 90,
+					     92, 96, 98 };
+	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+	static const struct {
+		const int *list;
+		int n_dividers;
+	} dividers[] = {
+		{ even_dividers, ARRAY_SIZE(even_dividers) },
+		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
+	};
+	struct skl_wrpll_context ctx;
+	unsigned int dco, d, i;
+	unsigned int p0, p1, p2;
+
+	skl_wrpll_context_init(&ctx);
+
+	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+			for (i = 0; i < dividers[d].n_dividers; i++) {
+				unsigned int p = dividers[d].list[i];
+				uint64_t dco_freq = p * afe_clock;
+
+				skl_wrpll_try_divider(&ctx,
+						      dco_central_freq[dco],
+						      dco_freq,
+						      p);
+				/*
+				 * Skip the remaining dividers if we're sure to
+				 * have found the definitive divider; we can't
+				 * improve on a deviation of 0.
+				 */
+				if (ctx.min_deviation == 0)
+					goto skip_remaining_dividers;
+			}
+		}
+
+skip_remaining_dividers:
+		/*
+		 * If a solution is found with an even divider, prefer
+		 * this one.
+		 */
+		if (d == 0 && ctx.p)
+			break;
+	}
+
+	if (!ctx.p) {
+		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+		return false;
+	}
+
+	/*
+	 * gcc incorrectly analyses that these can be used without being
+	 * initialized. To be fair, it's hard to guess.
+	 */
+	p0 = p1 = p2 = 0;
+	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+				  p0, p1, p2);
+
+	return true;
+}
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+		   struct intel_crtc_state *crtc_state,
+		   struct intel_encoder *intel_encoder)
+{
+	struct intel_shared_dpll *pll;
+	uint32_t ctrl1, cfgcr1, cfgcr2;
+	int clock = crtc_state->port_clock;
+
+	/*
+	 * See comment in intel_dpll_hw_state to understand why we always use 0
+	 * as the DPLL id in this function.
+	 */
+
+	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+		struct skl_wrpll_params wrpll_params = { 0, };
+
+		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+			return false;
+
+		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+			 wrpll_params.dco_integer;
+
+		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+			 wrpll_params.central_freq;
+	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   intel_encoder->type == INTEL_OUTPUT_DP_MST) {
+		switch (crtc_state->port_clock / 2) {
+		case 81000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+			break;
+		case 135000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+			break;
+		case 270000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+			break;
+		}
+
+		cfgcr1 = cfgcr2 = 0;
+	} else /* eDP */
+		return true;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+	pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+	if (pll == NULL) {
+		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
+		return false;
+	}
+
+	/* shared DPLL id 0 is DPLL 1 */
+	crtc_state->ddi_pll_sel = pll->id + 1;
+
+	return true;
+}
+
+/* bxt clock parameters */
+struct bxt_clk_div {
+	int clock;
+	uint32_t p1;
+	uint32_t p2;
+	uint32_t m2_int;
+	uint32_t m2_frac;
+	bool m2_frac_en;
+	uint32_t n;
+};
+
+/* pre-calculated values for DP link rates */
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+	{162000, 4, 2, 32, 1677722, 1, 1},
+	{270000, 4, 1, 27,       0, 0, 1},
+	{540000, 2, 1, 27,       0, 0, 1},
+	{216000, 3, 2, 32, 1677722, 1, 1},
+	{243000, 4, 1, 24, 1258291, 1, 1},
+	{324000, 4, 1, 32, 1677722, 1, 1},
+	{432000, 3, 1, 32, 1677722, 1, 1}
+};
+
+static bool
+bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
+		   struct intel_crtc_state *crtc_state,
+		   struct intel_encoder *intel_encoder)
+{
+	struct intel_shared_dpll *pll;
+	struct bxt_clk_div clk_div = {0};
+	int vco = 0;
+	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+	uint32_t lanestagger;
+	int clock = crtc_state->port_clock;
+
+	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+		intel_clock_t best_clock;
+
+		/* Calculate HDMI div */
+		/*
+		 * FIXME: tie the following calculation into
+		 * i9xx_crtc_compute_clock
+		 */
+		if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+			DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+					 clock, pipe_name(intel_crtc->pipe));
+			return false;
+		}
+
+		clk_div.p1 = best_clock.p1;
+		clk_div.p2 = best_clock.p2;
+		WARN_ON(best_clock.m1 != 2);
+		clk_div.n = best_clock.n;
+		clk_div.m2_int = best_clock.m2 >> 22;
+		clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
+		clk_div.m2_frac_en = clk_div.m2_frac != 0;
+
+		vco = best_clock.vco;
+	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+			intel_encoder->type == INTEL_OUTPUT_EDP) {
+		int i;
+
+		clk_div = bxt_dp_clk_val[0];
+		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+			if (bxt_dp_clk_val[i].clock == clock) {
+				clk_div = bxt_dp_clk_val[i];
+				break;
+			}
+		}
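+		/*
+		 * VCO = 5 * link clock * p1 * p2 (clock * 10 / 2, i.e. the
+		 * serial bit rate halved).
+		 */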
+		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
+	}
+
+	if (vco >= 6200000 && vco <= 6700000) {
+		prop_coef = 4;
+		int_coef = 9;
+		gain_ctl = 3;
+		targ_cnt = 8;
+	} else if ((vco > 5400000 && vco < 6200000) ||
+			(vco >= 4800000 && vco < 5400000)) {
+		prop_coef = 5;
+		int_coef = 11;
+		gain_ctl = 3;
+		targ_cnt = 9;
+	} else if (vco == 5400000) {
+		prop_coef = 3;
+		int_coef = 8;
+		gain_ctl = 1;
+		targ_cnt = 9;
+	} else {
+		DRM_ERROR("Invalid VCO\n");
+		return false;
+	}
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (clock > 270000)
+		lanestagger = 0x18;
+	else if (clock > 135000)
+		lanestagger = 0x0d;
+	else if (clock > 67000)
+		lanestagger = 0x07;
+	else if (clock > 33000)
+		lanestagger = 0x04;
+	else
+		lanestagger = 0x02;
+
+	crtc_state->dpll_hw_state.ebb0 =
+		PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
+	crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
+	crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
+	crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+
+	if (clk_div.m2_frac_en)
+		crtc_state->dpll_hw_state.pll3 =
+			PORT_PLL_M2_FRAC_ENABLE;
+
+	crtc_state->dpll_hw_state.pll6 =
+		prop_coef | PORT_PLL_INT_COEFF(int_coef);
+	crtc_state->dpll_hw_state.pll6 |=
+		PORT_PLL_GAIN_CTL(gain_ctl);
+
+	crtc_state->dpll_hw_state.pll8 = targ_cnt;
+
+	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+
+	crtc_state->dpll_hw_state.pll10 =
+		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+		| PORT_PLL_DCO_AMP_OVR_EN_H;
+
+	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+	crtc_state->dpll_hw_state.pcsdw12 =
+		LANESTAGGER_STRAP_OVRD | lanestagger;
+
+	pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+	if (pll == NULL) {
+		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+			pipe_name(intel_crtc->pipe));
+		return false;
+	}
+
+	/* shared DPLL id 0 is DPLL A */
+	crtc_state->ddi_pll_sel = pll->id;
+
+	return true;
+}
+
+/*
+ * Tries to find a *shared* PLL for the CRTC and store it in
+ * intel_crtc->ddi_pll_sel.
+ *
+ * For private DPLLs, compute_config() should do the selection for us. This
+ * function should be folded into compute_config() eventually.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
+			  struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct intel_encoder *intel_encoder =
+		intel_ddi_get_crtc_new_encoder(crtc_state);
+
+	if (IS_SKYLAKE(dev))
+		return skl_ddi_pll_select(intel_crtc, crtc_state,
+					  intel_encoder);
+	else if (IS_BROXTON(dev))
+		return bxt_ddi_pll_select(intel_crtc, crtc_state,
+					  intel_encoder);
+	else
+		return hsw_ddi_pll_select(intel_crtc, crtc_state,
+					  intel_encoder);
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+		temp = TRANS_MSA_SYNC_CLK;
+		switch (intel_crtc->config->pipe_bpp) {
+		case 18:
+			temp |= TRANS_MSA_6_BPC;
+			break;
+		case 24:
+			temp |= TRANS_MSA_8_BPC;
+			break;
+		case 30:
+			temp |= TRANS_MSA_10_BPC;
+			break;
+		case 36:
+			temp |= TRANS_MSA_12_BPC;
+			break;
+		default:
+			BUG();
+		}
+		I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+	}
+}
+
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	uint32_t temp;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (state)
+		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	else
+		temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+	temp = TRANS_DDI_FUNC_ENABLE;
+	temp |= TRANS_DDI_SELECT_PORT(port);
+
+	switch (intel_crtc->config->pipe_bpp) {
+	case 18:
+		temp |= TRANS_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= TRANS_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= TRANS_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= TRANS_DDI_BPC_12;
+		break;
+	default:
+		BUG();
+	}
+
+	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= TRANS_DDI_PVSYNC;
+	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= TRANS_DDI_PHSYNC;
+
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		switch (pipe) {
+		case PIPE_A:
+			/* On Haswell, can only use the always-on power well for
+			 * eDP when not using the panel fitter, and when not
+			 * using motion blur mitigation (which we don't
+			 * support). */
+			if (IS_HASWELL(dev) &&
+			    (intel_crtc->config->pch_pfit.enabled ||
+			     intel_crtc->config->pch_pfit.force_thru))
+				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+			else
+				temp |= TRANS_DDI_EDP_INPUT_A_ON;
+			break;
+		case PIPE_B:
+			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+			break;
+		case PIPE_C:
+			temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		if (intel_crtc->config->has_hdmi_sink)
+			temp |= TRANS_DDI_MODE_SELECT_HDMI;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		temp |= TRANS_DDI_MODE_SELECT_FDI;
+		temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
+
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+		   type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		if (intel_dp->is_mst)
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+	} else if (type == INTEL_OUTPUT_DP_MST) {
+		struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+		if (intel_dp->is_mst)
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+	} else {
+		WARN(1, "Invalid encoder type %d for pipe %c\n",
+		     intel_encoder->type, pipe_name(pipe));
+	}
+
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder)
+{
+	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+	uint32_t val = I915_READ(reg);
+
+	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
+		 TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+	val |= TRANS_DDI_PORT_NONE;
+	I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	int type = intel_connector->base.connector_type;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum pipe pipe = 0;
+	enum transcoder cpu_transcoder;
+	enum intel_display_power_domain power_domain;
+	uint32_t tmp;
+
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+		return false;
+
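+	/*
+	 * Port A drives only the eDP transcoder; for the other ports the
+	 * transcoder index matches the pipe index.
+	 */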
+	if (port == PORT_A)
+		cpu_transcoder = TRANSCODER_EDP;
+	else
+		cpu_transcoder = (enum transcoder) pipe;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+	case TRANS_DDI_MODE_SELECT_DVI:
+		return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+		if (type == DRM_MODE_CONNECTOR_eDP)
+			return true;
+		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		/* if the transcoder is in MST state then
+		 * the connector isn't connected */
+		return false;
+
+	case TRANS_DDI_MODE_SELECT_FDI:
+		return (type == DRM_MODE_CONNECTOR_VGA);
+
+	default:
+		return false;
+	}
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+	int i;
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	tmp = I915_READ(DDI_BUF_CTL(port));
+
+	if (!(tmp & DDI_BUF_CTL_ENABLE))
+		return false;
+
+	if (port == PORT_A) {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		case TRANS_DDI_EDP_INPUT_A_ON:
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+			*pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			*pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			*pipe = PIPE_C;
+			break;
+		}
+
+		return true;
+	} else {
+		for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+			tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+			if ((tmp & TRANS_DDI_PORT_MASK)
+			    == TRANS_DDI_SELECT_PORT(port)) {
+				if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+					return false;
+
+				*pipe = i;
+				return true;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+
+	return false;
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_DISABLED);
+}
+
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+			       enum port port, int type)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+	uint8_t iboost;
+	uint8_t dp_iboost, hdmi_iboost;
+	int n_entries;
+	u32 reg;
+
+	/* VBT may override standard boost values */
+	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+	hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			iboost = ddi_translations[port].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_EDP) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			iboost = ddi_translations[port].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		if (hdmi_iboost) {
+			iboost = hdmi_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			iboost = ddi_translations[port].i_boost;
+		}
+	} else {
+		return;
+	}
+
+	/* Make sure that the requested I_boost is valid */
+	if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+		DRM_ERROR("Invalid I_boost value %u\n", iboost);
+		return;
+	}
+
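+	/*
+	 * Clear the port's balance leg value and its per-port disable bit,
+	 * then program whichever of the two applies.
+	 */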
+	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	reg &= ~BALANCE_LEG_MASK(port);
+	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+	if (iboost)
+		reg |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct bxt_ddi_buf_trans *ddi_translations;
+	u32 n_entries, i;
+	uint32_t val;
+
+	if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
+		n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+		ddi_translations = bxt_ddi_translations_edp;
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT
+			|| type == INTEL_OUTPUT_EDP) {
+		n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+		ddi_translations = bxt_ddi_translations_dp;
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+		ddi_translations = bxt_ddi_translations_hdmi;
+	} else {
+		DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
+				type);
+		return;
+	}
+
+	/* Check if default value has to be used */
+	if (level >= n_entries ||
+	    (type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
+		for (i = 0; i < n_entries; i++) {
+			if (ddi_translations[i].default_index) {
+				level = i;
+				break;
+			}
+		}
+	}
+
+	/*
+	 * While we write to the group register to program all lanes at once we
+	 * can read only lane registers and we pick lanes 0/1 for that.
+	 */
+	val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
+	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+
+	val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
+	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+	val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
+	       ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
+	I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
+
+	val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
+	val &= ~SCALE_DCOMP_METHOD;
+	if (ddi_translations[level].enable)
+		val |= SCALE_DCOMP_METHOD;
+
+	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+		DRM_ERROR("Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set\n");
+
+	I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
+
+	val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
+	val &= ~DE_EMPHASIS;
+	val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
+	I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
+
+	val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
+	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+}
+
+static uint32_t translate_signal_level(int signal_levels)
+{
+	uint32_t level;
+
+	switch (signal_levels) {
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+			      signal_levels);
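+		/* fall through to level 0 */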
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 0;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 1;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 2;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+		level = 3;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 5;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 6;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 7;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 8;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 9;
+		break;
+	}
+
+	return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dport->base.base.dev;
+	struct intel_encoder *encoder = &dport->base;
+	uint8_t train_set = intel_dp->train_set[0];
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	enum port port = dport->port;
+	uint32_t level;
+
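+	/*
+	 * Map the requested vswing/pre-emphasis pair onto an index into the
+	 * platform's buffer translation table.
+	 */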
+	level = translate_signal_level(signal_levels);
+
+	if (IS_SKYLAKE(dev))
+		skl_ddi_set_iboost(dev, level, port, encoder->type);
+	else if (IS_BROXTON(dev))
+		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+	return DDI_BUF_TRANS_SELECT(level);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	int hdmi_level;
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		intel_edp_panel_on(intel_dp);
+	}
+
+	if (IS_SKYLAKE(dev)) {
+		uint32_t dpll = crtc->config->ddi_pll_sel;
+		uint32_t val;
+
+		/*
+		 * DPLL0 is used for eDP and is the only "private" DPLL (as
+		 * opposed to shared) on SKL
+		 */
+		if (type == INTEL_OUTPUT_EDP) {
+			WARN_ON(dpll != SKL_DPLL0);
+
+			val = I915_READ(DPLL_CTRL1);
+
+			val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+				 DPLL_CTRL1_SSC(dpll) |
+				 DPLL_CTRL1_LINK_RATE_MASK(dpll));
+			val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
+
+			I915_WRITE(DPLL_CTRL1, val);
+			POSTING_READ(DPLL_CTRL1);
+		}
+
+		/* DDI -> PLL mapping  */
+		val = I915_READ(DPLL_CTRL2);
+
+		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+			DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+		val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+		I915_WRITE(DPLL_CTRL2, val);
+
+	} else if (INTEL_INFO(dev)->gen < 9) {
+		WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
+		I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
+	}
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		intel_dp_set_link_params(intel_dp, crtc->config);
+
+		intel_ddi_init_dp_buf_reg(intel_encoder);
+
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+		intel_dp_start_link_train(intel_dp);
+		if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+			intel_dp_stop_link_train(intel_dp);
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		if (IS_BROXTON(dev)) {
+			hdmi_level =
+				dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+			bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+					INTEL_OUTPUT_HDMI);
+		}
+		intel_hdmi->set_infoframes(encoder,
+					   crtc->config->has_hdmi_sink,
+					   &crtc->config->base.adjusted_mode);
+	}
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t val;
+	bool wait = false;
+
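+	/*
+	 * Disable the DDI buffer and put the DP link back into training
+	 * pattern 1 before powering anything else down.
+	 */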
+	val = I915_READ(DDI_BUF_CTL(port));
+	if (val & DDI_BUF_CTL_ENABLE) {
+		val &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), val);
+		wait = true;
+	}
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	if (wait)
+		intel_wait_ddi_buf_idle(dev_priv, port);
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+		intel_edp_panel_vdd_on(intel_dp);
+		intel_edp_panel_off(intel_dp);
+	}
+
+	if (IS_SKYLAKE(dev))
+		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+					DPLL_CTRL2_DDI_CLK_OFF(port)));
+	else if (INTEL_INFO(dev)->gen < 9)
+		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_digital_port *intel_dig_port =
+			enc_to_dig_port(encoder);
+
+		/* In HDMI/DVI mode, the port width, and swing/emphasis values
+		 * are ignored so nothing special needs to be done besides
+		 * enabling the port.
+		 */
+		I915_WRITE(DDI_BUF_CTL(port),
+			   intel_dig_port->saved_port_bits |
+			   DDI_BUF_CTL_ENABLE);
+	} else if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+			intel_dp_stop_link_train(intel_dp);
+
+		intel_edp_backlight_on(intel_dp);
+		intel_psr_enable(intel_dp);
+		intel_edp_drrs_enable(intel_dp);
+	}
+
+	if (intel_crtc->config->has_audio) {
+		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+		intel_audio_codec_enable(intel_encoder);
+	}
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int type = intel_encoder->type;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (intel_crtc->config->has_audio) {
+		intel_audio_codec_disable(intel_encoder);
+		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+	}
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		intel_edp_drrs_disable(intel_dp);
+		intel_psr_disable(intel_dp);
+		intel_edp_backlight_off(intel_dp);
+	}
+}
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(pll->id));
+	udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	hw_state->wrpll = val;
+
+	return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+	"WRPLL 1",
+	"WRPLL 2",
+	"SPLL"
+};
+
+static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	dev_priv->num_shared_dpll = 3;
+
+	for (i = 0; i < 2; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			hsw_ddi_wrpll_get_hw_state;
+	}
+
+	/* SPLL is special, but needs to be initialized anyway. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+}
+
+static const char * const skl_ddi_pll_names[] = {
+	"DPLL 1",
+	"DPLL 2",
+	"DPLL 3",
+};
+
+struct skl_dpll_regs {
+	u32 ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[3] = {
+	{
+		/* DPLL 1 */
+		.ctl = LCPLL2_CTL,
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+	},
+	{
+		/* DPLL 2 */
+		.ctl = WRPLL_CTL1,
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+	},
+	{
+		/* DPLL 3 */
+		.ctl = WRPLL_CTL2,
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+	},
+};
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+	unsigned int dpll;
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+	/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+	dpll = pll->id + 1;
+
+	val = I915_READ(DPLL_CTRL1);
+
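+	/* Each DPLL owns a 6 bit wide control field in DPLL_CTRL1. */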
+	val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
+		 DPLL_CTRL1_LINK_RATE_MASK(dpll));
+	val |= pll->config.hw_state.ctrl1 << (dpll * 6);
+
+	I915_WRITE(DPLL_CTRL1, val);
+	POSTING_READ(DPLL_CTRL1);
+
+	I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+	I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+	POSTING_READ(regs[pll->id].cfgcr1);
+	POSTING_READ(regs[pll->id].cfgcr2);
+
+	/* the enable bit is always bit 31 */
+	I915_WRITE(regs[pll->id].ctl,
+		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+	if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
+		DRM_ERROR("DPLL %d not locked\n", dpll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+	/* the enable bit is always bit 31 */
+	I915_WRITE(regs[pll->id].ctl,
+		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+	POSTING_READ(regs[pll->id].ctl);
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				     struct intel_shared_dpll *pll,
+				     struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+	unsigned int dpll;
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+	dpll = pll->id + 1;
+
+	val = I915_READ(regs[pll->id].ctl);
+	if (!(val & LCPLL_PLL_ENABLE))
+		return false;
+
+	val = I915_READ(DPLL_CTRL1);
+	hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
+
+	/* avoid reading back stale values if HDMI mode is not enabled */
+	if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
+		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+	}
+
+	return true;
+}
+
+static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	dev_priv->num_shared_dpll = 3;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
+		dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			skl_ddi_pll_get_hw_state;
+	}
+}
+
+static void broxton_phy_init(struct drm_i915_private *dev_priv,
+			     enum dpio_phy phy)
+{
+	enum port port;
+	uint32_t val;
+
+	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val |= GT_DISPLAY_POWER_ON(phy);
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+	/* Considering 10ms timeout until BSpec is updated */
+	if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+		DRM_ERROR("timeout during PHY%d power on\n", phy);
+
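+	/* PHY0 drives ports B and C, PHY1 drives only port A. */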
+	for (port =  (phy == DPIO_PHY0 ? PORT_B : PORT_A);
+	     port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+		int lane;
+
+		for (lane = 0; lane < 4; lane++) {
+			val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+			/*
+			 * Note that on CHV this flag is called UPAR, but has
+			 * the same function.
+			 */
+			val &= ~LATENCY_OPTIM;
+			if (lane != 1)
+				val |= LATENCY_OPTIM;
+
+			I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
+		}
+	}
+
+	/* Program PLL Rcomp code offset */
+	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+	val &= ~IREF0RC_OFFSET_MASK;
+	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+	val &= ~IREF1RC_OFFSET_MASK;
+	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+	/* Program power gating */
+	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+		SUS_CLK_CONFIG;
+	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+	if (phy == DPIO_PHY0) {
+		val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
+		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+		I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
+	}
+
+	val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
+	val &= ~OCL2_LDOFUSE_PWR_DIS;
+	/*
+	 * On PHY1 disable power on the second channel, since no port is
+	 * connected there. On PHY0 both channels have a port, so leave it
+	 * enabled.
+	 * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
+	 * power down the second channel on PHY0 as well.
+	 */
+	if (phy == DPIO_PHY1)
+		val |= OCL2_LDOFUSE_PWR_DIS;
+	I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
+
+	if (phy == DPIO_PHY0) {
+		uint32_t grc_code;
+		/*
+		 * PHY0 isn't connected to an RCOMP resistor so copy over
+		 * the corresponding calibrated value from PHY1, and disable
+		 * the automatic calibration on PHY0.
+		 */
+		if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
+			     10))
+			DRM_ERROR("timeout waiting for PHY1 GRC\n");
+
+		val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
+		val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+		grc_code = val << GRC_CODE_FAST_SHIFT |
+			   val << GRC_CODE_SLOW_SHIFT |
+			   val;
+		I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
+
+		val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
+		val |= GRC_DIS | GRC_RDY_OVRD;
+		I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
+	}
+
+	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val |= COMMON_RESET_DIS;
+	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+}
+
+void broxton_ddi_phy_init(struct drm_device *dev)
+{
+	/* Enable PHY1 first since it provides Rcomp for PHY0 */
+	broxton_phy_init(dev->dev_private, DPIO_PHY1);
+	broxton_phy_init(dev->dev_private, DPIO_PHY0);
+}
+
+static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
+			       enum dpio_phy phy)
+{
+	uint32_t val;
+
+	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val &= ~COMMON_RESET_DIS;
+	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+}
+
+void broxton_ddi_phy_uninit(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	broxton_phy_uninit(dev_priv, DPIO_PHY1);
+	broxton_phy_uninit(dev_priv, DPIO_PHY0);
+
+	/* FIXME: do this in broxton_phy_uninit per phy */
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
+}
+
+static const char * const bxt_ddi_pll_names[] = {
+	"PORT PLL A",
+	"PORT PLL B",
+	"PORT PLL C",
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	uint32_t temp;
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp &= ~PORT_PLL_REF_SEL;
+	/* Non-SSC reference */
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+	/* Disable 10 bit clock */
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+	/* Write P1 & P2 */
+	temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+	temp |= pll->config.hw_state.ebb0;
+	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+
+	/* Write M2 integer */
+	temp = I915_READ(BXT_PORT_PLL(port, 0));
+	temp &= ~PORT_PLL_M2_MASK;
+	temp |= pll->config.hw_state.pll0;
+	I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+
+	/* Write N */
+	temp = I915_READ(BXT_PORT_PLL(port, 1));
+	temp &= ~PORT_PLL_N_MASK;
+	temp |= pll->config.hw_state.pll1;
+	I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+
+	/* Write M2 fraction */
+	temp = I915_READ(BXT_PORT_PLL(port, 2));
+	temp &= ~PORT_PLL_M2_FRAC_MASK;
+	temp |= pll->config.hw_state.pll2;
+	I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+
+	/* Write M2 fraction enable */
+	temp = I915_READ(BXT_PORT_PLL(port, 3));
+	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+	temp |= pll->config.hw_state.pll3;
+	I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+
+	/* Write coeff */
+	temp = I915_READ(BXT_PORT_PLL(port, 6));
+	temp &= ~PORT_PLL_PROP_COEFF_MASK;
+	temp &= ~PORT_PLL_INT_COEFF_MASK;
+	temp &= ~PORT_PLL_GAIN_CTL_MASK;
+	temp |= pll->config.hw_state.pll6;
+	I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+
+	/* Write calibration val */
+	temp = I915_READ(BXT_PORT_PLL(port, 8));
+	temp &= ~PORT_PLL_TARGET_CNT_MASK;
+	temp |= pll->config.hw_state.pll8;
+	I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+
+	temp = I915_READ(BXT_PORT_PLL(port, 9));
+	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+	temp |= pll->config.hw_state.pll9;
+	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+
+	temp = I915_READ(BXT_PORT_PLL(port, 10));
+	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+	temp &= ~PORT_PLL_DCO_AMP_MASK;
+	temp |= pll->config.hw_state.pll10;
+	I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+
+	/* Recalibrate with new settings */
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp |= PORT_PLL_RECALIBRATE;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	temp |= pll->config.hw_state.ebb4;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+	/* Enable PLL */
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp |= PORT_PLL_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+	if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+			PORT_PLL_LOCK), 200))
+		DRM_ERROR("PLL %d not locked\n", port);
+
+	/*
+	 * While we write to the group register to program all lanes at once we
+	 * can read only lane registers and we pick lanes 0/1 for that.
+	 */
+	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+	temp &= ~LANE_STAGGER_MASK;
+	temp &= ~LANESTAGGER_STRAP_OVRD;
+	temp |= pll->config.hw_state.pcsdw12;
+	I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+					struct intel_shared_dpll *pll)
+{
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	uint32_t temp;
+
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp &= ~PORT_PLL_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+					struct intel_shared_dpll *pll,
+					struct intel_dpll_hw_state *hw_state)
+{
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	if (!(val & PORT_PLL_ENABLE))
+		return false;
+
+	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 &= PORT_PLL_M2_MASK;
+
+	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 &= PORT_PLL_N_MASK;
+
+	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+			  PORT_PLL_INT_COEFF_MASK |
+			  PORT_PLL_GAIN_CTL_MASK;
+
+	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+			   PORT_PLL_DCO_AMP_MASK;
+
+	/*
+	 * While we write to the group register to program all lanes at once we
+	 * can read only lane registers. We configure all lanes the same way, so
+	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+	 */
+	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+	if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+				 hw_state->pcsdw12,
+				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+	return true;
+}
+
+static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	dev_priv->num_shared_dpll = 3;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
+		dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			bxt_ddi_pll_get_hw_state;
+	}
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val = I915_READ(LCPLL_CTL);
+
+	if (IS_SKYLAKE(dev))
+		skl_shared_dplls_init(dev_priv);
+	else if (IS_BROXTON(dev))
+		bxt_shared_dplls_init(dev_priv);
+	else
+		hsw_shared_dplls_init(dev_priv);
+
+	if (IS_SKYLAKE(dev)) {
+		int cdclk_freq;
+
+		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+		dev_priv->skl_boot_cdclk = cdclk_freq;
+		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+			DRM_ERROR("LCPLL1 is disabled\n");
+		else
+			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+	} else if (IS_BROXTON(dev)) {
+		broxton_init_cdclk(dev);
+		broxton_ddi_phy_init(dev);
+	} else {
+		/*
+		 * The LCPLL register should be turned on by the BIOS. For now
+		 * let's just check its state and print errors in case
+		 * something is wrong.  Don't even try to turn it on.
+		 */
+
+		if (val & LCPLL_CD_SOURCE_FCLK)
+			DRM_ERROR("CDCLK source is not LCPLL\n");
+
+		if (val & LCPLL_PLL_DISABLE)
+			DRM_ERROR("LCPLL is disabled\n");
+	}
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	enum port port = intel_dig_port->port;
+	uint32_t val;
+	bool wait = false;
+
+	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+		val = I915_READ(DDI_BUF_CTL(port));
+		if (val & DDI_BUF_CTL_ENABLE) {
+			val &= ~DDI_BUF_CTL_ENABLE;
+			I915_WRITE(DDI_BUF_CTL(port), val);
+			wait = true;
+		}
+
+		val = I915_READ(DP_TP_CTL(port));
+		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(port), val);
+		POSTING_READ(DP_TP_CTL(port));
+
+		if (wait)
+			intel_wait_ddi_buf_idle(dev_priv, port);
+	}
+
+	val = DP_TP_CTL_ENABLE |
+	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+	if (intel_dp->is_mst) {
+		val |= DP_TP_CTL_MODE_MST;
+	} else {
+		val |= DP_TP_CTL_MODE_SST;
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	}
+	I915_WRITE(DP_TP_CTL(port), val);
+	POSTING_READ(DP_TP_CTL(port));
+
+	intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+	I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+	POSTING_READ(DDI_BUF_CTL(port));
+
+	udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	uint32_t val;
+
+	intel_ddi_post_disable(intel_encoder);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_MISC(PIPE_A));
+	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_PCDCLK;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_PLL_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	struct intel_hdmi *intel_hdmi;
+	u32 temp, flags = 0;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (temp & TRANS_DDI_PHSYNC)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (temp & TRANS_DDI_PVSYNC)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	switch (temp & TRANS_DDI_BPC_MASK) {
+	case TRANS_DDI_BPC_6:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case TRANS_DDI_BPC_8:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case TRANS_DDI_BPC_10:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case TRANS_DDI_BPC_12:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+
+	switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+		pipe_config->has_hdmi_sink = true;
+		intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+
+		if (intel_hdmi->infoframe_enabled(&encoder->base))
+			pipe_config->has_infoframe = true;
+		break;
+	case TRANS_DDI_MODE_SELECT_DVI:
+	case TRANS_DDI_MODE_SELECT_FDI:
+		break;
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		pipe_config->has_dp_encoder = true;
+		pipe_config->lane_count =
+			((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+		intel_dp_get_m_n(intel_crtc, pipe_config);
+		break;
+	default:
+		break;
+	}
+
+	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+			pipe_config->has_audio = true;
+	}
+
+	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+		/*
+		 * This is a big fat ugly hack.
+		 *
+		 * Some machines in UEFI boot mode provide us a VBT that has 18
+		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+		 * unknown we fail to light up. Yet the same BIOS boots up with
+		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+		 * max, not what it tells us to use.
+		 *
+		 * Note: This will still be broken if the eDP panel is not lit
+		 * up by the BIOS, and thus we can't get the mode at module
+		 * load.
+		 */
+		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+	}
+
+	intel_ddi_clock_get(encoder, pipe_config);
+}
+
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *pipe_config)
+{
+	int type = encoder->type;
+	int port = intel_ddi_get_encoder_port(encoder);
+
+	WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
+
+	if (port == PORT_A)
+		pipe_config->cpu_transcoder = TRANSCODER_EDP;
+
+	if (type == INTEL_OUTPUT_HDMI)
+		return intel_hdmi_compute_config(encoder, pipe_config);
+	else
+		return intel_dp_compute_config(encoder, pipe_config);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+	.reset = intel_dp_encoder_reset,
+	.destroy = intel_dp_encoder_destroy,
+};
+
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = intel_connector_alloc();
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+	if (!intel_dp_init_connector(intel_dig_port, connector)) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = intel_connector_alloc();
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+	intel_hdmi_init_connector(intel_dig_port, connector);
+
+	return connector;
+}
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	bool init_hdmi, init_dp;
+
+	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+	if (!init_dp && !init_hdmi) {
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+			      port_name(port));
+		return;
+	}
+
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	intel_encoder->compute_config = intel_ddi_compute_config;
+	intel_encoder->enable = intel_enable_ddi;
+	intel_encoder->pre_enable = intel_ddi_pre_enable;
+	intel_encoder->disable = intel_disable_ddi;
+	intel_encoder->post_disable = intel_ddi_post_disable;
+	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+	intel_encoder->get_config = intel_ddi_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
+
+	intel_dig_port->port = port;
+	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+					  (DDI_BUF_PORT_REVERSAL |
+					   DDI_A_4_LANES);
+
+	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = 0;
+
+	if (init_dp) {
+		if (!intel_ddi_init_dp_connector(intel_dig_port))
+			goto err;
+
+		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+		/*
+		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+		 * interrupts to check the external panel connection.
+		 */
+		if (IS_BROXTON(dev_priv) && INTEL_REVID(dev) < BXT_REVID_B0 &&
+		    port == PORT_B)
+			dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
+		else
+			dev_priv->hotplug.irq_port[port] = intel_dig_port;
+	}
+
+	/* In theory we don't need the encoder->type check, but leave it just in
+	 * case we have some really bad VBTs... */
+	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+		if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+			goto err;
+	}
+
+	return;
+
+err:
+	drm_encoder_cleanup(encoder);
+	kfree(intel_dig_port);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
new file mode 100644
index 0000000..4f5d07b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -0,0 +1,15857 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+#include <linux/dma_remapping.h>
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t i8xx_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t i965_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+};
+
+static const uint32_t skl_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config);
+
+static int intel_framebuffer_init(struct drm_device *dev,
+				  struct intel_framebuffer *ifb,
+				  struct drm_mode_fb_cmd2 *mode_cmd,
+				  struct drm_i915_gem_object *obj);
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2);
+static void ironlake_set_pipeconf(struct drm_crtc *crtc);
+static void haswell_set_pipeconf(struct drm_crtc *crtc);
+static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+	struct intel_crtc_state *crtc_state);
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+			   int num_connectors);
+static void skylake_pfit_enable(struct intel_crtc *crtc);
+static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
+static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_pre_disable_primary(struct drm_crtc *crtc);
+
+typedef struct {
+	int	min, max;
+} intel_range_t;
+
+typedef struct {
+	int	dot_limit;
+	int	p2_slow, p2_fast;
+} intel_p2_t;
+
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
+	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
+	intel_p2_t	    p2;
+};
+
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+	/* Obtain SKU information */
+	mutex_lock(&dev_priv->sb_lock);
+	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+		CCK_FUSE_HPLL_FREQ_MASK;
+	mutex_unlock(&dev_priv->sb_lock);
+
+	return vco_freq[hpll_freq] * 1000;
+}
+
+static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+				  const char *name, u32 reg)
+{
+	u32 val;
+	int divider;
+
+	if (dev_priv->hpll_freq == 0)
+		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
+	mutex_lock(&dev_priv->sb_lock);
+	val = vlv_cck_read(dev_priv, reg);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	divider = val & CCK_FREQUENCY_VALUES;
+
+	WARN((val & CCK_FREQUENCY_STATUS) !=
+	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
+	     "%s change in progress\n", name);
+
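+	/* The divider field encodes the divisor minus one, applied to 2x HPLL. */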
+	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
+}
+
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_PCH_SPLIT(dev));
+
+	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
+/* hrawclock is 1/4 the FSB frequency */
+int intel_hrawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t clkcfg;
+
+	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+	if (IS_VALLEYVIEW(dev))
+		return 200;
+
+	clkcfg = I915_READ(CLKCFG);
+	switch (clkcfg & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_400:
+		return 100;
+	case CLKCFG_FSB_533:
+		return 133;
+	case CLKCFG_FSB_667:
+		return 166;
+	case CLKCFG_FSB_800:
+		return 200;
+	case CLKCFG_FSB_1067:
+		return 266;
+	case CLKCFG_FSB_1333:
+		return 333;
+	/* these two are just a guess; one of them might be right */
+	case CLKCFG_FSB_1600:
+	case CLKCFG_FSB_1600_ALT:
+		return 400;
+	default:
+		return 133;
+	}
+}
+
+static void intel_update_czclk(struct drm_i915_private *dev_priv)
+{
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+						      CCK_CZ_CLOCK_CONTROL);
+
+	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+}
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+	if (IS_GEN5(dev)) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+	} else {
+		return 27;
+	}
+}
+
+static const intel_limit_t intel_limits_i8xx_dac = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 2 },
+};
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 4 },
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 1, .max = 6 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 14, .p2_fast = 7 },
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 7, .max = 98 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 7 },
+};
+
+
+static const intel_limit_t intel_limits_g4x_sdvo = {
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 1, .max = 3},
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 10,
+		.p2_fast = 10
+	},
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
+	.dot = { .min = 22000, .max = 400000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 16, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8},
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+	.dot = { .min = 20000, .max = 115000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 14, .p2_fast = 14
+	},
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+	.dot = { .min = 80000, .max = 224000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 7, .p2_fast = 7
+	},
+};
+
+static const intel_limit_t intel_limits_pineview_sdvo = {
+	.dot = { .min = 20000, .max = 400000},
+	.vco = { .min = 1700000, .max = 3500000 },
+	/* Pineview's Ncounter is a ring counter */
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	/* Pineview only has one combined m divider, which we treat as m2. */
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_pineview_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1700000, .max = 3500000 },
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 7, .max = 112 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
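+/*
+ * E.g. the .m1 = { .min = 12, .max = 22 } limits below describe actual
+ * M1 divider values of 14..24, since the register field holds
+ * (actual_value - 2).
+ */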
+static const intel_limit_t intel_limits_ironlake_dac = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 5 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 118 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 56 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100MHz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+};
+
+static const intel_limit_t intel_limits_vlv = {
+	 /*
+	  * These are the data rate limits (measured in fast clocks)
+	  * since those are the strictest limits we have. The fast
+	  * clock and actual rate limits are more relaxed, so checking
+	  * them would make no difference.
+	  */
+	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
+	.vco = { .min = 4000000, .max = 6000000 },
+	.n = { .min = 1, .max = 7 },
+	.m1 = { .min = 2, .max = 3 },
+	.m2 = { .min = 11, .max = 156 },
+	.p1 = { .min = 2, .max = 3 },
+	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const intel_limit_t intel_limits_chv = {
+	/*
+	 * These are the data rate limits (measured in fast clocks)
+	 * since those are the strictest limits we have.  The fast
+	 * clock and actual rate limits are more relaxed, so checking
+	 * them would make no difference.
+	 */
+	.dot = { .min = 25000 * 5, .max = 540000 * 5},
+	.vco = { .min = 4800000, .max = 6480000 },
+	.n = { .min = 1, .max = 1 },
+	.m1 = { .min = 2, .max = 2 },
+	.m2 = { .min = 24 << 22, .max = 175 << 22 },
+	.p1 = { .min = 2, .max = 4 },
+	.p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
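+/*
+ * Note that on CHV m2 is a 22.22 fixed point value (hence the "<< 22"
+ * in the limits above and the "clock->n << 22" scaling in
+ * chv_calc_dpll_params() below), so .min = 24 << 22 is an integer m2
+ * of 24 with a zero fractional part.
+ */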
+
+static const intel_limit_t intel_limits_bxt = {
+	/* FIXME: find real dot limits */
+	.dot = { .min = 0, .max = INT_MAX },
+	.vco = { .min = 4800000, .max = 6700000 },
+	.n = { .min = 1, .max = 1 },
+	.m1 = { .min = 2, .max = 2 },
+	/* FIXME: find real m2 limits */
+	.m2 = { .min = 2 << 22, .max = 255 << 22 },
+	.p1 = { .min = 2, .max = 4 },
+	.p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+static bool
+needs_modeset(struct drm_crtc_state *state)
+{
+	return drm_atomic_crtc_needs_modeset(state);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct intel_encoder *encoder;
+
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		if (encoder->type == type)
+			return true;
+
+	return false;
+}
+
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at the staged connector->encoder links
+ * in the atomic state instead of the current encoder->crtc links.
+ */
+static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
+				      int type)
+{
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	struct intel_encoder *encoder;
+	int i, num_connectors = 0;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		num_connectors++;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+		if (encoder->type == type)
+			return true;
+	}
+
+	WARN_ON(num_connectors == 0);
+
+	return false;
+}
+
+static const intel_limit_t *
+intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	const intel_limit_t *limit;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_is_dual_link_lvds(dev)) {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else {
+		limit = &intel_limits_ironlake_dac;
+	}
+
+	return limit;
+}
+
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	const intel_limit_t *limit;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_is_dual_link_lvds(dev))
+			limit = &intel_limits_g4x_dual_channel_lvds;
+		else
+			limit = &intel_limits_g4x_single_channel_lvds;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+		limit = &intel_limits_g4x_hdmi;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+		limit = &intel_limits_g4x_sdvo;
+	} else {
+		/* The option is for other outputs */
+		limit = &intel_limits_i9xx_sdvo;
+	}
+
+	return limit;
+}
+
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	const intel_limit_t *limit;
+
+	if (IS_BROXTON(dev)) {
+		limit = &intel_limits_bxt;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		limit = intel_ironlake_limit(crtc_state, refclk);
+	} else if (IS_G4X(dev)) {
+		limit = intel_g4x_limit(crtc_state);
+	} else if (IS_PINEVIEW(dev)) {
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_pineview_lvds;
+		else
+			limit = &intel_limits_pineview_sdvo;
+	} else if (IS_CHERRYVIEW(dev)) {
+		limit = &intel_limits_chv;
+	} else if (IS_VALLEYVIEW(dev)) {
+		limit = &intel_limits_vlv;
+	} else if (!IS_GEN2(dev)) {
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_i9xx_lvds;
+		else
+			limit = &intel_limits_i9xx_sdvo;
+	} else {
+		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_i8xx_lvds;
+		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+			limit = &intel_limits_i8xx_dvo;
+		else
+			limit = &intel_limits_i8xx_dac;
+	}
+	return limit;
+}
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m2 + 2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
+}
+
+static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+{
+	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = i9xx_dpll_compute_m(clock);
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
+}
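+/*
+ * Worked example (illustrative values within the i9xx SDVO limits):
+ * refclk = 96000 kHz, n = 1, m1 = 12, m2 = 5, p1 = 4, p2 = 10 gives
+ * m = 5 * (12 + 2) + (5 + 2) = 77, p = 4 * 10 = 40,
+ * vco = 96000 * 77 / (1 + 2) = 2464000 kHz and
+ * dot = 2464000 / 40 = 61600 kHz.
+ */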
+
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
+			clock->n << 22);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
+}
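+/*
+ * On VLV/CHV the PLL is programmed in terms of the fast clock, which
+ * runs at 5x the pipe pixel clock (compare the "target *= 5" in the
+ * vlv/chv_find_best_dpll() helpers below), hence the divide by 5 when
+ * reporting the dot clock above.
+ */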
+
+#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
+/**
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_PLL_is_valid(struct drm_device *dev,
+			       const intel_limit_t *limit,
+			       const intel_clock_t *clock)
+{
+	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+		INTELPllInvalid("n out of range\n");
+	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
+		INTELPllInvalid("p1 out of range\n");
+	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
+		INTELPllInvalid("m2 out of range\n");
+	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+		INTELPllInvalid("m1 out of range\n");
+
+	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+		if (clock->m1 <= clock->m2)
+			INTELPllInvalid("m1 <= m2\n");
+
+	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
+		if (clock->p < limit->p.min || limit->p.max < clock->p)
+			INTELPllInvalid("p out of range\n");
+		if (clock->m < limit->m.min || limit->m.max < clock->m)
+			INTELPllInvalid("m out of range\n");
+	}
+
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		INTELPllInvalid("vco out of range\n");
+	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+	 * connector, etc., rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		INTELPllInvalid("dot out of range\n");
+
+	return true;
+}
+
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+		   const struct intel_crtc_state *crtc_state,
+		   int target)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		/*
+		 * For LVDS just rely on its current settings for dual-channel.
+		 * We haven't figured out how to reliably set up different
+		 * single/dual channel state, if we even can.
+		 */
+		if (intel_is_dual_link_lvds(dev))
+			return limit->p2.p2_fast;
+		else
+			return limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			return limit->p2.p2_slow;
+		else
+			return limit->p2.p2_fast;
+	}
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+		    struct intel_crtc_state *crtc_state,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			if (clock.m2 >= clock.m1)
+				break;
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
+					int this_err;
+
+					i9xx_calc_dpll_params(refclk, &clock);
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return (err != target);
+}
+
+static bool
+pnv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, intel_clock_t *match_clock,
+		   intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
+					int this_err;
+
+					pnv_calc_dpll_params(refclk, &clock);
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return (err != target);
+}
+
+static bool
+g4x_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, intel_clock_t *match_clock,
+		   intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int max_n;
+	bool found = false;
+	/* approximately equals target * 0.00585 (1/256 + 1/512 = 3/512) */
+	int err_most = (target >> 8) + (target >> 9);
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+	max_n = limit->n.max;
+	/* based on hardware requirement, prefer smaller n over precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		/* based on hardware requirement, prefer larger m1,m2 */
+		for (clock.m1 = limit->m1.max;
+		     clock.m1 >= limit->m1.min; clock.m1--) {
+			for (clock.m2 = limit->m2.max;
+			     clock.m2 >= limit->m2.min; clock.m2--) {
+				for (clock.p1 = limit->p1.max;
+				     clock.p1 >= limit->p1.min; clock.p1--) {
+					int this_err;
+
+					i9xx_calc_dpll_params(refclk, &clock);
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err_most) {
+						*best_clock = clock;
+						err_most = this_err;
+						max_n = clock.n;
+						found = true;
+					}
+				}
+			}
+		}
+	}
+	return found;
+}
+
+/*
+ * Check whether the calculated PLL configuration is more optimal compared to
+ * the best configuration and error found so far.  Returns true if it is, and
+ * stores the calculated error in @error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+			       const intel_clock_t *calculated_clock,
+			       const intel_clock_t *best_clock,
+			       unsigned int best_error_ppm,
+			       unsigned int *error_ppm)
+{
+	/*
+	 * For CHV ignore the error and consider only the P value.
+	 * Prefer a bigger P value based on HW requirements.
+	 */
+	if (IS_CHERRYVIEW(dev)) {
+		*error_ppm = 0;
+
+		return calculated_clock->p > best_clock->p;
+	}
+
+	if (WARN_ON_ONCE(!target_freq))
+		return false;
+
+	*error_ppm = div_u64(1000000ULL *
+				abs(target_freq - calculated_clock->dot),
+			     target_freq);
+	/*
+	 * Prefer a better P value over a better (smaller) error if the error
+	 * is small. Ensure this preference for future configurations too by
+	 * setting the error to 0.
+	 */
+	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+		*error_ppm = 0;
+
+		return true;
+	}
+
+	return *error_ppm + 10 < best_error_ppm;
+}
+
+static bool
+vlv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, intel_clock_t *match_clock,
+		   intel_clock_t *best_clock)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	intel_clock_t clock;
+	unsigned int bestppm = 1000000;
+	/* min update 19.2 MHz */
+	int max_n = min(limit->n.max, refclk / 19200);
+	bool found = false;
+
+	target *= 5; /* fast clock */
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	/* based on hardware requirement, prefer smaller n over precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+				clock.p = clock.p1 * clock.p2;
+				/* based on hardware requirement, prefer bigger m1,m2 values */
+				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+					unsigned int ppm;
+
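+					/*
+					 * Solve dot = refclk * m1 * m2 /
+					 * (n * p) for m2; target here is
+					 * already the fast clock.
+					 */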
+					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+								     refclk * clock.m1);
+
+					vlv_calc_dpll_params(refclk, &clock);
+
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+
+					if (!vlv_PLL_is_optimal(dev, target,
+								&clock,
+								best_clock,
+								bestppm, &ppm))
+						continue;
+
+					*best_clock = clock;
+					bestppm = ppm;
+					found = true;
+				}
+			}
+		}
+	}
+
+	return found;
+}
+
+static bool
+chv_find_best_dpll(const intel_limit_t *limit,
+		   struct intel_crtc_state *crtc_state,
+		   int target, int refclk, intel_clock_t *match_clock,
+		   intel_clock_t *best_clock)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	unsigned int best_error_ppm;
+	intel_clock_t clock;
+	uint64_t m2;
+	bool found = false;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	best_error_ppm = 1000000;
+
+	/*
+	 * Based on hardware doc, n is always set to 1 and m1 is always
+	 * set to 2.  If 200MHz refclk support is required, this needs to
+	 * be revisited because n may no longer be 1.
+	 */
+	clock.n = 1;
+	clock.m1 = 2;
+	target *= 5;	/* fast clock */
+
+	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+		for (clock.p2 = limit->p2.p2_fast;
+				clock.p2 >= limit->p2.p2_slow;
+				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+			unsigned int error_ppm;
+
+			clock.p = clock.p1 * clock.p2;
+
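+			/*
+			 * Solve the same dot clock equation as in
+			 * vlv_find_best_dpll() for m2, but keep 22
+			 * fractional bits since CHV m2 is a 22.22
+			 * fixed point value.
+			 */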
+			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+					clock.n) << 22, refclk * clock.m1);
+
+			if (m2 > INT_MAX / clock.m1)
+				continue;
+
+			clock.m2 = m2;
+
+			chv_calc_dpll_params(refclk, &clock);
+
+			if (!intel_PLL_is_valid(dev, limit, &clock))
+				continue;
+
+			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+						best_error_ppm, &error_ppm))
+				continue;
+
+			*best_clock = clock;
+			best_error_ppm = error_ppm;
+			found = true;
+		}
+	}
+
+	return found;
+}
+
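+/*
+ * BXT reuses the CHV divider calculation; the BXT specific divider
+ * limits come from intel_limits_bxt via intel_limit().
+ */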
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+			intel_clock_t *best_clock)
+{
+	int refclk = i9xx_get_refclk(crtc_state, 0);
+
+	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+				  target_clock, refclk, NULL, best_clock);
+}
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 *
+	 * We can ditch the adjusted_mode.crtc_clock check as soon
+	 * as Haswell has gained clock readout/fastboot support.
+	 *
+	 * We can ditch the crtc->primary->fb check as soon as we can
+	 * properly reconstruct framebuffers.
+	 *
+	 * FIXME: The intel_crtc->active here should be switched to
+	 * crtc->state->active once we have proper CRTC states wired up
+	 * for atomic.
+	 */
+	return intel_crtc->active && crtc->primary->state->fb &&
+		intel_crtc->config->base.adjusted_mode.crtc_clock;
+}
+
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe)
+{
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	return intel_crtc->config->cpu_transcoder;
+}
+
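+/*
+ * Sample the pipe scanline counter twice, 5ms apart; if the masked
+ * value did not change, the pipe has stopped producing new lines
+ * (5ms corresponds to a few hundred scanlines at typical refresh
+ * rates, so a running pipe would have moved).
+ */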
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg = PIPEDSL(pipe);
+	u32 line1, line2;
+	u32 line_mask;
+
+	if (IS_GEN2(dev))
+		line_mask = DSL_LINEMASK_GEN2;
+	else
+		line_mask = DSL_LINEMASK_GEN3;
+
+	line1 = I915_READ(reg) & line_mask;
+	msleep(5);
+	line2 = I915_READ(reg) & line_mask;
+
+	return line1 == line2;
+}
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @crtc: crtc whose pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
+ */
+static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	enum pipe pipe = crtc->pipe;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		int reg = PIPECONF(cpu_transcoder);
+
+		/* Wait for the Pipe State to go off */
+		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+			     100))
+			WARN(1, "pipe_off wait timed out\n");
+	} else {
+		/* Wait for the display line to settle */
+		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+			WARN(1, "pipe_off wait timed out\n");
+	}
+}
+
+static const char *state_string(bool enabled)
+{
+	return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+void assert_pll(struct drm_i915_private *dev_priv,
+		enum pipe pipe, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	val = I915_READ(DPLL(pipe));
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	I915_STATE_WARN(cur_state != state,
+	     "PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	mutex_lock(&dev_priv->sb_lock);
+	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	cur_state = val & DSI_PLL_VCO_EN;
+	I915_STATE_WARN(cur_state != state,
+	     "DSI PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (crtc->config->shared_dpll < 0)
+		return NULL;
+
+	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state)
+{
+	bool cur_state;
+	struct intel_dpll_hw_state hw_state;
+
+	if (WARN(!pll,
+		 "asserting DPLL %s with no DPLL\n", state_string(state)))
+		return;
+
+	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
+	I915_STATE_WARN(cur_state != state,
+	     "%s assertion failure (expected %s, current %s)\n",
+	     pll->name, state_string(state), state_string(cur_state));
+}
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	if (HAS_DDI(dev_priv->dev)) {
+		/* DDI does not have a specific FDI_TX register */
+		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+	} else {
+		u32 val = I915_READ(FDI_TX_CTL(pipe));
+		cur_state = !!(val & FDI_TX_ENABLE);
+	}
+	I915_STATE_WARN(cur_state != state,
+	     "FDI TX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	val = I915_READ(FDI_RX_CTL(pipe));
+	cur_state = !!(val & FDI_RX_ENABLE);
+	I915_STATE_WARN(cur_state != state,
+	     "FDI RX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	u32 val;
+
+	/* ILK FDI PLL is always enabled */
+	if (INTEL_INFO(dev_priv->dev)->gen == 5)
+		return;
+
+	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
+	if (HAS_DDI(dev_priv->dev))
+		return;
+
+	val = I915_READ(FDI_TX_CTL(pipe));
+	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	val = I915_READ(FDI_RX_CTL(pipe));
+	cur_state = !!(val & FDI_RX_PLL_ENABLE);
+	I915_STATE_WARN(cur_state != state,
+	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+			   enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int pp_reg;
+	u32 val;
+	enum pipe panel_pipe = PIPE_A;
+	bool locked = true;
+
+	if (WARN_ON(HAS_DDI(dev)))
+		return;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 port_sel;
+
+		pp_reg = PCH_PP_CONTROL;
+		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
+
+		if (port_sel == PANEL_PORT_SELECT_LVDS &&
+		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
+			panel_pipe = PIPE_B;
+		/* XXX: else fix for eDP */
+	} else if (IS_VALLEYVIEW(dev)) {
+		/* presumably write lock depends on pipe, not port select */
+		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
+		panel_pipe = pipe;
+	} else {
+		pp_reg = PP_CONTROL;
+		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
+			panel_pipe = PIPE_B;
+	}
+
+	val = I915_READ(pp_reg);
+	if (!(val & PANEL_POWER_ON) ||
+	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
+		locked = false;
+
+	I915_STATE_WARN(panel_pipe == pipe && locked,
+	     "panel assertion failure, pipe %c regs locked\n",
+	     pipe_name(pipe));
+}
+
+static void assert_cursor(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	struct drm_device *dev = dev_priv->dev;
+	bool cur_state;
+
+	if (IS_845G(dev) || IS_I865G(dev))
+		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+	else
+		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+	I915_STATE_WARN(cur_state != state,
+	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+		 enum pipe pipe, bool state)
+{
+	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	/* if we need the pipe quirk it must always be on */
+	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		state = true;
+
+	if (!intel_display_power_is_enabled(dev_priv,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
+		cur_state = false;
+	} else {
+		u32 val = I915_READ(PIPECONF(cpu_transcoder));
+		cur_state = !!(val & PIPECONF_ENABLE);
+	}
+
+	I915_STATE_WARN(cur_state != state,
+	     "pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+static void assert_plane(struct drm_i915_private *dev_priv,
+			 enum plane plane, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	val = I915_READ(DSPCNTR(plane));
+	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+	I915_STATE_WARN(cur_state != state,
+	     "plane %c assertion failure (expected %s, current %s)\n",
+	     plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i;
+
+	/* Primary planes are fixed to pipes on gen4+ */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		u32 val = I915_READ(DSPCNTR(pipe));
+		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
+		     "plane %c assertion failure, should be disabled but is not\n",
+		     plane_name(pipe));
+		return;
+	}
+
+	/* Need to check both planes against the pipe */
+	for_each_pipe(dev_priv, i) {
+		u32 val = I915_READ(DSPCNTR(i));
+		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(i), pipe_name(pipe));
+	}
+}
+
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int sprite;
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		for_each_sprite(dev_priv, pipe, sprite) {
+			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
+			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
+			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
+			     sprite, pipe_name(pipe));
+		}
+	} else if (IS_VALLEYVIEW(dev)) {
+		for_each_sprite(dev_priv, pipe, sprite) {
+			u32 val = I915_READ(SPCNTR(pipe, sprite));
+			I915_STATE_WARN(val & SP_ENABLE,
+			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+			     sprite_name(pipe, sprite), pipe_name(pipe));
+		}
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		u32 val = I915_READ(SPRCTL(pipe));
+		I915_STATE_WARN(val & SPRITE_ENABLE,
+		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(pipe), pipe_name(pipe));
+	} else if (INTEL_INFO(dev)->gen >= 5) {
+		u32 val = I915_READ(DVSCNTR(pipe));
+		I915_STATE_WARN(val & DVS_ENABLE,
+		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(pipe), pipe_name(pipe));
+	}
+}
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+		drm_crtc_vblank_put(crtc);
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	bool enabled;
+
+	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+
+	val = I915_READ(PCH_DREF_CONTROL);
+	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+			    DREF_SUPERSPREAD_SOURCE_MASK));
+	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+					   enum pipe pipe)
+{
+	u32 val;
+	bool enabled;
+
+	val = I915_READ(PCH_TRANSCONF(pipe));
+	enabled = !!(val & TRANS_ENABLE);
+	I915_STATE_WARN(enabled,
+	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
+	     pipe_name(pipe));
+}
+
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+			    enum pipe pipe, u32 port_sel, u32 val)
+{
+	if ((val & DP_PORT_EN) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+			return false;
+	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
+			return false;
+	} else {
+		if ((val & DP_PIPE_MASK) != (pipe << 30))
+			return false;
+	}
+	return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & SDVO_ENABLE) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
+			return false;
+	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
+			return false;
+	} else {
+		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
+			return false;
+	}
+	return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & LVDS_PORT_EN) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+			return false;
+	} else {
+		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+			return false;
+	}
+	return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & ADPA_DAC_ENABLE) == 0)
+		return false;
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+			return false;
+	} else {
+		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+			return false;
+	}
+	return true;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe, int reg, u32 port_sel)
+{
+	u32 val = I915_READ(reg);
+	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+
+	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+	     && (val & DP_PIPEB_SELECT),
+	     "IBX PCH dp port still using transcoder B\n");
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+				     enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+
+	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+	     && (val & SDVO_PIPE_B_SELECT),
+	     "IBX PCH hdmi port still using transcoder B\n");
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	u32 val;
+
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+	val = I915_READ(PCH_ADPA);
+	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+	     "PCH VGA enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	val = I915_READ(PCH_LVDS);
+	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+}
+
+static void vlv_enable_pll(struct intel_crtc *crtc,
+			   const struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int reg = DPLL(crtc->pipe);
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	/* No really, not for ILK+ */
+	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+	/* PLL is protected by panel, make sure we can write it */
+	if (IS_MOBILE(dev_priv->dev))
+		assert_panel_unlocked(dev_priv, crtc->pipe);
+
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150);
+
+	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
+	POSTING_READ(DPLL_MD(crtc->pipe));
+
+	/* We do this three times for luck */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+}
+
+static void chv_enable_pll(struct intel_crtc *crtc,
+			   const struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	enum dpio_channel port = vlv_pipe_to_channel(pipe);
+	u32 tmp;
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Re-enable the 10bit clock to the display controller */
+	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+	tmp |= DPIO_DCLKP_EN;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/*
+	 * Need to wait > 100ns between setting the dclkp clock enable bit
+	 * and enabling the PLL.
+	 */
+	udelay(1);
+
+	/* Enable PLL */
+	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+
+	/* Check PLL is locked */
+	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+		DRM_ERROR("PLL %d failed to lock\n", pipe);
+
+	/* not sure when this should be written */
+	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+	POSTING_READ(DPLL_MD(pipe));
+}
+
+static int intel_num_dvo_pipes(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+	int count = 0;
+
+	for_each_intel_crtc(dev, crtc)
+		count += crtc->base.state->active &&
+			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
+
+	return count;
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int reg = DPLL(crtc->pipe);
+	u32 dpll = crtc->config->dpll_hw_state.dpll;
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	/* No really, not for ILK+ */
+	BUG_ON(INTEL_INFO(dev)->gen >= 5);
+
+	/* PLL is protected by panel, make sure we can write it */
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		assert_panel_unlocked(dev_priv, crtc->pipe);
+
+	/* Enable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+		/*
+		 * It appears to be important that we don't enable this
+		 * for the current pipe before otherwise configuring the
+		 * PLL. No idea how this should be handled if multiple
+		 * DVO outputs are enabled simultaneously.
+		 */
+		dpll |= DPLL_DVO_2X_MODE;
+		I915_WRITE(DPLL(!crtc->pipe),
+			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+	}
+
+	/*
+	 * Apparently we need to have VGA mode enabled prior to changing
+	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+	 * dividers, even though the register value does change.
+	 */
+	I915_WRITE(reg, 0);
+
+	I915_WRITE(reg, dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(reg);
+	udelay(150);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(DPLL_MD(crtc->pipe),
+			   crtc->config->dpll_hw_state.dpll_md);
+	} else {
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(reg, dpll);
+	}
+
+	/* We do this three times for luck */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+}
+
+/**
+ * i9xx_disable_pll - disable a PLL
+ * @crtc: crtc whose PLL to disable
+ *
+ * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
+ *
+ * Note!  This is for pre-ILK only.
+ */
+static void i9xx_disable_pll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	/* Disable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) &&
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
+	    !intel_num_dvo_pipes(dev)) {
+		I915_WRITE(DPLL(PIPE_B),
+			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
+		I915_WRITE(DPLL(PIPE_A),
+			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
+	}
+
+	/* Don't disable the PLL if the pipe quirk requires it to stay on */
+	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		return;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
+	POSTING_READ(DPLL(pipe));
+}
+
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 val;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	/*
+	 * Leave integrated clock source and reference clock enabled for pipe B.
+	 * The latter is needed for VGA hotplug / manual detection.
+	 */
+	val = DPLL_VGA_MODE_DIS;
+	if (pipe == PIPE_B)
+		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
+	I915_WRITE(DPLL(pipe), val);
+	POSTING_READ(DPLL(pipe));
+}
+
+static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	enum dpio_channel port = vlv_pipe_to_channel(pipe);
+	u32 val;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	/* Set PLL en = 0 */
+	val = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (pipe != PIPE_A)
+		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+	I915_WRITE(DPLL(pipe), val);
+	POSTING_READ(DPLL(pipe));
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Disable 10bit clock to display controller */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+	val &= ~DPIO_DCLKP_EN;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+			 struct intel_digital_port *dport,
+			 unsigned int expected_mask)
+{
+	u32 port_mask;
+	int dpll_reg;
+
+	switch (dport->port) {
+	case PORT_B:
+		port_mask = DPLL_PORTB_READY_MASK;
+		dpll_reg = DPLL(0);
+		break;
+	case PORT_C:
+		port_mask = DPLL_PORTC_READY_MASK;
+		dpll_reg = DPLL(0);
+		expected_mask <<= 4;
+		break;
+	case PORT_D:
+		port_mask = DPLL_PORTD_READY_MASK;
+		dpll_reg = DPIO_PHY_STATUS;
+		break;
+	default:
+		BUG();
+	}
+
+	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
+		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
+}
+
+static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+	if (WARN_ON(pll == NULL))
+		return;
+
+	WARN_ON(!pll->config.crtc_mask);
+	if (pll->active == 0) {
+		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+		WARN_ON(pll->on);
+		assert_shared_dpll_disabled(dev_priv, pll);
+
+		pll->mode_set(dev_priv, pll);
+	}
+}
+
+/**
+ * intel_enable_shared_dpll - enable a shared PCH PLL
+ * @crtc: crtc using the shared DPLL
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+	if (WARN_ON(pll == NULL))
+		return;
+
+	if (WARN_ON(pll->config.crtc_mask == 0))
+		return;
+
+	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
+		      pll->name, pll->active, pll->on,
+		      crtc->base.base.id);
+
+	if (pll->active++) {
+		WARN_ON(!pll->on);
+		assert_shared_dpll_enabled(dev_priv, pll);
+		return;
+	}
+	WARN_ON(pll->on);
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
+	DRM_DEBUG_KMS("enabling %s\n", pll->name);
+	pll->enable(dev_priv, pll);
+	pll->on = true;
+}
+
+static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+	/* PCH only available on ILK+ */
+	if (INTEL_INFO(dev)->gen < 5)
+		return;
+
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
+		return;
+
+	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
+		      pll->name, pll->active, pll->on,
+		      crtc->base.base.id);
+
+	if (WARN_ON(pll->active == 0)) {
+		assert_shared_dpll_disabled(dev_priv, pll);
+		return;
+	}
+
+	assert_shared_dpll_enabled(dev_priv, pll);
+	WARN_ON(!pll->on);
+	if (--pll->active)
+		return;
+
+	DRM_DEBUG_KMS("disabling %s\n", pll->name);
+	pll->disable(dev_priv, pll);
+	pll->on = false;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+}
+
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+					   enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t reg, val, pipeconf_val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(!HAS_PCH_SPLIT(dev));
+
+	/* Make sure PCH DPLL is enabled */
+	assert_shared_dpll_enabled(dev_priv,
+				   intel_crtc_to_shared_dpll(intel_crtc));
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, pipe);
+	assert_fdi_rx_enabled(dev_priv, pipe);
+
+	if (HAS_PCH_CPT(dev)) {
+		/* Workaround: Set the timing override bit before enabling the
+		 * pch transcoder. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
+	}
+
+	reg = PCH_TRANSCONF(pipe);
+	val = I915_READ(reg);
+	pipeconf_val = I915_READ(PIPECONF(pipe));
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		/*
+		 * Make the BPC in the transcoder consistent with that
+		 * in the pipeconf reg.  For HDMI we must use 8bpc here
+		 * for both 8bpc and 12bpc.
+		 */
+		val &= ~PIPECONF_BPC_MASK;
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+			val |= PIPECONF_8BPC;
+		else
+			val |= pipeconf_val & PIPECONF_BPC_MASK;
+	}
+
+	val &= ~TRANS_INTERLACE_MASK;
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+		if (HAS_PCH_IBX(dev_priv->dev) &&
+		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+			val |= TRANS_LEGACY_INTERLACED_ILK;
+		else
+			val |= TRANS_INTERLACED;
+	} else {
+		val |= TRANS_PROGRESSIVE;
+	}
+
+	I915_WRITE(reg, val | TRANS_ENABLE);
+	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+				      enum transcoder cpu_transcoder)
+{
+	u32 val, pipeconf_val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+	/* Workaround: set timing override bit. */
+	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+
+	val = TRANS_ENABLE;
+	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+	    PIPECONF_INTERLACED_ILK)
+		val |= TRANS_INTERLACED;
+	else
+		val |= TRANS_PROGRESSIVE;
+
+	I915_WRITE(LPT_TRANSCONF, val);
+	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+					    enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t reg, val;
+
+	/* FDI relies on the transcoder */
+	assert_fdi_tx_disabled(dev_priv, pipe);
+	assert_fdi_rx_disabled(dev_priv, pipe);
+
+	/* Ports must be off as well */
+	assert_pch_ports_disabled(dev_priv, pipe);
+
+	reg = PCH_TRANSCONF(pipe);
+	val = I915_READ(reg);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(reg, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+
+	if (!HAS_PCH_IBX(dev)) {
+		/* Workaround: Clear the timing override chicken bit again. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
+	}
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(LPT_TRANSCONF);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(LPT_TRANSCONF, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("Failed to disable PCH transcoder\n");
+
+	/* Workaround: clear timing override bit. */
+	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @crtc: crtc responsible for the pipe
+ *
+ * Enable @crtc's pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ */
+static void intel_enable_pipe(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	enum pipe pch_transcoder;
+	int reg;
+	u32 val;
+
+	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
+	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
+	assert_sprites_disabled(dev_priv, pipe);
+
+	if (HAS_PCH_LPT(dev_priv->dev))
+		pch_transcoder = TRANSCODER_A;
+	else
+		pch_transcoder = pipe;
+
+	/*
+	 * A pipe without a PLL won't actually be able to drive bits from
+	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
+	 * need the check.
+	 */
+	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	} else {
+		if (crtc->config->has_pch_encoder) {
+			/* if driving the PCH, we need FDI enabled */
+			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+			assert_fdi_tx_pll_enabled(dev_priv,
+						  (enum pipe) cpu_transcoder);
+		}
+		/* FIXME: assert CPU port conditions for SNB+ */
+	}
+
+	reg = PIPECONF(cpu_transcoder);
+	val = I915_READ(reg);
+	if (val & PIPECONF_ENABLE) {
+		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
+		return;
+	}
+
+	I915_WRITE(reg, val | PIPECONF_ENABLE);
+	POSTING_READ(reg);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @crtc: crtc whose pipe is to be disabled
+ *
+ * Disable the pipe of @crtc, making sure that various hardware
+ * specific requirements are met, if applicable, e.g. plane
+ * disabled, panel fitter off, etc.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	enum pipe pipe = crtc->pipe;
+	int reg;
+	u32 val;
+
+	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
+	/*
+	 * Make sure planes won't keep trying to pump pixels to us,
+	 * or we might hang the display.
+	 */
+	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
+	assert_sprites_disabled(dev_priv, pipe);
+
+	reg = PIPECONF(cpu_transcoder);
+	val = I915_READ(reg);
+	if ((val & PIPECONF_ENABLE) == 0)
+		return;
+
+	/*
+	 * Double wide has implications for planes
+	 * so best keep it disabled when not needed.
+	 */
+	if (crtc->config->double_wide)
+		val &= ~PIPECONF_DOUBLE_WIDE;
+
+	/* Don't actually disable the pipe if the pipe quirk requires it to stay on */
+	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
+	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		val &= ~PIPECONF_ENABLE;
+
+	I915_WRITE(reg, val);
+	if ((val & PIPECONF_ENABLE) == 0)
+		intel_wait_for_pipe_off(crtc);
+}
+
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+		  uint64_t fb_format_modifier, unsigned int plane)
+{
+	unsigned int tile_height;
+	uint32_t pixel_bytes;
+
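+	/*
+	 * Tile heights are in rows; e.g. a gen3+ X tile is a 4K page
+	 * laid out as 512 bytes x 8 rows and a Y tile as 128 bytes x
+	 * 32 rows, which is also why the Yf tile height below varies
+	 * with the pixel size.
+	 */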
+	switch (fb_format_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		tile_height = 1;
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		tile_height = IS_GEN2(dev) ? 16 : 8;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		tile_height = 32;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
+		switch (pixel_bytes) {
+		default:
+		case 1:
+			tile_height = 64;
+			break;
+		case 2:
+		case 4:
+			tile_height = 32;
+			break;
+		case 8:
+			tile_height = 16;
+			break;
+		case 16:
+			WARN_ONCE(1,
+				  "128-bit pixels are not supported for display!");
+			tile_height = 16;
+			break;
+		}
+		break;
+	default:
+		MISSING_CASE(fb_format_modifier);
+		tile_height = 1;
+		break;
+	}
+
+	return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+		      uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+	return ALIGN(height, intel_tile_height(dev, pixel_format,
+					       fb_format_modifier, 0));
+}
+
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+			const struct drm_plane_state *plane_state)
+{
+	struct intel_rotation_info *info = &view->rotation_info;
+	unsigned int tile_height, tile_pitch;
+
+	*view = i915_ggtt_view_normal;
+
+	if (!plane_state)
+		return 0;
+
+	if (!intel_rotation_90_or_270(plane_state->rotation))
+		return 0;
+
+	*view = i915_ggtt_view_rotated;
+
+	info->height = fb->height;
+	info->pixel_format = fb->pixel_format;
+	info->pitch = fb->pitches[0];
+	info->uv_offset = fb->offsets[1];
+	info->fb_modifier = fb->modifier[0];
+
+	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+					fb->modifier[0], 0);
+	tile_pitch = PAGE_SIZE / tile_height;
+	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
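+	/*
+	 * Worked example (illustrative, assuming 4K pages): a Y-tiled fb
+	 * with tile_height = 32 (tile_pitch = 4096 / 32 = 128 bytes),
+	 * pitches[0] = 4096 and height = 2160 gives width_pages = 32 and
+	 * height_pages = 68, so the rotated view covers 32 * 68 pages.
+	 */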
+
+	if (info->pixel_format == DRM_FORMAT_NV12) {
+		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+						fb->modifier[0], 1);
+		tile_pitch = PAGE_SIZE / tile_height;
+		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
+						     tile_height);
+		info->size_uv = info->width_pages_uv * info->height_pages_uv *
+				PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return 256 * 1024;
+	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+		 IS_VALLEYVIEW(dev_priv))
+		return 128 * 1024;
+	else if (INTEL_INFO(dev_priv)->gen >= 4)
+		return 4 * 1024;
+	else
+		return 0;
+}
+
+int
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+			   struct drm_framebuffer *fb,
+			   const struct drm_plane_state *plane_state,
+			   struct intel_engine_cs *pipelined,
+			   struct drm_i915_gem_request **pipelined_request)
+{
+	struct drm_device *dev = fb->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_ggtt_view view;
+	u32 alignment;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	switch (fb->modifier[0]) {
+	case DRM_FORMAT_MOD_NONE:
+		alignment = intel_linear_alignment(dev_priv);
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		if (INTEL_INFO(dev)->gen >= 9)
+			alignment = 256 * 1024;
+		else {
+			/* pin() will align the object as required by fence */
+			alignment = 0;
+		}
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+			  "Y tiling bo slipped through, driver bug!\n"))
+			return -EINVAL;
+		alignment = 1 * 1024 * 1024;
+		break;
+	default:
+		MISSING_CASE(fb->modifier[0]);
+		return -EINVAL;
+	}
+
+	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	if (ret)
+		return ret;
+
+	/* Note that the w/a also requires 64 PTEs of padding following the
+	 * bo. We currently fill all unused PTEs with the shadow page and so
+	 * we should always have valid PTEs following the scanout, preventing
+	 * the VT-d warning.
+	 */
+	if (need_vtd_wa(dev) && alignment < 256 * 1024)
+		alignment = 256 * 1024;
+
+	/*
+	 * Global gtt pte registers are special registers which actually forward
+	 * writes to a chunk of system memory. Which means that there is no risk
+	 * that the register values disappear as soon as we call
+	 * intel_runtime_pm_put(), so it is correct to wrap only the
+	 * pin/unpin/fence and not more.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+						   pipelined_request, &view);
+	if (ret)
+		goto err_interruptible;
+
+	/* Install a fence for tiled scan-out. Pre-i965 always needs a
+	 * fence, whereas 965+ only requires a fence if using
+	 * framebuffer compression.  For simplicity, we always install
+	 * a fence as the cost is not that onerous.
+	 */
+	if (view.type == I915_GGTT_VIEW_NORMAL) {
+		ret = i915_gem_object_get_fence(obj);
+		if (ret == -EDEADLK) {
+			/*
+			 * -EDEADLK means there are no free fences
+			 * and no pending flips.
+			 *
+			 * This is propagated to atomic, but it uses
+			 * -EDEADLK to force a locking recovery, so
+			 * change the returned error to -EBUSY.
+			 */
+			ret = -EBUSY;
+			goto err_unpin;
+		} else if (ret)
+			goto err_unpin;
+
+		i915_gem_object_pin_fence(obj);
+	}
+
+	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_from_display_plane(obj, &view);
+err_interruptible:
+	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+			       const struct drm_plane_state *plane_state)
+{
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_ggtt_view view;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
+	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	WARN_ONCE(ret, "Couldn't get view from plane state!");
+
+	if (view.type == I915_GGTT_VIEW_NORMAL)
+		i915_gem_object_unpin_fence(obj);
+
+	i915_gem_object_unpin_from_display_plane(obj, &view);
+}
+
+/* Computes the linear offset to the base tile and adjusts x, y. Bytes per
+ * pixel is assumed to be a power-of-two. */
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
+					     unsigned int tiling_mode,
+					     unsigned int cpp,
+					     unsigned int pitch)
+{
+	if (tiling_mode != I915_TILING_NONE) {
+		unsigned int tile_rows, tiles;
+
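+		/*
+		 * Tiled scanout handled here is X-tiled: 4KiB tiles that
+		 * are 512 bytes wide and 8 rows tall, hence the 512, 8
+		 * and 4096 constants below.
+		 */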
+		tile_rows = *y / 8;
+		*y %= 8;
+
+		tiles = *x / (512/cpp);
+		*x %= 512/cpp;
+
+		return tile_rows * pitch * 8 + tiles * 4096;
+	} else {
+		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
+		unsigned int offset;
+
+		offset = *y * pitch + *x * cpp;
+		*y = (offset & alignment) / pitch;
+		*x = ((offset & alignment) - *y * pitch) / cpp;
+		return offset & ~alignment;
+	}
+}
+
+static int i9xx_format_to_fourcc(int format)
+{
+	switch (format) {
+	case DISPPLANE_8BPP:
+		return DRM_FORMAT_C8;
+	case DISPPLANE_BGRX555:
+		return DRM_FORMAT_XRGB1555;
+	case DISPPLANE_BGRX565:
+		return DRM_FORMAT_RGB565;
+	default:
+	case DISPPLANE_BGRX888:
+		return DRM_FORMAT_XRGB8888;
+	case DISPPLANE_RGBX888:
+		return DRM_FORMAT_XBGR8888;
+	case DISPPLANE_BGRX101010:
+		return DRM_FORMAT_XRGB2101010;
+	case DISPPLANE_RGBX101010:
+		return DRM_FORMAT_XBGR2101010;
+	}
+}
+
+static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+	switch (format) {
+	case PLANE_CTL_FORMAT_RGB_565:
+		return DRM_FORMAT_RGB565;
+	default:
+	case PLANE_CTL_FORMAT_XRGB_8888:
+		if (rgb_order) {
+			if (alpha)
+				return DRM_FORMAT_ABGR8888;
+			else
+				return DRM_FORMAT_XBGR8888;
+		} else {
+			if (alpha)
+				return DRM_FORMAT_ARGB8888;
+			else
+				return DRM_FORMAT_XRGB8888;
+		}
+	case PLANE_CTL_FORMAT_XRGB_2101010:
+		if (rgb_order)
+			return DRM_FORMAT_XBGR2101010;
+		else
+			return DRM_FORMAT_XRGB2101010;
+	}
+}
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+			      struct intel_initial_plane_config *plane_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_framebuffer *fb = &plane_config->fb->base;
+	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+	u32 size_aligned = round_up(plane_config->base + plane_config->size,
+				    PAGE_SIZE);
+
+	size_aligned -= base_aligned;
+
+	if (plane_config->size == 0)
+		return false;
+
+	/* If the FB is too big, just don't use it since fbdev is not very
+	 * important and we should probably use that space with FBC or other
+	 * features. */
+	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+		return false;
+
+	obj = i915_gem_object_create_stolen_for_preallocated(dev,
+							     base_aligned,
+							     base_aligned,
+							     size_aligned);
+	if (!obj)
+		return false;
+
+	obj->tiling_mode = plane_config->tiling;
+	if (obj->tiling_mode == I915_TILING_X)
+		obj->stride = fb->pitches[0];
+
+	mode_cmd.pixel_format = fb->pixel_format;
+	mode_cmd.width = fb->width;
+	mode_cmd.height = fb->height;
+	mode_cmd.pitches[0] = fb->pitches[0];
+	mode_cmd.modifier[0] = fb->modifier[0];
+	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+	mutex_lock(&dev->struct_mutex);
+	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
+				   &mode_cmd, obj)) {
+		DRM_DEBUG_KMS("intel fb init failed\n");
+		goto out_unref_obj;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
+	return true;
+
+out_unref_obj:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	return false;
+}
+
+/* Update plane->state->fb to match plane->fb after driver-internal updates */
+static void
+update_state_fb(struct drm_plane *plane)
+{
+	if (plane->fb == plane->state->fb)
+		return;
+
+	if (plane->state->fb)
+		drm_framebuffer_unreference(plane->state->fb);
+	plane->state->fb = plane->fb;
+	if (plane->state->fb)
+		drm_framebuffer_reference(plane->state->fb);
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+			     struct intel_initial_plane_config *plane_config)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *c;
+	struct intel_crtc *i;
+	struct drm_i915_gem_object *obj;
+	struct drm_plane *primary = intel_crtc->base.primary;
+	struct drm_plane_state *plane_state = primary->state;
+	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
+	struct intel_plane *intel_plane = to_intel_plane(primary);
+	struct drm_framebuffer *fb;
+
+	if (!plane_config->fb)
+		return;
+
+	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+		fb = &plane_config->fb->base;
+		goto valid_fb;
+	}
+
+	kfree(plane_config->fb);
+
+	/*
+	 * Failed to alloc the obj, check to see if we should share
+	 * an fb with another CRTC instead
+	 */
+	for_each_crtc(dev, c) {
+		i = to_intel_crtc(c);
+
+		if (c == &intel_crtc->base)
+			continue;
+
+		if (!i->active)
+			continue;
+
+		fb = c->primary->fb;
+		if (!fb)
+			continue;
+
+		obj = intel_fb_obj(fb);
+		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+			drm_framebuffer_reference(fb);
+			goto valid_fb;
+		}
+	}
+
+	/*
+	 * We've failed to reconstruct the BIOS FB.  Current display state
+	 * indicates that the primary plane is visible, but has a NULL FB,
+	 * which will lead to problems later if we don't fix it up.  The
+	 * simplest solution is to just disable the primary plane now and
+	 * pretend the BIOS never had it enabled.
+	 */
+	to_intel_plane_state(plane_state)->visible = false;
+	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+	intel_pre_disable_primary(&intel_crtc->base);
+	intel_plane->disable_plane(primary, &intel_crtc->base);
+
+	return;
+
+valid_fb:
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
+	plane_state->src_w = fb->width << 16;
+	plane_state->src_h = fb->height << 16;
+
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
+	plane_state->crtc_w = fb->width;
+	plane_state->crtc_h = fb->height;
+
+	obj = intel_fb_obj(fb);
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dev_priv->preserve_bios_swizzle = true;
+
+	drm_framebuffer_reference(fb);
+	primary->fb = primary->state->fb = fb;
+	primary->crtc = primary->state->crtc = &intel_crtc->base;
+	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+}
+
+static void i9xx_update_primary_plane(struct drm_crtc *crtc,
+				      struct drm_framebuffer *fb,
+				      int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *primary = crtc->primary;
+	bool visible = to_intel_plane_state(primary->state)->visible;
+	struct drm_i915_gem_object *obj;
+	int plane = intel_crtc->plane;
+	unsigned long linear_offset;
+	u32 dspcntr;
+	u32 reg = DSPCNTR(plane);
+	int pixel_size;
+
+	if (!visible || !fb) {
+		I915_WRITE(reg, 0);
+		if (INTEL_INFO(dev)->gen >= 4)
+			I915_WRITE(DSPSURF(plane), 0);
+		else
+			I915_WRITE(DSPADDR(plane), 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	obj = intel_fb_obj(fb);
+	if (WARN_ON(obj == NULL))
+		return;
+
+	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (intel_crtc->pipe == PIPE_B)
+			dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+		/* pipesrc and dspsize control the size that is scaled from,
+		 * which should always be the user's requested size.
+		 */
+		I915_WRITE(DSPSIZE(plane),
+			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
+			   (intel_crtc->config->pipe_src_w - 1));
+		I915_WRITE(DSPPOS(plane), 0);
+	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+		I915_WRITE(PRIMSIZE(plane),
+			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
+			   (intel_crtc->config->pipe_src_w - 1));
+		I915_WRITE(PRIMPOS(plane), 0);
+		I915_WRITE(PRIMCNSTALPHA(plane), 0);
+	}
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case DRM_FORMAT_XRGB1555:
+		dspcntr |= DISPPLANE_BGRX555;
+		break;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
+		break;
+	default:
+		BUG();
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4 &&
+	    obj->tiling_mode != I915_TILING_NONE)
+		dspcntr |= DISPPLANE_TILED;
+
+	if (IS_G4X(dev))
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		intel_crtc->dspaddr_offset =
+			intel_gen4_compute_page_offset(dev_priv,
+						       &x, &y, obj->tiling_mode,
+						       pixel_size,
+						       fb->pitches[0]);
+		linear_offset -= intel_crtc->dspaddr_offset;
+	} else {
+		intel_crtc->dspaddr_offset = linear_offset;
+	}
+
+	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+		dspcntr |= DISPPLANE_ROTATE_180;
+
+		x += (intel_crtc->config->pipe_src_w - 1);
+		y += (intel_crtc->config->pipe_src_h - 1);
+
+		/* Find the last pixel of the last line of the display
+		 * data and add it to linear_offset.
+		 */
+		linear_offset +=
+			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
+	}
+
+	intel_crtc->adjusted_x = x;
+	intel_crtc->adjusted_y = y;
+
+	I915_WRITE(reg, dspcntr);
+
+	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(DSPSURF(plane),
+			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
+	} else
+		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+	POSTING_READ(reg);
+}
+
+static void ironlake_update_primary_plane(struct drm_crtc *crtc,
+					  struct drm_framebuffer *fb,
+					  int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *primary = crtc->primary;
+	bool visible = to_intel_plane_state(primary->state)->visible;
+	struct drm_i915_gem_object *obj;
+	int plane = intel_crtc->plane;
+	unsigned long linear_offset;
+	u32 dspcntr;
+	u32 reg = DSPCNTR(plane);
+	int pixel_size;
+
+	if (!visible || !fb) {
+		I915_WRITE(reg, 0);
+		I915_WRITE(DSPSURF(plane), 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	obj = intel_fb_obj(fb);
+	if (WARN_ON(obj == NULL))
+		return;
+
+	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
+		break;
+	default:
+		BUG();
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dspcntr |= DISPPLANE_TILED;
+
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	intel_crtc->dspaddr_offset =
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
+					       pixel_size,
+					       fb->pitches[0]);
+	linear_offset -= intel_crtc->dspaddr_offset;
+	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+		dspcntr |= DISPPLANE_ROTATE_180;
+
+		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+			x += (intel_crtc->config->pipe_src_w - 1);
+			y += (intel_crtc->config->pipe_src_h - 1);
+
+			/* Find the last pixel of the last line of the
+			 * display data and add it to linear_offset.
+			 */
+			linear_offset +=
+				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
+		}
+	}
+
+	intel_crtc->adjusted_x = x;
+	intel_crtc->adjusted_y = y;
+
+	I915_WRITE(reg, dspcntr);
+
+	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+	I915_WRITE(DSPSURF(plane),
+		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+	} else {
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
+	}
+	POSTING_READ(reg);
+}
+
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format)
+{
+	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+	/*
+	 * The stride is either expressed as a multiple of 64 bytes
+	 * chunks for linear buffers or in number of tiles for tiled
+	 * buffers.
+	 */
+	switch (fb_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		return 64;
+	case I915_FORMAT_MOD_X_TILED:
+		if (INTEL_INFO(dev)->gen == 2)
+			return 128;
+		return 512;
+	case I915_FORMAT_MOD_Y_TILED:
+		/* No need to check for Y tiling on old gens since the
+		 * display engine blocks such combinations before we get
+		 * here.
+		 */
+		return 128;
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (bits_per_pixel == 8)
+			return 64;
+		else
+			return 128;
+	default:
+		MISSING_CASE(fb_modifier);
+		return 64;
+	}
+}
+
+u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
+			   struct drm_i915_gem_object *obj,
+			   unsigned int plane)
+{
+	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+	struct i915_vma *vma;
+	u64 offset;
+
+	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+		view = &i915_ggtt_view_rotated;
+
+	vma = i915_gem_obj_to_ggtt_view(obj, view);
+	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
+		view->type))
+		return -1;
+
+	offset = vma->node.start;
+
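+	/* Plane 1 is the chroma plane (e.g. CbCr of NV12), which starts
+	 * uv_start_page pages into the rotated view. */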
+	if (plane == 1) {
+		offset += vma->ggtt_view.rotation_info.uv_start_page *
+			  PAGE_SIZE;
+	}
+
+	WARN_ON(upper_32_bits(offset));
+
+	return lower_32_bits(offset);
+}
+
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+}
+
+/*
+ * This function detaches (a.k.a. unbinds) unused scalers in hardware
+ */
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+{
+	struct intel_crtc_scaler_state *scaler_state;
+	int i;
+
+	scaler_state = &intel_crtc->config->scaler_state;
+
+	/* loop through and disable scalers that aren't in use */
+	for (i = 0; i < intel_crtc->num_scalers; i++) {
+		if (!scaler_state->scalers[i].in_use)
+			skl_detach_scaler(intel_crtc, i);
+	}
+}
+
+u32 skl_plane_ctl_format(uint32_t pixel_format)
+{
+	switch (pixel_format) {
+	case DRM_FORMAT_C8:
+		return PLANE_CTL_FORMAT_INDEXED;
+	case DRM_FORMAT_RGB565:
+		return PLANE_CTL_FORMAT_RGB_565;
+	case DRM_FORMAT_XBGR8888:
+		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+	case DRM_FORMAT_XRGB8888:
+		return PLANE_CTL_FORMAT_XRGB_8888;
+	/*
+	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+	 * to be already pre-multiplied. We need to add a knob (or a different
+	 * DRM_FORMAT) for user-space to configure that.
+	 */
+	case DRM_FORMAT_ABGR8888:
+		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
+			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+	case DRM_FORMAT_ARGB8888:
+		return PLANE_CTL_FORMAT_XRGB_8888 |
+			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+	case DRM_FORMAT_XRGB2101010:
+		return PLANE_CTL_FORMAT_XRGB_2101010;
+	case DRM_FORMAT_XBGR2101010:
+		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+	case DRM_FORMAT_YUYV:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+	case DRM_FORMAT_YVYU:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+	case DRM_FORMAT_UYVY:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+	case DRM_FORMAT_VYUY:
+		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+	default:
+		MISSING_CASE(pixel_format);
+	}
+
+	return 0;
+}
+
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+{
+	switch (fb_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		return PLANE_CTL_TILED_X;
+	case I915_FORMAT_MOD_Y_TILED:
+		return PLANE_CTL_TILED_Y;
+	case I915_FORMAT_MOD_Yf_TILED:
+		return PLANE_CTL_TILED_YF;
+	default:
+		MISSING_CASE(fb_modifier);
+	}
+
+	return 0;
+}
+
+u32 skl_plane_ctl_rotation(unsigned int rotation)
+{
+	switch (rotation) {
+	case BIT(DRM_ROTATE_0):
+		break;
+	/*
+	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr,
+	 * while i915 HW rotation is clockwise; that's why the values are
+	 * swapped here.
+	 */
+	case BIT(DRM_ROTATE_90):
+		return PLANE_CTL_ROTATE_270;
+	case BIT(DRM_ROTATE_180):
+		return PLANE_CTL_ROTATE_180;
+	case BIT(DRM_ROTATE_270):
+		return PLANE_CTL_ROTATE_90;
+	default:
+		MISSING_CASE(rotation);
+	}
+
+	return 0;
+}
+
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *plane = crtc->primary;
+	bool visible = to_intel_plane_state(plane->state)->visible;
+	struct drm_i915_gem_object *obj;
+	int pipe = intel_crtc->pipe;
+	u32 plane_ctl, stride_div, stride;
+	u32 tile_height, plane_offset, plane_size;
+	unsigned int rotation;
+	int x_offset, y_offset;
+	u32 surf_addr;
+	struct intel_crtc_state *crtc_state = intel_crtc->config;
+	struct intel_plane_state *plane_state;
+	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
+	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
+	int scaler_id = -1;
+
+	plane_state = to_intel_plane_state(plane->state);
+
+	if (!visible || !fb) {
+		I915_WRITE(PLANE_CTL(pipe, 0), 0);
+		I915_WRITE(PLANE_SURF(pipe, 0), 0);
+		POSTING_READ(PLANE_CTL(pipe, 0));
+		return;
+	}
+
+	plane_ctl = PLANE_CTL_ENABLE |
+		    PLANE_CTL_PIPE_GAMMA_ENABLE |
+		    PLANE_CTL_PIPE_CSC_ENABLE;
+
+	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+	rotation = plane->state->rotation;
+	plane_ctl |= skl_plane_ctl_rotation(rotation);
+
+	obj = intel_fb_obj(fb);
+	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+					       fb->pixel_format);
+	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
+
+	WARN_ON(drm_rect_width(&plane_state->src) == 0);
+
+	scaler_id = plane_state->scaler_id;
+	src_x = plane_state->src.x1 >> 16;
+	src_y = plane_state->src.y1 >> 16;
+	src_w = drm_rect_width(&plane_state->src) >> 16;
+	src_h = drm_rect_height(&plane_state->src) >> 16;
+	dst_x = plane_state->dst.x1;
+	dst_y = plane_state->dst.y1;
+	dst_w = drm_rect_width(&plane_state->dst);
+	dst_h = drm_rect_height(&plane_state->dst);
+
+	WARN_ON(x != src_x || y != src_y);
+
+	if (intel_rotation_90_or_270(rotation)) {
+		/* stride = Surface height in tiles */
+		tile_height = intel_tile_height(dev, fb->pixel_format,
+						fb->modifier[0], 0);
+		stride = DIV_ROUND_UP(fb->height, tile_height);
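+		/*
+		 * With the rotated GGTT view the fb is scanned out
+		 * sideways, so express the plane offset in the rotated
+		 * coordinate space, where the x axis runs along the
+		 * original fb height.
+		 */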
+		x_offset = stride * tile_height - y - src_h;
+		y_offset = x;
+		plane_size = (src_w - 1) << 16 | (src_h - 1);
+	} else {
+		stride = fb->pitches[0] / stride_div;
+		x_offset = x;
+		y_offset = y;
+		plane_size = (src_h - 1) << 16 | (src_w - 1);
+	}
+	plane_offset = y_offset << 16 | x_offset;
+
+	intel_crtc->adjusted_x = x_offset;
+	intel_crtc->adjusted_y = y_offset;
+
+	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
+	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+	if (scaler_id >= 0) {
+		uint32_t ps_ctrl = 0;
+
+		WARN_ON(!dst_w || !dst_h);
+		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
+			crtc_state->scaler_state.scalers[scaler_id].mode;
+		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
+		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
+		I915_WRITE(PLANE_POS(pipe, 0), 0);
+	} else {
+		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
+	}
+
+	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
+
+	POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   int x, int y, enum mode_set_atomic state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->fbc.disable_fbc)
+		dev_priv->fbc.disable_fbc(dev_priv);
+
+	dev_priv->display.update_primary_plane(crtc, fb, x, y);
+
+	return 0;
+}
+
+static void intel_complete_page_flips(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc(dev, crtc) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		enum plane plane = intel_crtc->plane;
+
+		intel_prepare_page_flip(dev, plane);
+		intel_finish_page_flip_plane(dev, plane);
+	}
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc(dev, crtc) {
+		struct intel_plane *plane = to_intel_plane(crtc->primary);
+		struct intel_plane_state *plane_state;
+
+		drm_modeset_lock_crtc(crtc, &plane->base);
+
+		plane_state = to_intel_plane_state(plane->base.state);
+
+		if (plane_state->base.fb)
+			plane->commit_plane(&plane->base, plane_state);
+
+		drm_modeset_unlock_crtc(crtc);
+	}
+}
+
+void intel_prepare_reset(struct drm_device *dev)
+{
+	/* no reset support for gen2 */
+	if (IS_GEN2(dev))
+		return;
+
+	/* reset doesn't touch the display */
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		return;
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * Disabling the crtcs gracefully seems nicer. Also the
+	 * g33 docs say we should at least disable all the planes.
+	 */
+	intel_display_suspend(dev);
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	/*
+	 * Flips in the rings will be nuked by the reset,
+	 * so complete all pending flips so that user space
+	 * will get its events and not get stuck.
+	 */
+	intel_complete_page_flips(dev);
+
+	/* no reset support for gen2 */
+	if (IS_GEN2(dev))
+		return;
+
+	/* reset doesn't touch the display */
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+		/*
+		 * Flips in the rings have been nuked by the reset,
+		 * so update the base address of all primary
+		 * planes to the last fb to make sure we're
+		 * showing the correct fb after a reset.
+		 *
+		 * FIXME: Atomic will make this obsolete since we won't schedule
+		 * CS-based flips (which might get lost in gpu resets) any more.
+		 */
+		intel_update_primary_planes(dev);
+		return;
+	}
+
+	/*
+	 * The display has been reset as well,
+	 * so need a full re-initialization.
+	 */
+	intel_runtime_pm_disable_interrupts(dev_priv);
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	intel_modeset_init_hw(dev);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_display_resume(dev);
+
+	intel_hpd_init(dev_priv);
+
+	drm_modeset_unlock_all(dev);
+}
+
+static void
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	bool was_interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	/* Big Hammer, we also need to ensure that any pending
+	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+	 * current scanout is retired before unpinning the old
+	 * framebuffer. Note that we rely on userspace rendering
+	 * into the buffer attached to the pipe they are waiting
+	 * on. If not, userspace generates a GPU hang with IPEHR
+	 * pointing to the MI_WAIT_FOR_EVENT.
+	 *
+	 * This should only fail upon a hung GPU, in which case we
+	 * can safely continue.
+	 */
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	dev_priv->mm.interruptible = was_interruptible;
+
+	WARN_ON(ret);
+}
+
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	bool pending;
+
+	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+		return false;
+
+	spin_lock_irq(&dev->event_lock);
+	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	spin_unlock_irq(&dev->event_lock);
+
+	return pending;
+}
+
+static void intel_update_pipe_config(struct intel_crtc *crtc,
+				     struct intel_crtc_state *old_crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->base.state);
+
+	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
+	crtc->base.mode = crtc->base.state->mode;
+
+	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
+		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
+		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+
+	if (HAS_DDI(dev))
+		intel_set_pipe_csc(&crtc->base);
+
+	/*
+	 * Update pipe size and adjust fitter if needed: the reason for this is
+	 * that in compute_mode_changes we check the native mode (not the pfit
+	 * mode) to see if we can flip rather than do a full mode set. In the
+	 * fastboot case, we'll flip, but if we don't update the pipesrc and
+	 * pfit state, we'll end up with a big fb scanned out into the wrong
+	 * sized surface.
+	 */
+
+	I915_WRITE(PIPESRC(crtc->pipe),
+		   ((pipe_config->pipe_src_w - 1) << 16) |
+		   (pipe_config->pipe_src_h - 1));
+
+	/* on skylake this is done by detaching scalers */
+	if (INTEL_INFO(dev)->gen >= 9) {
+		skl_detach_scalers(crtc);
+
+		if (pipe_config->pch_pfit.enabled)
+			skylake_pfit_enable(crtc);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		if (pipe_config->pch_pfit.enabled)
+			ironlake_pfit_enable(crtc);
+		else if (old_crtc_state->pch_pfit.enabled)
+			ironlake_pfit_disable(crtc, true);
+	}
+}
+
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (IS_IVYBRIDGE(dev)) {
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	}
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+
+	/* IVB wants error correction enabled */
+	if (IS_IVYBRIDGE(dev))
+		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+			   FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, tries;
+
+	/* FDI needs bits from pipe first */
+	assert_pipe_enabled(dev_priv, pipe);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+	I915_READ(reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+		   FDI_RX_PHASE_SYNC_POINTER_EN);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if ((temp & FDI_RX_BIT_LOCK)) {
+			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+			break;
+		}
+	}
+	if (tries == 5)
+		DRM_ERROR("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			break;
+		}
+	}
+	if (tries == 5)
+		DRM_ERROR("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done\n");
+
+}
+
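+/*
+ * Voltage-swing/pre-emphasis combinations that the SNB/IVB link training
+ * loops below step through until the receiver reports bit and symbol lock.
+ */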
+static const int snb_b_fdi_train_param[] = {
+	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, i, retry;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	/* SNB-B */
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_BIT_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+				DRM_DEBUG_KMS("FDI train 1 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	if (IS_GEN6(dev)) {
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		/* SNB-B */
+		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	}
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_2;
+	}
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_SYMBOL_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+				DRM_DEBUG_KMS("FDI train 2 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, i, j;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+		      I915_READ(FDI_RX_IIR(pipe)));
+
+	/* Try each vswing and preemphasis setting twice before moving on */
+	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+		/* disable first in case we need to retry */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+		temp &= ~FDI_TX_ENABLE;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_AUTO;
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp &= ~FDI_RX_ENABLE;
+		I915_WRITE(reg, temp);
+
+		/* enable CPU FDI TX and PCH FDI RX */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_DP_PORT_WIDTH_MASK;
+		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[j/2];
+		temp |= FDI_COMPOSITE_SYNC;
+		I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+		I915_WRITE(FDI_RX_MISC(pipe),
+			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		temp |= FDI_COMPOSITE_SYNC;
+		I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(1); /* should be 0.5us */
+
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+			if (temp & FDI_RX_BIT_LOCK ||
+			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+					      i);
+				break;
+			}
+			udelay(1); /* should be 0.5us */
+		}
+		if (i == 4) {
+			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+			continue;
+		}
+
+		/* Train 2 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(2); /* should be 1.5us */
+
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+			if (temp & FDI_RX_SYMBOL_LOCK ||
+			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+					      i);
+				goto train_done;
+			}
+			udelay(2); /* should be 1.5us */
+		}
+		if (i == 4)
+			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+	}
+
+train_done:
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+
+	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
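+	/* FDI RX wants the same BPC as PIPECONF; the field sits 11 bits higher */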
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(200);
+
+	/* Switch from Rawclk to PCDclk */
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp | FDI_PCDCLK);
+
+	POSTING_READ(reg);
+	udelay(200);
+
+	/* Enable CPU FDI TX PLL, always on for Ironlake */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
+}
+
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* Switch from PCDclk to Rawclk */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+	/* Disable CPU FDI TX PLL */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+	/* Wait for the clocks to turn off. */
+	POSTING_READ(reg);
+	udelay(100);
+}
+
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* disable CPU FDI tx and PCH FDI rx */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+	POSTING_READ(reg);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(0x7 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	/* Ironlake workaround, disable clock pointer after downing FDI */
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+	/* still set train pattern 1 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	/* BPC in FDI rx is consistent with that in PIPECONF */
+	temp &= ~(0x07 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(100);
+}
+
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+
+	/* Note that we don't need to be called with mode_config.lock here
+	 * as our list of CRTC objects is static for the lifetime of the
+	 * device and so cannot disappear as we iterate. Similarly, we can
+	 * happily treat the predicates as racy, atomic checks as userspace
+	 * cannot claim and pin a new fb without at least acquiring the
+	 * struct_mutex and so serialising with us.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (atomic_read(&crtc->unpin_work_count) == 0)
+			continue;
+
+		if (crtc->unpin_work)
+			intel_wait_for_vblank(dev, crtc->pipe);
+
+		return true;
+	}
+
+	return false;
+}
+
+static void page_flip_completed(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct intel_unpin_work *work = intel_crtc->unpin_work;
+
+	/* ensure that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
+	intel_crtc->unpin_work = NULL;
+
+	if (work->event)
+		drm_send_vblank_event(intel_crtc->base.dev,
+				      intel_crtc->pipe,
+				      work->event);
+
+	drm_crtc_vblank_put(&intel_crtc->base);
+
+	wake_up_all(&dev_priv->pending_flip_queue);
+	trace_i915_flip_complete(intel_crtc->plane,
+				 work->pending_flip_obj);
+
+	queue_work(dev_priv->wq, &work->work);
+}
+
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
+				       !intel_crtc_has_pending_flip(crtc),
+				       60*HZ) == 0)) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		spin_lock_irq(&dev->event_lock);
+		if (intel_crtc->unpin_work) {
+			WARN_ONCE(1, "Removing stuck page flip\n");
+			page_flip_completed(intel_crtc);
+		}
+		spin_unlock_irq(&dev->event_lock);
+	}
+
+	if (crtc->primary->fb) {
+		mutex_lock(&dev->struct_mutex);
+		intel_finish_fb(crtc->primary->fb);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+	u32 divsel, phaseinc, auxdiv, phasedir = 0;
+	u32 temp;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* It is necessary to ungate the pixclk gate prior to programming
+	 * the divisors, and gate it back when it is done.
+	 */
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+	/* Disable SSCCTL */
+	intel_sbi_write(dev_priv, SBI_SSCCTL6,
+			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+				SBI_SSCCTL_DISABLE,
+			SBI_ICLK);
+
+	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
+	if (clock == 20000) {
+		auxdiv = 1;
+		divsel = 0x41;
+		phaseinc = 0x20;
+	} else {
+		/* The iCLK virtual clock root frequency is in MHz,
+		 * but the adjusted_mode->crtc_clock is in KHz. To get the
+		 * divisors, it is necessary to divide one by another, so we
+		 * convert the virtual clock precision to KHz here for higher
+		 * precision.
+		 */
+		u32 iclk_virtual_root_freq = 172800 * 1000;
+		u32 iclk_pi_range = 64;
+		u32 desired_divisor, msb_divisor_value, pi_value;
+
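+		/*
+		 * Worked example: a 108000 KHz clock gives
+		 * desired_divisor = 172800000 / 108000 = 1600, hence
+		 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
+		 */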
+		desired_divisor = (iclk_virtual_root_freq / clock);
+		msb_divisor_value = desired_divisor / iclk_pi_range;
+		pi_value = desired_divisor % iclk_pi_range;
+
+		auxdiv = 0;
+		divsel = msb_divisor_value - 2;
+		phaseinc = pi_value;
+	}
+
+	/* This should not happen with any sane values */
+	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+			clock,
+			auxdiv,
+			divsel,
+			phasedir,
+			phaseinc);
+
+	/* Program SSCDIVINTPHASE6 */
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+	/* Program SSCAUXDIV */
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+	/* Enable modulator and associated divider */
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	temp &= ~SBI_SSCCTL_DISABLE;
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+	/* Wait for initialization time */
+	udelay(24);
+
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+						enum pipe pch_transcoder)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+
+	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
+		   I915_READ(HTOTAL(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
+		   I915_READ(HBLANK(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
+		   I915_READ(HSYNC(cpu_transcoder)));
+
+	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
+		   I915_READ(VTOTAL(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
+		   I915_READ(VBLANK(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
+		   I915_READ(VSYNC(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
+}
+
+static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t temp;
+
+	temp = I915_READ(SOUTH_CHICKEN1);
+	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
+		return;
+
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+	temp &= ~FDI_BC_BIFURCATION_SELECT;
+	if (enable)
+		temp |= FDI_BC_BIFURCATION_SELECT;
+
+	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
+	I915_WRITE(SOUTH_CHICKEN1, temp);
+	POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+
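+	/*
+	 * On CPT/IVB the four FDI B lanes can be bifurcated so that two of
+	 * them serve FDI C. Pipe C therefore always needs bifurcation on,
+	 * while pipe B may only use more than two lanes with it off.
+	 */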
+	switch (intel_crtc->pipe) {
+	case PIPE_A:
+		break;
+	case PIPE_B:
+		if (intel_crtc->config->fdi_lanes > 2)
+			cpt_set_fdi_bc_bifurcation(dev, false);
+		else
+			cpt_set_fdi_bc_bifurcation(dev, true);
+
+		break;
+	case PIPE_C:
+		cpt_set_fdi_bc_bifurcation(dev, true);
+
+		break;
+	default:
+		BUG();
+	}
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ *   - PCH PLLs
+ *   - FDI training & RX/TX
+ *   - update transcoder timings
+ *   - DP transcoding bits
+ *   - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	assert_pch_transcoder_disabled(dev_priv, pipe);
+
+	if (IS_IVYBRIDGE(dev))
+		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
+	/* Write the TU size bits before fdi link training, so that error
+	 * detection works. */
+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+	/* For PCH output, training FDI link */
+	dev_priv->display.fdi_link_train(crtc);
+
+	/* We need to program the right clock selection before writing the pixel
+	 * multiplier into the DPLL. */
+	if (HAS_PCH_CPT(dev)) {
+		u32 sel;
+
+		temp = I915_READ(PCH_DPLL_SEL);
+		temp |= TRANS_DPLL_ENABLE(pipe);
+		sel = TRANS_DPLLB_SEL(pipe);
+		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
+			temp |= sel;
+		else
+			temp &= ~sel;
+		I915_WRITE(PCH_DPLL_SEL, temp);
+	}
+
+	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
+	 * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+	 *
+	 * Note that enable_shared_dpll tries to do the right thing, but
+	 * get_shared_dpll unconditionally resets the pll - we need that to have
+	 * the right LVDS enable sequence. */
+	intel_enable_shared_dpll(intel_crtc);
+
+	/* set transcoder timing, panel must allow it */
+	assert_panel_unlocked(dev_priv, pipe);
+	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
+
+	intel_fdi_normal_train(crtc);
+
+	/* For PCH DP, enable TRANS_DP_CTL */
+	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
+		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+		reg = TRANS_DP_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(TRANS_DP_PORT_SEL_MASK |
+			  TRANS_DP_SYNC_MASK |
+			  TRANS_DP_BPC_MASK);
+		temp |= TRANS_DP_OUTPUT_ENABLE;
+		temp |= bpc << 9; /* same format but at 11:9 */
+
+		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+		switch (intel_trans_dp_port_sel(crtc)) {
+		case PCH_DP_B:
+			temp |= TRANS_DP_PORT_SEL_B;
+			break;
+		case PCH_DP_C:
+			temp |= TRANS_DP_PORT_SEL_C;
+			break;
+		case PCH_DP_D:
+			temp |= TRANS_DP_PORT_SEL_D;
+			break;
+		default:
+			BUG();
+		}
+
+		I915_WRITE(reg, temp);
+	}
+
+	ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+
+	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+	lpt_program_iclkip(crtc);
+
+	/* Set transcoder timing. */
+	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
+
+	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+						struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_shared_dpll *pll;
+	struct intel_shared_dpll_config *shared_dpll;
+	enum intel_dpll_id i;
+	int max = dev_priv->num_shared_dpll;
+
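+	/*
+	 * Selection order below: fixed pipe/port mappings first (IBX/BXT),
+	 * then a PLL whose programmed state matches ours exactly so it can
+	 * be shared, and only then a completely unused one.
+	 */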
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+		i = (enum intel_dpll_id) crtc->pipe;
+		pll = &dev_priv->shared_dplls[i];
+
+		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+			      crtc->base.base.id, pll->name);
+
+		WARN_ON(shared_dpll[i].crtc_mask);
+
+		goto found;
+	}
+
+	if (IS_BROXTON(dev_priv->dev)) {
+		/* PLL is attached to port in bxt */
+		struct intel_encoder *encoder;
+		struct intel_digital_port *intel_dig_port;
+
+		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
+		if (WARN_ON(!encoder))
+			return NULL;
+
+		intel_dig_port = enc_to_dig_port(&encoder->base);
+		/* 1:1 mapping between ports and PLLs */
+		i = (enum intel_dpll_id)intel_dig_port->port;
+		pll = &dev_priv->shared_dplls[i];
+		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+			crtc->base.base.id, pll->name);
+		WARN_ON(shared_dpll[i].crtc_mask);
+
+		goto found;
+	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+		/* Do not consider SPLL */
+		max = 2;
+
+	for (i = 0; i < max; i++) {
+		pll = &dev_priv->shared_dplls[i];
+
+		/* Only want to check enabled timings first */
+		if (shared_dpll[i].crtc_mask == 0)
+			continue;
+
+		if (memcmp(&crtc_state->dpll_hw_state,
+			   &shared_dpll[i].hw_state,
+			   sizeof(crtc_state->dpll_hw_state)) == 0) {
+			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
+				      crtc->base.base.id, pll->name,
+				      shared_dpll[i].crtc_mask,
+				      pll->active);
+			goto found;
+		}
+	}
+
+	/* Ok no matching timings, maybe there's a free one? */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		pll = &dev_priv->shared_dplls[i];
+		if (shared_dpll[i].crtc_mask == 0) {
+			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+				      crtc->base.base.id, pll->name);
+			goto found;
+		}
+	}
+
+	return NULL;
+
+found:
+	if (shared_dpll[i].crtc_mask == 0)
+		shared_dpll[i].hw_state =
+			crtc_state->dpll_hw_state;
+
+	crtc_state->shared_dpll = i;
+	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+			 pipe_name(crtc->pipe));
+
+	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
+
+	return pll;
+}
+
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id i;
+
+	if (!to_intel_atomic_state(state)->dpll_set)
+		return;
+
+	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		pll = &dev_priv->shared_dplls[i];
+		pll->config = shared_dpll[i];
+	}
+}
+
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int dslreg = PIPEDSL(pipe);
+	u32 temp;
+
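+	/*
+	 * PIPEDSL is the pipe's current scanline; on a live pipe it keeps
+	 * advancing, so a value stuck across both waits means the modeset
+	 * failed to get the pipe running.
+	 */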
+	temp = I915_READ(dslreg);
+	udelay(500);
+	if (wait_for(I915_READ(dslreg) != temp, 5)) {
+		if (wait_for(I915_READ(dslreg) != temp, 5))
+			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+	}
+}
+
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  int src_w, int src_h, int dst_w, int dst_h)
+{
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(crtc_state->base.crtc);
+	int need_scaling;
+
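+	/* A 90/270 rotation transposes the source, so compare swapped axes. */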
+	need_scaling = intel_rotation_90_or_270(rotation) ?
+		(src_h != dst_w || src_w != dst_h) :
+		(src_w != dst_w || src_h != dst_h);
+
+	/*
+	 * If the plane is being disabled, the scaler is no longer required,
+	 * or a detach is forced:
+	 *  - free the scaler bound to this plane/crtc
+	 *  - to do so, update crtc_state->scaler_state.scaler_users
+	 *
+	 * Here the scaler state in crtc_state is marked free so that the
+	 * scaler can be assigned to another user. The actual register
+	 * update to free the scaler is done in plane/panel-fit programming.
+	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
+	 */
+	if (force_detach || !need_scaling) {
+		if (*scaler_id >= 0) {
+			scaler_state->scaler_users &= ~(1 << scaler_user);
+			scaler_state->scalers[*scaler_id].in_use = 0;
+
+			DRM_DEBUG_KMS("scaler_user index %u.%u: "
+				"Staged freeing scaler id %d scaler_users = 0x%x\n",
+				intel_crtc->pipe, scaler_user, *scaler_id,
+				scaler_state->scaler_users);
+			*scaler_id = -1;
+		}
+		return 0;
+	}
+
+	/* range checks */
+	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
+		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
+			"size is out of scaler range\n",
+			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+		return -EINVAL;
+	}
+
+	/* mark this plane as a scaler user in crtc_state */
+	scaler_state->scaler_users |= (1 << scaler_user);
+	DRM_DEBUG_KMS("scaler_user index %u.%u: "
+		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+		scaler_state->scaler_users);
+
+	return 0;
+}
+
+/**
+ * skl_update_scaler_crtc - Stages an update to the scaler state for a given crtc.
+ *
+ * @state: crtc's atomic state
+ *
+ * Return:
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+		state->pipe_src_w, state->pipe_src_h,
+		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages an update to the scaler state for a given plane.
+ *
+ * @crtc_state: crtc's atomic state
+ * @plane_state: atomic plane state to update
+ *
+ * Return:
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+				   struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *intel_plane =
+		to_intel_plane(plane_state->base.plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	int ret;
+
+	bool force_detach = !fb || !plane_state->visible;
+
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_crtc->pipe,
+		      drm_plane_index(&intel_plane->base));
+
+	ret = skl_update_scaler(crtc_state, force_detach,
+				drm_plane_index(&intel_plane->base),
+				&plane_state->scaler_id,
+				plane_state->base.rotation,
+				drm_rect_width(&plane_state->src) >> 16,
+				drm_rect_height(&plane_state->src) >> 16,
+				drm_rect_width(&plane_state->dst),
+				drm_rect_height(&plane_state->dst));
+
+	if (ret || plane_state->scaler_id < 0)
+		return ret;
+
+	/* check colorkey */
+	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+			      intel_plane->base.base.id);
+		return -EINVAL;
+	}
+
+	/* Check src format */
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		break;
+	default:
+		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < crtc->num_scalers; i++)
+		skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc->config->scaler_state;
+
+	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+
+	if (crtc->config->pch_pfit.enabled) {
+		int id;
+
+		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
+			DRM_ERROR("Requesting pfit without getting a scaler first\n");
+			return;
+		}
+
+		id = scaler_state->scaler_id;
+		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
+		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+
+		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
+	}
+}
+
+static void ironlake_pfit_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	if (crtc->config->pch_pfit.enabled) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+	}
+}
+
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config->ips_enabled)
+		return;
+
+	/* We can only enable IPS after we enable a plane and wait for a vblank */
+	intel_wait_for_vblank(dev, crtc->pipe);
+
+	assert_plane_enabled(dev_priv, crtc->plane);
+	if (IS_BROADWELL(dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		/* Quoting Art Runyan: "it's not safe to expect any particular
+		 * value in IPS_CTL bit 31 after enabling IPS through the
+		 * mailbox." Moreover, the mailbox may return a bogus state,
+		 * so we need to just enable it and continue on.
+		 */
+	} else {
+		I915_WRITE(IPS_CTL, IPS_ENABLE);
+		/* The bit only becomes 1 in the next vblank, so this wait here
+		 * is essentially intel_wait_for_vblank. If we don't have this
+		 * and don't wait for vblanks until the end of crtc_enable, then
+		 * the HW state readout code will complain that the expected
+		 * IPS_CTL value is not the one we read. */
+		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+			DRM_ERROR("Timed out waiting for IPS enable\n");
+	}
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config->ips_enabled)
+		return;
+
+	assert_plane_enabled(dev_priv, crtc->plane);
+	if (IS_BROADWELL(dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
+		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+			DRM_ERROR("Timed out waiting for IPS disable\n");
+	} else {
+		I915_WRITE(IPS_CTL, 0);
+		POSTING_READ(IPS_CTL);
+	}
+
+	/* We need to wait for a vblank before we can disable the plane. */
+	intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int i;
+	bool reenable_ips = false;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->state->active)
+		return;
+
+	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	}
+
+	/* Workaround: Do not read or write the pipe palette/gamma data while
+	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+	 */
+	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+	     GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(intel_crtc);
+		reenable_ips = true;
+	}
+
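+	/* Each LUT entry packs 8-bit R/G/B into bits 23:16, 15:8 and 7:0. */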
+	for (i = 0; i < 256; i++) {
+		u32 palreg;
+
+		if (HAS_GMCH_DISPLAY(dev))
+			palreg = PALETTE(pipe, i);
+		else
+			palreg = LGC_PALETTE(pipe, i);
+
+		I915_WRITE(palreg,
+			   (intel_crtc->lut_r[i] << 16) |
+			   (intel_crtc->lut_g[i] << 8) |
+			   intel_crtc->lut_b[i]);
+	}
+
+	if (reenable_ips)
+		hsw_enable_ips(intel_crtc);
+}
+
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
+{
+	if (intel_crtc->overlay) {
+		struct drm_device *dev = intel_crtc->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
+
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.interruptible = false;
+		(void) intel_overlay_switch_off(intel_crtc->overlay);
+		dev_priv->mm.interruptible = true;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/* Let userspace switch the overlay on again. In most cases userspace
+	 * has to recompute where to put it anyway.
+	 */
+}
+
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS.  Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+static void
+intel_post_enable_primary(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * BDW signals flip done immediately if the plane
+	 * is disabled, even if the plane enable is already
+	 * armed to occur at the next vblank :(
+	 */
+	if (IS_BROADWELL(dev))
+		intel_wait_for_vblank(dev, pipe);
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	hsw_enable_ips(intel_crtc);
+
+	/*
+	 * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So don't enable underrun reporting before at least some planes
+	 * are enabled.
+	 * FIXME: Need to fix the logic to work when we turn off all planes
+	 * but leave the pipe running.
+	 */
+	if (IS_GEN2(dev))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	/* Underruns don't raise interrupts, so check manually. */
+	if (HAS_GMCH_DISPLAY(dev))
+		i9xx_check_fifo_underruns(dev_priv);
+}
+
+/**
+ * intel_pre_disable_primary - Perform operations before disabling primary plane
+ * @crtc: the CRTC whose primary plane is to be disabled
+ *
+ * Performs potentially sleeping operations that must be done before the
+ * primary plane is disabled, such as updating FBC and IPS.  Note that this may
+ * be called due to an explicit primary plane update, or due to an implicit
+ * disable that is caused when a sprite plane completely hides the primary
+ * plane.
+ */
+static void
+intel_pre_disable_primary(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
+	 * FIXME: Need to fix the logic to work when we turn off all planes
+	 * but leave the pipe running.
+	 */
+	if (IS_GEN2(dev))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+	/*
+	 * Vblank time updates from the shadow to live plane control register
+	 * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, disable
+	 * first the self-refresh mode. The self-refresh enable bit in turn
+	 * will be checked/applied by the HW only at the next frame start
+	 * event which is after the vblank start event, so we need to have a
+	 * wait-for-vblank between disabling the plane and the pipe.
+	 */
+	if (HAS_GMCH_DISPLAY(dev)) {
+		intel_set_memory_cxsr(dev_priv, false);
+		dev_priv->wm.vlv.cxsr = false;
+		intel_wait_for_vblank(dev, pipe);
+	}
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	hsw_disable_ips(intel_crtc);
+}
+
+static void intel_post_plane_update(struct intel_crtc *crtc)
+{
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_plane *plane;
+
+	if (atomic->wait_vblank)
+		intel_wait_for_vblank(dev, crtc->pipe);
+
+	intel_frontbuffer_flip(dev, atomic->fb_bits);
+
+	if (atomic->disable_cxsr)
+		crtc->wm.cxsr_allowed = true;
+
+	if (crtc->atomic.update_wm_post)
+		intel_update_watermarks(&crtc->base);
+
+	if (atomic->update_fbc)
+		intel_fbc_update(dev_priv);
+
+	if (atomic->post_enable_primary)
+		intel_post_enable_primary(&crtc->base);
+
+	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+		intel_update_sprite_watermarks(plane, &crtc->base,
+					       0, 0, 0, false, false);
+
+	memset(atomic, 0, sizeof(*atomic));
+}
+
+static void intel_pre_plane_update(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_plane *p;
+
+	/* Track fb's for any planes being disabled */
+	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+		struct intel_plane *plane = to_intel_plane(p);
+
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+				  plane->frontbuffer_bit);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (atomic->wait_for_flips)
+		intel_crtc_wait_for_pending_flips(&crtc->base);
+
+	if (atomic->disable_fbc)
+		intel_fbc_disable_crtc(crtc);
+
+	if (crtc->atomic.disable_ips)
+		hsw_disable_ips(crtc);
+
+	if (atomic->pre_disable_primary)
+		intel_pre_disable_primary(&crtc->base);
+
+	if (atomic->disable_cxsr) {
+		crtc->wm.cxsr_allowed = false;
+		intel_set_memory_cxsr(dev_priv, false);
+	}
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *p;
+	int pipe = intel_crtc->pipe;
+
+	intel_crtc_dpms_overlay_disable(intel_crtc);
+
+	drm_for_each_plane_mask(p, dev, plane_mask)
+		to_intel_plane(p)->disable_plane(p, crtc);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip to a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+
+	if (WARN_ON(intel_crtc->active))
+		return;
+
+	if (intel_crtc->config->has_pch_encoder)
+		intel_prepare_shared_dpll(intel_crtc);
+
+	if (intel_crtc->config->has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc, M1_N1);
+
+	intel_set_pipe_timings(intel_crtc);
+
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+				     &intel_crtc->config->fdi_m_n, NULL);
+	}
+
+	ironlake_set_pipeconf(crtc);
+
+	intel_crtc->active = true;
+
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	if (intel_crtc->config->has_pch_encoder) {
+		/* Note: FDI PLL enabling _must_ be done before we enable the
+		 * cpu pipes, hence this is separate from all the other fdi/pch
+		 * enabling. */
+		ironlake_fdi_pll_enable(intel_crtc);
+	} else {
+		assert_fdi_tx_disabled(dev_priv, pipe);
+		assert_fdi_rx_disabled(dev_priv, pipe);
+	}
+
+	ironlake_pfit_enable(intel_crtc);
+
+	/*
+	 * On ILK+ the LUT must be loaded before the pipe is running, but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(intel_crtc);
+
+	if (intel_crtc->config->has_pch_encoder)
+		ironlake_pch_enable(crtc);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	if (HAS_PCH_CPT(dev))
+		cpt_verify_modeset(dev, intel_crtc->pipe);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+}
+
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
+	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
+
+	if (WARN_ON(intel_crtc->active))
+		return;
+
+	if (intel_crtc_to_shared_dpll(intel_crtc))
+		intel_enable_shared_dpll(intel_crtc);
+
+	if (intel_crtc->config->has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc, M1_N1);
+
+	intel_set_pipe_timings(intel_crtc);
+
+	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+			   intel_crtc->config->pixel_multiplier - 1);
+	}
+
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+				     &intel_crtc->config->fdi_m_n, NULL);
+	}
+
+	haswell_set_pipeconf(crtc);
+
+	intel_set_pipe_csc(crtc);
+
+	intel_crtc->active = true;
+
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+	}
+
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+						      true);
+		dev_priv->display.fdi_link_train(crtc);
+	}
+
+	if (!is_dsi)
+		intel_ddi_enable_pipe_clock(intel_crtc);
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		skylake_pfit_enable(intel_crtc);
+	else
+		ironlake_pfit_enable(intel_crtc);
+
+	/*
+	 * On ILK+ the LUT must be loaded before the pipe is running, but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_ddi_set_pipe_settings(crtc);
+	if (!is_dsi)
+		intel_ddi_enable_transcoder_func(crtc);
+
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(intel_crtc);
+
+	if (intel_crtc->config->has_pch_encoder)
+		lpt_pch_enable(crtc);
+
+	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
+		intel_ddi_set_vc_payload_alloc(crtc, true);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		encoder->enable(encoder);
+		intel_opregion_notify_encoder(encoder, true);
+	}
+
+	/* If we change the relative order between pipe/planes enabling, we need
+	 * to change the workaround. */
+	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+	}
+}
+
+static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	/* To avoid upsetting the power well on Haswell, only disable the pfit
+	 * if it's in use. The hw state code will make sure we get this right. */
+	if (force || crtc->config->pch_pfit.enabled) {
+		I915_WRITE(PF_CTL(pipe), 0);
+		I915_WRITE(PF_WIN_POS(pipe), 0);
+		I915_WRITE(PF_WIN_SZ(pipe), 0);
+	}
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+
+	intel_disable_pipe(intel_crtc);
+
+	ironlake_pfit_disable(intel_crtc, false);
+
+	if (intel_crtc->config->has_pch_encoder)
+		ironlake_fdi_disable(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
+	if (intel_crtc->config->has_pch_encoder) {
+		ironlake_disable_pch_transcoder(dev_priv, pipe);
+
+		if (HAS_PCH_CPT(dev)) {
+			/* disable TRANS_DP_CTL */
+			reg = TRANS_DP_CTL(pipe);
+			temp = I915_READ(reg);
+			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+				  TRANS_DP_PORT_SEL_MASK);
+			temp |= TRANS_DP_PORT_SEL_NONE;
+			I915_WRITE(reg, temp);
+
+			/* disable DPLL_SEL */
+			temp = I915_READ(PCH_DPLL_SEL);
+			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+			I915_WRITE(PCH_DPLL_SEL, temp);
+		}
+
+		ironlake_fdi_pll_disable(intel_crtc);
+	}
+}
+
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		intel_opregion_notify_encoder(encoder, false);
+		encoder->disable(encoder);
+	}
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+						      false);
+	intel_disable_pipe(intel_crtc);
+
+	if (intel_crtc->config->dp_encoder_is_mst)
+		intel_ddi_set_vc_payload_alloc(crtc, false);
+
+	if (!is_dsi)
+		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		skylake_scaler_disable(intel_crtc);
+	else
+		ironlake_pfit_disable(intel_crtc, false);
+
+	if (!is_dsi)
+		intel_ddi_disable_pipe_clock(intel_crtc);
+
+	if (intel_crtc->config->has_pch_encoder) {
+		lpt_disable_pch_transcoder(dev_priv);
+		intel_ddi_fdi_disable(crtc);
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+}
+
+static void i9xx_pfit_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_state *pipe_config = crtc->config;
+
+	if (!pipe_config->gmch_pfit.control)
+		return;
+
+	/*
+	 * The panel fitter should only be adjusted whilst the pipe is disabled,
+	 * according to the register description and the PRM.
+	 */
+	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
+	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+
+	/* Border color in case we don't scale up to the full screen. Black by
+	 * default, change to something else for debugging. */
+	I915_WRITE(BCLRPAT(crtc->pipe), 0);
+}
+
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+	case PORT_B:
+		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+	case PORT_C:
+		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+	case PORT_D:
+		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+	case PORT_E:
+		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
+	default:
+		MISSING_CASE(port);
+		return POWER_DOMAIN_PORT_OTHER;
+	}
+}
+
+static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+		return POWER_DOMAIN_AUX_A;
+	case PORT_B:
+		return POWER_DOMAIN_AUX_B;
+	case PORT_C:
+		return POWER_DOMAIN_AUX_C;
+	case PORT_D:
+		return POWER_DOMAIN_AUX_D;
+	case PORT_E:
+		/* FIXME: Check VBT for actual wiring of PORT E */
+		return POWER_DOMAIN_AUX_D;
+	default:
+		MISSING_CASE(port);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
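+/* Iterate @domain over each power domain whose bit is set in @mask. */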
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct intel_digital_port *intel_dig_port;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_UNKNOWN:
+		/* Only DDI platforms should ever use this output type */
+		WARN_ON_ONCE(!HAS_DDI(dev));
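+		/* fall through */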
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
+	case INTEL_OUTPUT_EDP:
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		return port_to_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_DP_MST:
+		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+		return port_to_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_ANALOG:
+		return POWER_DOMAIN_PORT_CRT;
+	case INTEL_OUTPUT_DSI:
+		return POWER_DOMAIN_PORT_DSI;
+	default:
+		return POWER_DOMAIN_PORT_OTHER;
+	}
+}
+
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct intel_digital_port *intel_dig_port;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_HDMI:
+		/*
+		 * Only DDI platforms should ever use these output types.
+		 * We can get here after the HDMI detect code has already set
+		 * the type of the shared encoder. Since we can't be sure what
+		 * the status of the given connectors is, play it safe and
+		 * run the DP detection too.
+		 */
+		WARN_ON_ONCE(!HAS_DDI(dev));
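+		/* fall through */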
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_EDP:
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		return port_to_aux_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_DP_MST:
+		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+		return port_to_aux_power_domain(intel_dig_port->port);
+	default:
+		MISSING_CASE(intel_encoder->type);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *intel_encoder;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	unsigned long mask;
+	enum transcoder transcoder;
+
+	if (!crtc->state->active)
+		return 0;
+
+	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+	mask = BIT(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	if (intel_crtc->config->pch_pfit.enabled ||
+	    intel_crtc->config->pch_pfit.force_thru)
+		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		mask |= BIT(intel_display_port_power_domain(intel_encoder));
+
+	return mask;
+}
+
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum intel_display_power_domain domain;
+	unsigned long domains, new_domains, old_domains;
+
+	old_domains = intel_crtc->enabled_power_domains;
+	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
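+	/* Take references on the newly needed domains; the caller drops the stale ones. */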
+	domains = new_domains & ~old_domains;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_get(dev_priv, domain);
+
+	return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+				      unsigned long domains)
+{
+	enum intel_display_power_domain domain;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+}
+
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long put_domains[I915_MAX_PIPES] = {};
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (needs_modeset(crtc->state))
+			put_domains[to_intel_crtc(crtc)->pipe] =
+				modeset_get_crtc_power_domains(crtc);
+	}
+
+	if (dev_priv->display.modeset_commit_cdclk) {
+		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
+
+		if (cdclk != dev_priv->cdclk_freq &&
+		    !WARN_ON(!state->allow_modeset))
+			dev_priv->display.modeset_commit_cdclk(state);
+	}
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		if (put_domains[i])
+			modeset_put_power_domains(dev_priv, put_domains[i]);
+}
+
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+	int max_cdclk_freq = dev_priv->max_cdclk_freq;
+
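+	/*
+	 * HSW+ can drive the pixel clock all the way up to cdclk; older
+	 * platforms need a 90% (95% on CHV) guardband, and gen2/3 can use a
+	 * double-wide pipe to reach twice the cdclk-limited rate.
+	 */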
+	if (INTEL_INFO(dev_priv)->gen >= 9 ||
+	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		return max_cdclk_freq;
+	else if (IS_CHERRYVIEW(dev_priv))
+		return max_cdclk_freq*95/100;
+	else if (INTEL_INFO(dev_priv)->gen < 4)
+		return 2*max_cdclk_freq*90/100;
+	else
+		return max_cdclk_freq*90/100;
+}
+
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_SKYLAKE(dev)) {
+		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+
+		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+			dev_priv->max_cdclk_freq = 675000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+			dev_priv->max_cdclk_freq = 540000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+			dev_priv->max_cdclk_freq = 450000;
+		else
+			dev_priv->max_cdclk_freq = 337500;
+	} else if (IS_BROADWELL(dev))  {
+		/*
+		 * FIXME with extra cooling we can allow
+		 * 540 MHz for ULX and 675 MHz for ULT.
+		 * How can we know if extra cooling is
+		 * available? PCI ID, VTB, something else?
+		 */
+		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULX(dev))
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULT(dev))
+			dev_priv->max_cdclk_freq = 540000;
+		else
+			dev_priv->max_cdclk_freq = 675000;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 320000;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 400000;
+	} else {
+		/* otherwise assume cdclk is fixed */
+		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
+	}
+
+	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
+	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+			 dev_priv->max_cdclk_freq);
+
+	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
+			 dev_priv->max_dotclk_freq);
+}
+
+static void intel_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+			 dev_priv->cdclk_freq);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (IS_VALLEYVIEW(dev))
+		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
+
+	if (dev_priv->max_cdclk_freq == 0)
+		intel_update_max_cdclk(dev);
+}
+
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t divider;
+	uint32_t ratio;
+	uint32_t current_freq;
+	int ret;
+
+	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
+	switch (frequency) {
+	case 144000:
+		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+		ratio = BXT_DE_PLL_RATIO(60);
+		break;
+	case 288000:
+		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+		ratio = BXT_DE_PLL_RATIO(60);
+		break;
+	case 384000:
+		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+		ratio = BXT_DE_PLL_RATIO(60);
+		break;
+	case 576000:
+		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+		ratio = BXT_DE_PLL_RATIO(60);
+		break;
+	case 624000:
+		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+		ratio = BXT_DE_PLL_RATIO(65);
+		break;
+	case 19200:
+		/*
+		 * Bypass frequency with DE PLL disabled. Init ratio, divider
+		 * to suppress GCC warning.
+		 */
+		ratio = 0;
+		divider = 0;
+		break;
+	default:
+		DRM_ERROR("unsupported CDCLK freq %d", frequency);
+
+		return;
+	}
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	/* Inform power controller of upcoming frequency change */
+	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+				      0x80000000);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (ret) {
+		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
+			  ret, frequency);
+		return;
+	}
+
+	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
+	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
+	current_freq = current_freq * 500 + 1000;
+
+	/*
+	 * DE PLL has to be disabled when
+	 * - setting to 19.2MHz (bypass, PLL isn't used)
+	 * - before setting to 624MHz (PLL needs toggling)
+	 * - before setting to any frequency from 624MHz (PLL needs toggling)
+	 */
+	if (frequency == 19200 || frequency == 624000 ||
+	    current_freq == 624000) {
+		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
+		/* Timeout 200us */
+		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
+			     1))
+			DRM_ERROR("timout waiting for DE PLL unlock\n");
+	}
+
+	if (frequency != 19200) {
+		uint32_t val;
+
+		val = I915_READ(BXT_DE_PLL_CTL);
+		val &= ~BXT_DE_PLL_RATIO_MASK;
+		val |= ratio;
+		I915_WRITE(BXT_DE_PLL_CTL, val);
+
+		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+		/* Timeout 200us */
+		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
+			DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+		val = I915_READ(CDCLK_CTL);
+		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
+		val |= divider;
+		/*
+		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+		 * enable otherwise.
+		 */
+		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+		if (frequency >= 500000)
+			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+		val &= ~CDCLK_FREQ_DECIMAL_MASK;
+		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+		val |= (frequency - 1000) / 500;
+		I915_WRITE(CDCLK_CTL, val);
+	}
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+				      DIV_ROUND_UP(frequency, 25000));
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (ret) {
+		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+			  ret, frequency);
+		return;
+	}
+
+	intel_update_cdclk(dev);
+}
+
+void broxton_init_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	/*
+	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+	 * or else the reset will hang because there is no PCH to respond.
+	 * Move the handshake programming to the initialization sequence;
+	 * previously it was left up to the BIOS.
+	 */
+	val = I915_READ(HSW_NDE_RSTWRN_OPT);
+	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+	/* Enable PG1 for cdclk */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
+	/* check if cd clock is enabled */
+	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
+		DRM_DEBUG_KMS("Display already initialized\n");
+		return;
+	}
+
+	/*
+	 * FIXME:
+	 * - The initial CDCLK needs to be read from VBT.
+	 *   Need to make this change after VBT has changes for BXT.
+	 * - check if setting the max (or any) cdclk freq is really necessary
+	 *   here; it belongs in modeset time
+	 */
+	broxton_set_cdclk(dev, 624000);
+
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+		DRM_ERROR("DBuf power enable timeout!\n");
+}
+
+void broxton_uninit_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+		DRM_ERROR("DBuf power disable timeout!\n");
+
+	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
+	broxton_set_cdclk(dev, 19200);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+}
+
+static const struct skl_cdclk_entry {
+	unsigned int freq;
+	unsigned int vco;
+} skl_cdclk_frequencies[] = {
+	{ .freq = 308570, .vco = 8640 },
+	{ .freq = 337500, .vco = 8100 },
+	{ .freq = 432000, .vco = 8640 },
+	{ .freq = 450000, .vco = 8100 },
+	{ .freq = 540000, .vco = 8100 },
+	{ .freq = 617140, .vco = 8640 },
+	{ .freq = 675000, .vco = 8100 },
+};
+
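+/* CDCLK_CTL encodes the frequency as .1 fixpoint MHz with a -1MHz offset. */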
+static unsigned int skl_cdclk_decimal(unsigned int freq)
+{
+	return (freq - 1000) / 500;
+}
+
+static unsigned int skl_cdclk_get_vco(unsigned int freq)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
+		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
+
+		if (e->freq == freq)
+			return e->vco;
+	}
+
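+	/* unknown frequency, default to the VCO used by the standard DP rates */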
+	return 8100;
+}
+
+static void
+skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+{
+	unsigned int min_freq;
+	u32 val;
+
+	/* select the minimum CDCLK before enabling DPLL 0 */
+	if (required_vco == 8640)
+		min_freq = 308570;
+	else
+		min_freq = 337500;
+
+	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+
+	I915_WRITE(CDCLK_CTL, val);
+	POSTING_READ(CDCLK_CTL);
+
+	/*
+	 * We always enable DPLL0 with the lowest link rate possible, but still
+	 * taking into account the VCO required to operate the eDP panel at the
+	 * desired frequency. The usual DP link rates operate with a VCO of
+	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
+	 * The modeset code is responsible for the selection of the exact link
+	 * rate later on, with the constraint of choosing a frequency that
+	 * works with required_vco.
+	 */
+	val = I915_READ(DPLL_CTRL1);
+
+	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
+		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+	if (required_vco == 8640)
+		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
+					    SKL_DPLL0);
+	else
+		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+					    SKL_DPLL0);
+
+	I915_WRITE(DPLL_CTRL1, val);
+	POSTING_READ(DPLL_CTRL1);
+
+	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+
+	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+		DRM_ERROR("DPLL0 not locked\n");
+}
+
+static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	u32 val;
+
+	/* inform PCU we want to change CDCLK */
+	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
+}
+
+static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
+{
+	unsigned int i;
+
+	for (i = 0; i < 15; i++) {
+		if (skl_cdclk_pcu_ready(dev_priv))
+			return true;
+		udelay(10);
+	}
+
+	return false;
+}
+
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 freq_select, pcu_ack;
+
+	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+
+	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
+		DRM_ERROR("failed to inform PCU about cdclk change\n");
+		return;
+	}
+
+	/* set CDCLK_CTL */
+	switch (freq) {
+	case 450000:
+	case 432000:
+		freq_select = CDCLK_FREQ_450_432;
+		pcu_ack = 1;
+		break;
+	case 540000:
+		freq_select = CDCLK_FREQ_540;
+		pcu_ack = 2;
+		break;
+	case 308570:
+	case 337500:
+	default:
+		freq_select = CDCLK_FREQ_337_308;
+		pcu_ack = 0;
+		break;
+	case 617140:
+	case 675000:
+		freq_select = CDCLK_FREQ_675_617;
+		pcu_ack = 3;
+		break;
+	}
+
+	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+	POSTING_READ(CDCLK_CTL);
+
+	/* inform PCU of the change */
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
+}
+
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+	/* disable DBUF power */
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+		DRM_ERROR("DBuf power disable timeout\n");
+
+	/*
+	 * DMC assumes ownership of LCPLL and will get confused if we touch it.
+	 */
+	if (dev_priv->csr.dmc_payload) {
+		/* disable DPLL0 */
+		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
+					~LCPLL_PLL_ENABLE);
+		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+			DRM_ERROR("Couldn't disable DPLL0\n");
+	}
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+}
+
+void skl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	unsigned int required_vco;
+
+	/* enable PCH reset handshake */
+	val = I915_READ(HSW_NDE_RSTWRN_OPT);
+	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+
+	/* enable PG1 and Misc I/O */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
+	/* DPLL0 not enabled (happens on early BIOS versions) */
+	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+		/* enable DPLL0 */
+		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+		skl_dpll0_enable(dev_priv, required_vco);
+	}
+
+	/* set CDCLK to the frequency the BIOS chose */
+	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+
+	/* enable DBUF power */
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+		DRM_ERROR("DBuf power enable timeout\n");
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+					!= dev_priv->cdclk_freq);
+
+	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+		cmd = 2;
+	else if (cdclk == 266667)
+		cmd = 1;
+	else
+		cmd = 0;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK;
+	val |= (cmd << DSPFREQGUAR_SHIFT);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	if (cdclk == 400000) {
+		u32 divider;
+
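+		/* cdclk = 2 * HPLL / (divider + 1), so solve for the divider */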
+		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
+		/* adjust cdclk divider */
+		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+		val &= ~CCK_FREQUENCY_VALUES;
+		val |= divider;
+		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+			     50))
+			DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+
+	/* adjust self-refresh exit latency value */
+	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+	val &= ~0x7f;
+
+	/*
+	 * For high bandwidth configs, we set a higher latency in the bunit
+	 * so that the core display fetch happens in time to avoid underruns.
+	 */
+	if (cdclk == 400000)
+		val |= 4500 / 250; /* 4.5 usec */
+	else
+		val |= 3000 / 250; /* 3.0 usec */
+	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	intel_update_cdclk(dev);
+}
+
+static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+						!= dev_priv->cdclk_freq);
+
+	switch (cdclk) {
+	case 333333:
+	case 320000:
+	case 266667:
+	case 200000:
+		break;
+	default:
+		MISSING_CASE(cdclk);
+		return;
+	}
+
+	/*
+	 * Specs are full of misinformation, but testing on actual
+	 * hardware has shown that we just need to write the desired
+	 * CCK divider into the Punit register.
+	 */
+	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK_CHV;
+	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+				 int max_pixclk)
+{
+	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
+	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
+
+	/*
+	 * Really only a few cases to deal with, as only 4 CDclks are supported:
+	 *   200MHz
+	 *   267MHz
+	 *   320/333MHz (depends on HPLL freq)
+	 *   400MHz (VLV only)
+	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
+	 * of the lower bin and adjust if needed.
+	 *
+	 * We seem to get an unstable or solid color picture at 200MHz.
+	 * Not sure what's wrong. For now use 200MHz only when all pipes
+	 * are off.
+	 */
+	if (!IS_CHERRYVIEW(dev_priv) &&
+	    max_pixclk > freq_320*limit/100)
+		return 400000;
+	else if (max_pixclk > 266667*limit/100)
+		return freq_320;
+	else if (max_pixclk > 0)
+		return 266667;
+	else
+		return 200000;
+}
+
+static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
+			      int max_pixclk)
+{
+	/*
+	 * FIXME:
+	 * - remove the guardband, it's not needed on BXT
+	 * - set 19.2MHz bypass frequency if there are no active pipes
+	 */
+	if (max_pixclk > 576000*9/10)
+		return 624000;
+	else if (max_pixclk > 384000*9/10)
+		return 576000;
+	else if (max_pixclk > 288000*9/10)
+		return 384000;
+	else if (max_pixclk > 144000*9/10)
+		return 288000;
+	else
+		return 144000;
+}
+
+/* Compute the max pixel clock for the new configuration, based on the crtc
+ * states in the passed-in atomic state. */
+static int intel_mode_max_pixclk(struct drm_device *dev,
+				 struct drm_atomic_state *state)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	int max_pixclk = 0;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->base.enable)
+			continue;
+
+		max_pixclk = max(max_pixclk,
+				 crtc_state->base.adjusted_mode.crtc_clock);
+	}
+
+	return max_pixclk;
+}
+
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
+
+	if (max_pixclk < 0)
+		return max_pixclk;
+
+	to_intel_atomic_state(state)->cdclk =
+		valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+	return 0;
+}
+
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
+
+	if (max_pixclk < 0)
+		return max_pixclk;
+
+	to_intel_atomic_state(state)->cdclk =
+		broxton_calc_cdclk(dev_priv, max_pixclk);
+
+	return 0;
+}
+
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+	unsigned int credits, default_credits;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		default_credits = PFI_CREDIT(12);
+	else
+		default_credits = PFI_CREDIT(8);
+
+	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
+		/* CHV suggested value is 31 or 63 */
+		if (IS_CHERRYVIEW(dev_priv))
+			credits = PFI_CREDIT_63;
+		else
+			credits = PFI_CREDIT(15);
+	} else {
+		credits = default_credits;
+	}
+
+	/*
+	 * WA - write default credits before re-programming
+	 * FIXME: should we also set the resend bit here?
+	 */
+	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+		   default_credits);
+
+	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+		   credits | PFI_CREDIT_RESEND);
+
+	/*
+	 * FIXME is this guaranteed to clear
+	 * immediately or should we poll for it?
+	 */
+	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * FIXME: We can end up here with all power domains off, yet
+	 * with a CDCLK frequency other than the minimum. To account
+	 * for this take the PIPE-A power domain, which covers the HW
+	 * blocks needed for the following programming. This can be
+	 * removed once it's guaranteed that we get here either with
+	 * the minimum CDCLK set, or the required power domains
+	 * enabled.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
+	if (IS_CHERRYVIEW(dev))
+		cherryview_set_cdclk(dev, req_cdclk);
+	else
+		valleyview_set_cdclk(dev, req_cdclk);
+
+	vlv_program_pfi_credits(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+}
+
+static void valleyview_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	bool is_dsi;
+
+	if (WARN_ON(intel_crtc->active))
+		return;
+
+	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
+
+	if (intel_crtc->config->has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc, M1_N1);
+
+	intel_set_pipe_timings(intel_crtc);
+
+	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+		I915_WRITE(CHV_CANVAS(pipe), 0);
+	}
+
+	i9xx_set_pipeconf(intel_crtc);
+
+	intel_crtc->active = true;
+
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	if (!is_dsi) {
+		if (IS_CHERRYVIEW(dev)) {
+			chv_prepare_pll(intel_crtc, intel_crtc->config);
+			chv_enable_pll(intel_crtc, intel_crtc->config);
+		} else {
+			vlv_prepare_pll(intel_crtc, intel_crtc->config);
+			vlv_enable_pll(intel_crtc, intel_crtc->config);
+		}
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	i9xx_pfit_enable(intel_crtc);
+
+	intel_crtc_load_lut(crtc);
+
+	intel_enable_pipe(intel_crtc);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+}
+
+static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
+	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+}
+
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+
+	if (WARN_ON(intel_crtc->active))
+		return;
+
+	i9xx_set_pll_dividers(intel_crtc);
+
+	if (intel_crtc->config->has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc, M1_N1);
+
+	intel_set_pipe_timings(intel_crtc);
+
+	i9xx_set_pipeconf(intel_crtc);
+
+	intel_crtc->active = true;
+
+	if (!IS_GEN2(dev))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	i9xx_enable_pll(intel_crtc);
+
+	i9xx_pfit_enable(intel_crtc);
+
+	intel_crtc_load_lut(crtc);
+
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(intel_crtc);
+
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+}
+
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config->gmch_pfit.control)
+		return;
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
+			 I915_READ(PFIT_CONTROL));
+	I915_WRITE(PFIT_CONTROL, 0);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * On gen2 planes are double buffered but the pipe isn't, so we must
+	 * wait for planes to fully turn off before disabling the pipe.
+	 * We also need to wait on all gmch platforms because of the
+	 * self-refresh mode constraint explained above.
+	 */
+	intel_wait_for_vblank(dev, pipe);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
+	intel_disable_pipe(intel_crtc);
+
+	i9xx_pfit_disable(intel_crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
+	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
+		if (IS_CHERRYVIEW(dev))
+			chv_disable_pll(dev_priv, pipe);
+		else if (IS_VALLEYVIEW(dev))
+			vlv_disable_pll(dev_priv, pipe);
+		else
+			i9xx_disable_pll(intel_crtc);
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_pll_disable)
+			encoder->post_pll_disable(encoder);
+
+	if (!IS_GEN2(dev))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+}
+
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum intel_display_power_domain domain;
+	unsigned long domains;
+
+	if (!intel_crtc->active)
+		return;
+
+	if (to_intel_plane_state(crtc->primary->state)->visible) {
+		intel_crtc_wait_for_pending_flips(crtc);
+		intel_pre_disable_primary(crtc);
+
+		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+		to_intel_plane_state(crtc->primary->state)->visible = false;
+	}
+
+	dev_priv->display.crtc_disable(crtc);
+	intel_crtc->active = false;
+	intel_update_watermarks(crtc);
+	intel_disable_shared_dpll(intel_crtc);
+
+	domains = intel_crtc->enabled_power_domains;
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+	intel_crtc->enabled_power_domains = 0;
+}
+
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_suspend(struct drm_device *dev)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	unsigned crtc_mask = 0;
+	int ret = 0;
+
+	if (WARN_ON(!ctx))
+		return 0;
+
+	lockdep_assert_held(&ctx->ww_ctx);
+	state = drm_atomic_state_alloc(dev);
+	if (WARN_ON(!state))
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	state->allow_modeset = true;
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto free;
+
+		if (!crtc_state->active)
+			continue;
+
+		crtc_state->active = false;
+		crtc_mask |= 1 << drm_crtc_index(crtc);
+	}
+
+	if (crtc_mask) {
+		ret = drm_atomic_commit(state);
+
+		if (!ret) {
+			for_each_crtc(dev, crtc)
+				if (crtc_mask & (1 << drm_crtc_index(crtc)))
+					crtc->state->active = true;
+
+			return ret;
+		}
+	}
+
+free:
+	if (ret)
+		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+	drm_atomic_state_free(state);
+	return ret;
+}
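+
+/*
+ * Note the fixup loop after the successful commit above: the commit itself
+ * cleared crtc->state->active for every pipe it turned off, so the flag is
+ * deliberately set back to true. The software state thus still records
+ * which pipes should be running, so that the paired
+ * intel_modeset_setup_hw_state() call can bring exactly those pipes back
+ * up on resume.
+ */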
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
+{
+	struct drm_crtc *crtc = connector->base.state->crtc;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.base.id,
+		      connector->base.name);
+
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_connector_state *conn_state = connector->base.state;
+
+		I915_STATE_WARN(!crtc,
+			 "connector enabled without attached crtc\n");
+
+		if (!crtc)
+			return;
+
+		I915_STATE_WARN(!crtc->state->active,
+		      "connector is active, but attached crtc isn't\n");
+
+		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+			return;
+
+		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+			"atomic encoder doesn't match attached encoder\n");
+
+		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+			"attached encoder crtc differs from connector crtc\n");
+	} else {
+		I915_STATE_WARN(crtc && crtc->state->active,
+			"attached crtc is active, but connector isn't\n");
+		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+			"best encoder set without crtc!\n");
+	}
+}
+
+int intel_connector_init(struct intel_connector *connector)
+{
+	struct drm_connector_state *connector_state;
+
+	connector_state = kzalloc(sizeof(*connector_state), GFP_KERNEL);
+	if (!connector_state)
+		return -ENOMEM;
+
+	connector->base.state = connector_state;
+	return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+	struct intel_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	if (intel_connector_init(connector) < 0) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, so that the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+		return crtc_state->fdi_lanes;
+
+	return 0;
+}
+
+static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct drm_atomic_state *state = pipe_config->base.state;
+	struct intel_crtc *other_crtc;
+	struct intel_crtc_state *other_crtc_state;
+
+	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
+		      pipe_name(pipe), pipe_config->fdi_lanes);
+	if (pipe_config->fdi_lanes > 4) {
+		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
+			      pipe_name(pipe), pipe_config->fdi_lanes);
+		return -EINVAL;
+	}
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		if (pipe_config->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
+				      pipe_config->fdi_lanes);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	if (INTEL_INFO(dev)->num_pipes == 2)
+		return 0;
+
+	/* Ivybridge's 3-pipe FDI lane sharing is really complicated */
+	switch (pipe) {
+	case PIPE_A:
+		return 0;
+	case PIPE_B:
+		if (pipe_config->fdi_lanes <= 2)
+			return 0;
+
+		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+		other_crtc_state =
+			intel_atomic_get_crtc_state(state, other_crtc);
+		if (IS_ERR(other_crtc_state))
+			return PTR_ERR(other_crtc_state);
+
+		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+				      pipe_name(pipe), pipe_config->fdi_lanes);
+			return -EINVAL;
+		}
+		return 0;
+	case PIPE_C:
+		if (pipe_config->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+				      pipe_name(pipe), pipe_config->fdi_lanes);
+			return -EINVAL;
+		}
+
+		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+		other_crtc_state =
+			intel_atomic_get_crtc_state(state, other_crtc);
+		if (IS_ERR(other_crtc_state))
+			return PTR_ERR(other_crtc_state);
+
+		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		BUG();
+	}
+}
+
+#define RETRY 1
+static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
+				       struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int lane, link_bw, fdi_dotclock, ret;
+	bool needs_recompute = false;
+
+retry:
+	/* FDI is a binary signal running at ~2.7GHz, encoding
+	 * each output octet as 10 bits. The actual frequency
+	 * is stored as a divider into a 100MHz clock, and the
+	 * mode pixel clock is stored in units of 1KHz.
+	 * Hence the bw of each lane in terms of the mode signal
+	 * is:
+	 */
+	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+
+	fdi_dotclock = adjusted_mode->crtc_clock;
+
+	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
+					   pipe_config->pipe_bpp);
+
+	pipe_config->fdi_lanes = lane;
+
+	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+			       link_bw, &pipe_config->fdi_m_n);
+
+	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+				       intel_crtc->pipe, pipe_config);
+	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+		pipe_config->pipe_bpp -= 2*3;
+		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
+			      pipe_config->pipe_bpp);
+		needs_recompute = true;
+		pipe_config->bw_constrained = true;
+
+		goto retry;
+	}
+
+	if (needs_recompute)
+		return RETRY;
+
+	return ret;
+}
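+
+/*
+ * Worked example of the FDI bandwidth math above, with illustrative
+ * numbers: if intel_fdi_link_freq() reports 27 (a ~2.7 GHz link), then
+ * link_bw = 27 * 100000 / 10 = 270000 kHz of payload clock per lane after
+ * the 8b/10b encoding overhead. A 1920x1080@60 mode (crtc_clock of about
+ * 148500 kHz) at 24 bpp needs 148500 * 24 = 3564000 kbit/s against
+ * 270000 * 8 = 2160000 kbit/s per lane, so roughly two lanes -- which is
+ * what ironlake_get_lanes_required() should arrive at.
+ */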
+
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+				     struct intel_crtc_state *pipe_config)
+{
+	if (pipe_config->pipe_bpp > 24)
+		return false;
+
+	/* HSW can supposedly handle a pixel rate all the way up to cdclk */
+	if (IS_HASWELL(dev_priv->dev))
+		return true;
+
+	/*
+	 * We compare against max which means we must take
+	 * the increased cdclk requirement into account when
+	 * calculating the new cdclk.
+	 *
+	 * Should measure whether using a lower cdclk w/o IPS
+	 * would be preferable.
+	 */
+	return ilk_pipe_pixel_rate(pipe_config) <=
+		dev_priv->max_cdclk_freq * 95 / 100;
+}
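+
+/*
+ * Example of the 95% margin above, with illustrative numbers: for
+ * max_cdclk_freq = 450000 kHz, IPS remains possible only while the pipe
+ * pixel rate stays at or below 450000 * 95 / 100 = 427500 kHz.
+ */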
+
+static void hsw_compute_ips_config(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	pipe_config->ips_enabled = i915.enable_ips &&
+		hsw_crtc_supports_ips(crtc) &&
+		pipe_config_supports_ips(dev_priv, pipe_config);
+}
+
+static int intel_crtc_compute_config(struct intel_crtc *crtc,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+
+	/* FIXME should check pixel clock limits on all platforms */
+	if (INTEL_INFO(dev)->gen < 4) {
+		int clock_limit = dev_priv->max_cdclk_freq;
+
+		/*
+		 * Enable pixel doubling when the dot clock
+		 * is > 90% of the (display) core speed.
+		 *
+		 * GDG double wide on either pipe,
+		 * otherwise pipe A only.
+		 */
+		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+			clock_limit *= 2;
+			pipe_config->double_wide = true;
+		}
+
+		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
+			return -EINVAL;
+	}
+
+	/*
+	 * Pipe horizontal size must be even in:
+	 * - DVO ganged mode
+	 * - LVDS dual channel mode
+	 * - Double wide pipe
+	 */
+	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+		pipe_config->pipe_src_w &= ~1;
+
+	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
+	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
+		return -EINVAL;
+
+	if (HAS_IPS(dev))
+		hsw_compute_ips_config(crtc, pipe_config);
+
+	if (pipe_config->has_pch_encoder)
+		return ironlake_fdi_compute_config(crtc, pipe_config);
+
+	return 0;
+}
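+
+/*
+ * Example of the pixel-doubling rule above, with illustrative numbers:
+ * for clock_limit = 200000 kHz, a 190000 kHz mode exceeds the 90%
+ * threshold (180000 kHz). On pipe A (or on either pipe of i915G) the
+ * limit is doubled and the mode is accepted with double_wide set; on the
+ * other pipes of the remaining gen < 4 platforms it is rejected with
+ * -EINVAL.
+ */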
+
+static int skylake_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t linkrate;
+
+	if (!(lcpll1 & LCPLL_PLL_ENABLE))
+		return 24000; /* 24MHz is the cd freq with NSSC ref */
+
+	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+		return 540000;
+
+	linkrate = (I915_READ(DPLL_CTRL1) &
+		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
+	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
+		/* vco 8640 */
+		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+		case CDCLK_FREQ_450_432:
+			return 432000;
+		case CDCLK_FREQ_337_308:
+			return 308570;
+		case CDCLK_FREQ_675_617:
+			return 617140;
+		default:
+			WARN(1, "Unknown cd freq selection\n");
+		}
+	} else {
+		/* vco 8100 */
+		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+		case CDCLK_FREQ_450_432:
+			return 450000;
+		case CDCLK_FREQ_337_308:
+			return 337500;
+		case CDCLK_FREQ_675_617:
+			return 675000;
+		default:
+			WARN(1, "Unknown cd freq selection\n");
+		}
+	}
+
+	/* error case, do as if DPLL0 isn't enabled */
+	return 24000;
+}
+
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+	int cdclk;
+
+	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+		return 19200;
+
+	cdclk = 19200 * pll_ratio / 2;
+
+	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+	case BXT_CDCLK_CD2X_DIV_SEL_1:
+		return cdclk;  /* 576MHz or 624MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+		return cdclk * 2 / 3; /* 384MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_2:
+		return cdclk / 2; /* 288MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_4:
+		return cdclk / 4; /* 144MHz */
+	}
+
+	/* error case, do as if DE PLL isn't enabled */
+	return 19200;
+}
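+
+/*
+ * Example decode, with illustrative register values: a DE PLL ratio of 60
+ * gives cdclk = 19200 * 60 / 2 = 576000 kHz before the CD2X divider, and
+ * the four divider selections then yield 576000, 384000, 288000 and
+ * 144000 kHz, matching the comments in the switch above.
+ */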
+
+static int broadwell_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t lcpll = I915_READ(LCPLL_CTL);
+	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+	if (lcpll & LCPLL_CD_SOURCE_FCLK)
+		return 800000;
+	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		return 450000;
+	else if (freq == LCPLL_CLK_FREQ_450)
+		return 450000;
+	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+		return 540000;
+	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+		return 337500;
+	else
+		return 675000;
+}
+
+static int haswell_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t lcpll = I915_READ(LCPLL_CTL);
+	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+	if (lcpll & LCPLL_CD_SOURCE_FCLK)
+		return 800000;
+	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		return 450000;
+	else if (freq == LCPLL_CLK_FREQ_450)
+		return 450000;
+	else if (IS_HSW_ULT(dev))
+		return 337500;
+	else
+		return 540000;
+}
+
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+				      CCK_DISPLAY_CLOCK_CONTROL);
+}
+
+static int ilk_get_display_clock_speed(struct drm_device *dev)
+{
+	return 450000;
+}
+
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+	return 400000;
+}
+
+static int i915_get_display_clock_speed(struct drm_device *dev)
+{
+	return 333333;
+}
+
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+	return 200000;
+}
+
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+	u16 gcfgc = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+		return 266667;
+	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+		return 333333;
+	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+		return 444444;
+	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+		return 200000;
+	default:
+		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+		return 133333;
+	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+		return 166667;
+	}
+}
+
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+	u16 gcfgc = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+		return 133333;
+	else {
+		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+		case GC_DISPLAY_CLOCK_333_MHZ:
+			return 333333;
+		default:
+		case GC_DISPLAY_CLOCK_190_200_MHZ:
+			return 190000;
+		}
+	}
+}
+
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+	return 266667;
+}
+
+static int i85x_get_display_clock_speed(struct drm_device *dev)
+{
+	u16 hpllcc = 0;
+
+	/*
+	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
+	 * encoding is different :(
+	 * FIXME is this the right way to detect 852GM/852GMV?
+	 */
+	if (dev->pdev->revision == 0x1)
+		return 133333;
+
+	pci_bus_read_config_word(dev->pdev->bus,
+				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
+	/* Assume that the hardware is in the high speed state.  This
+	 * should be the default.
+	 */
+	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+	case GC_CLOCK_133_200:
+	case GC_CLOCK_133_200_2:
+	case GC_CLOCK_100_200:
+		return 200000;
+	case GC_CLOCK_166_250:
+		return 250000;
+	case GC_CLOCK_100_133:
+		return 133333;
+	case GC_CLOCK_133_266:
+	case GC_CLOCK_133_266_2:
+	case GC_CLOCK_166_266:
+		return 266667;
+	}
+
+	/* Shouldn't happen */
+	return 0;
+}
+
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+	return 133333;
+}
+
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	static const unsigned int blb_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 6400000,
+	};
+	static const unsigned int pnv_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 2666667,
+	};
+	static const unsigned int cl_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 3333333,
+		[5] = 3566667,
+		[6] = 4266667,
+	};
+	static const unsigned int elk_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+	};
+	static const unsigned int ctg_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 2666667,
+		[5] = 4266667,
+	};
+	const unsigned int *vco_table;
+	unsigned int vco;
+	uint8_t tmp = 0;
+
+	/* FIXME other chipsets? */
+	if (IS_GM45(dev))
+		vco_table = ctg_vco;
+	else if (IS_G4X(dev))
+		vco_table = elk_vco;
+	else if (IS_CRESTLINE(dev))
+		vco_table = cl_vco;
+	else if (IS_PINEVIEW(dev))
+		vco_table = pnv_vco;
+	else if (IS_G33(dev))
+		vco_table = blb_vco;
+	else
+		return 0;
+
+	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+	vco = vco_table[tmp & 0x7];
+	if (vco == 0)
+		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+	else
+		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+	return vco;
+}
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 12) & 0x1;
+
+	switch (vco) {
+	case 2666667:
+	case 4000000:
+	case 5333333:
+		return cdclk_sel ? 333333 : 222222;
+	case 3200000:
+		return cdclk_sel ? 320000 : 228571;
+	default:
+		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+		return 222222;
+	}
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 16, 10,  8 };
+	static const uint8_t div_4000[] = { 20, 12, 10 };
+	static const uint8_t div_5333[] = { 24, 16, 14 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+	return 200000;
+}
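+
+/*
+ * Example decode, with illustrative register values: for an HPLL VCO of
+ * 4000000 kHz and a GCFGC cdclk field of 2, cdclk_sel becomes 1, the
+ * div_4000 table selects a divider of 12, and the result is
+ * DIV_ROUND_CLOSEST(4000000, 12) = 333333 kHz.
+ */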
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
+	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
+	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 4) & 0x7;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 4800000:
+		div_table = div_4800;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+	return 190476;
+}
+
+static void
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+{
+	while (*num > DATA_LINK_M_N_MASK ||
+	       *den > DATA_LINK_M_N_MASK) {
+		*num >>= 1;
+		*den >>= 1;
+	}
+}
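+
+/*
+ * Example, with illustrative values: num = 0x2000000 and den = 0x1800000
+ * both exceed the 24-bit DATA_LINK_M_N_MASK, so the loop halves the pair
+ * twice and ends at 0x800000:0x600000 -- the same ratio, now fitting the
+ * registers.
+ */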
+
+static void compute_m_n(unsigned int m, unsigned int n,
+			uint32_t *ret_m, uint32_t *ret_n)
+{
+	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+	*ret_m = div_u64((uint64_t) m * *ret_n, n);
+	intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+		       int pixel_clock, int link_clock,
+		       struct intel_link_m_n *m_n)
+{
+	m_n->tu = 64;
+
+	compute_m_n(bits_per_pixel * pixel_clock,
+		    link_clock * nlanes * 8,
+		    &m_n->gmch_m, &m_n->gmch_n);
+
+	compute_m_n(pixel_clock, link_clock,
+		    &m_n->link_m, &m_n->link_n);
+}
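+
+/*
+ * Worked example for intel_link_compute_m_n(), with illustrative numbers:
+ * 24 bpp, 4 lanes, pixel_clock = 148500 kHz, link_clock = 270000 kHz.
+ * For the data M/N, compute_m_n(24 * 148500, 270000 * 4 * 8) clamps N to
+ * DATA_LINK_N_MAX (0x800000) and scales M to preserve the ratio
+ * 3564000 / 8640000 = 0.4125. For the link M/N, compute_m_n(148500,
+ * 270000) rounds N up to the next power of two (0x80000) and scales M to
+ * keep 148500 / 270000 = 0.55.
+ */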
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	if (i915.panel_use_ssc >= 0)
+		return i915.panel_use_ssc != 0;
+	return dev_priv->vbt.lvds_use_ssc
+		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+			   int num_connectors)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int refclk;
+
+	WARN_ON(!crtc_state->base.state);
+
+	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
+		refclk = 100000;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+		refclk = dev_priv->vbt.lvds_ssc_freq;
+		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+	} else if (!IS_GEN2(dev)) {
+		refclk = 96000;
+	} else {
+		refclk = 48000;
+	}
+
+	return refclk;
+}
+
+static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
+{
+	return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
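+
+/*
+ * Example FP packing, with illustrative divider values: n = 4, m1 = 10,
+ * m2 = 8 packs as 4 << 16 | 10 << 8 | 8 = 0x00040a08 in the i9xx
+ * encoding, while the Pineview encoding (no m1 field) would pack n = 4,
+ * m2 = 8 as (1 << 4) << 16 | 8 = 0x00100008.
+ */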
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+				     struct intel_crtc_state *crtc_state,
+				     intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->base.dev;
+	u32 fp, fp2 = 0;
+
+	if (IS_PINEVIEW(dev)) {
+		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+		if (reduced_clock)
+			fp2 = pnv_dpll_compute_fp(reduced_clock);
+	} else {
+		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+		if (reduced_clock)
+			fp2 = i9xx_dpll_compute_fp(reduced_clock);
+	}
+
+	crtc_state->dpll_hw_state.fp0 = fp;
+
+	crtc->lowfreq_avail = false;
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    reduced_clock) {
+		crtc_state->dpll_hw_state.fp1 = fp2;
+		crtc->lowfreq_avail = true;
+	} else {
+		crtc_state->dpll_hw_state.fp1 = fp;
+	}
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+				 enum pipe pipe)
+{
+	u32 reg_val;
+
+	/*
+	 * PLLB opamp always calibrates to max value of 0x3f; force-enable it
+	 * and set it to a reasonable value instead.
+	 */
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+	reg_val &= 0xffffff00;
+	reg_val |= 0x00000030;
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+	reg_val &= 0x8cffffff;
+	reg_val |= 0x8c000000;
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+	reg_val &= 0xffffff00;
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+	reg_val &= 0x00ffffff;
+	reg_val |= 0xb0000000;
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
+
+static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+}
+
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	enum transcoder transcoder = crtc->config->cpu_transcoder;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+		/* The M2_N2 registers exist only on gen < 8 and on CHV;
+		 * program them only when DRRS is supported, to make sure
+		 * the registers are not unnecessarily accessed.
+		 */
+		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
+			crtc->config->has_drrs) {
+			I915_WRITE(PIPE_DATA_M2(transcoder),
+					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
+			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
+			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+		}
+	} else {
+		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
+		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+	}
+}
+
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+{
+	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+	if (m_n == M1_N1) {
+		dp_m_n = &crtc->config->dp_m_n;
+		dp_m2_n2 = &crtc->config->dp_m2_n2;
+	} else if (m_n == M2_N2) {
+		/*
+		 * M2_N2 registers are not supported, so the m2_n2 divider
+		 * values need to be programmed into M1_N1 instead.
+		 */
+		dp_m_n = &crtc->config->dp_m2_n2;
+	} else {
+		DRM_ERROR("Unsupported divider value\n");
+		return;
+	}
+
+	if (crtc->config->has_pch_encoder)
+		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+	else
+		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+}
+
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
+{
+	u32 dpll, dpll_md;
+
+	/*
+	 * Enable DPIO clock input. We should never disable the reference
+	 * clock for pipe B, since VGA hotplug / manual detection depends
+	 * on it.
+	 */
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
+	/* We should never disable this, set it here for state tracking */
+	if (crtc->pipe == PIPE_B)
+		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+	dpll |= DPLL_VCO_ENABLE;
+	pipe_config->dpll_hw_state.dpll = dpll;
+
+	dpll_md = (pipe_config->pixel_multiplier - 1)
+		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+	pipe_config->dpll_hw_state.dpll_md = dpll_md;
+}
+
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	u32 mdiv;
+	u32 bestn, bestm1, bestm2, bestp1, bestp2;
+	u32 coreclk, reg_val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	bestn = pipe_config->dpll.n;
+	bestm1 = pipe_config->dpll.m1;
+	bestm2 = pipe_config->dpll.m2;
+	bestp1 = pipe_config->dpll.p1;
+	bestp2 = pipe_config->dpll.p2;
+
+	/* See eDP HDMI DPIO driver vbios notes doc */
+
+	/* PLL B needs special handling */
+	if (pipe == PIPE_B)
+		vlv_pllb_recal_opamp(dev_priv, pipe);
+
+	/* Set up Tx target for periodic Rcomp update */
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+	/* Disable target IRef on PLL */
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+	reg_val &= 0x00ffffff;
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+	/* Disable fast lock */
+	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+	/* Set idtafcrecal before PLL is enabled */
+	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+	mdiv |= ((bestn << DPIO_N_SHIFT));
+	mdiv |= (1 << DPIO_K_SHIFT);
+
+	/*
+	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+	 * but we don't support that).
+	 * Note: don't use the DAC post divider as it seems unstable.
+	 */
+	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+	mdiv |= DPIO_ENABLE_CALIBRATION;
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+	/* Set HBR and RBR LPF coefficients */
+	if (pipe_config->port_clock == 162000 ||
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+				 0x009f0003);
+	else
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+				 0x00d0000f);
+
+	if (pipe_config->has_dp_encoder) {
+		/* Use SSC source */
+		if (pipe == PIPE_A)
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+					 0x0df40000);
+		else
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+					 0x0df70000);
+	} else { /* HDMI or VGA */
+		/* Use bend source */
+		if (pipe == PIPE_A)
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+					 0x0df70000);
+		else
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+					 0x0df40000);
+	}
+
+	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+		coreclk |= 0x01000000;
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
+{
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+		DPLL_VCO_ENABLE;
+	if (crtc->pipe != PIPE_A)
+		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+	pipe_config->dpll_hw_state.dpll_md =
+		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_prepare_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	int dpll_reg = DPLL(crtc->pipe);
+	enum dpio_channel port = vlv_pipe_to_channel(pipe);
+	u32 loopfilter, tribuf_calcntr;
+	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+	u32 dpio_val;
+	int vco;
+
+	bestn = pipe_config->dpll.n;
+	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+	bestm1 = pipe_config->dpll.m1;
+	bestm2 = pipe_config->dpll.m2 >> 22;
+	bestp1 = pipe_config->dpll.p1;
+	bestp2 = pipe_config->dpll.p2;
+	vco = pipe_config->dpll.vco;
+	dpio_val = 0;
+	loopfilter = 0;
+
+	/*
+	 * Enable Refclk and SSC
+	 */
+	I915_WRITE(dpll_reg,
+		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* p1 and p2 divider */
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+			5 << DPIO_CHV_S1_DIV_SHIFT |
+			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+			1 << DPIO_CHV_K_DIV_SHIFT);
+
+	/* Feedback post-divider - m2 */
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+	/* Feedback refclk divider - n and m1 */
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+			DPIO_CHV_M1_DIV_BY_2 |
+			1 << DPIO_CHV_N_DIV_SHIFT);
+
+	/* M2 fraction division */
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+	/* M2 fraction division enable */
+	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+	if (bestm2_frac)
+		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+	/* Program digital lock detect threshold */
+	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+	if (!bestm2_frac)
+		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+
+	/* Loop filter */
+	if (vco == 5400000) {
+		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+		tribuf_calcntr = 0x9;
+	} else if (vco <= 6200000) {
+		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+		tribuf_calcntr = 0x9;
+	} else if (vco <= 6480000) {
+		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+		tribuf_calcntr = 0x8;
+	} else {
+		/* Not supported. Apply the same limits as in the max case */
+		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+		tribuf_calcntr = 0;
+	}
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
+	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
+	/* AFC Recal */
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+			DPIO_AFC_RECAL);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+		      const struct dpll *dpll)
+{
+	struct intel_crtc *crtc =
+		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	struct intel_crtc_state pipe_config = {
+		.base.crtc = &crtc->base,
+		.pixel_multiplier = 1,
+		.dpll = *dpll,
+	};
+
+	if (IS_CHERRYVIEW(dev)) {
+		chv_compute_dpll(crtc, &pipe_config);
+		chv_prepare_pll(crtc, &pipe_config);
+		chv_enable_pll(crtc, &pipe_config);
+	} else {
+		vlv_compute_dpll(crtc, &pipe_config);
+		vlv_prepare_pll(crtc, &pipe_config);
+		vlv_enable_pll(crtc, &pipe_config);
+	}
+}
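+
+/*
+ * A typical user is code that needs a pipe clock without doing a full
+ * modeset -- for instance the VLV/CHV eDP panel power sequencer kick,
+ * which briefly forces the PLL on for the pipe it borrows.
+ */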
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. To be used in cases where we need
+ * the PLL disabled even when @pipe is not going to be enabled.
+ */
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+{
+	if (IS_CHERRYVIEW(dev))
+		chv_disable_pll(to_i915(dev), pipe);
+	else
+		vlv_disable_pll(to_i915(dev), pipe);
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpll;
+	bool is_sdvo;
+	struct dpll *clock = &crtc_state->dpll;
+
+	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		dpll |= (crtc_state->pixel_multiplier - 1)
+			<< SDVO_MULTIPLIER_SHIFT_HIRES;
+	}
+
+	if (is_sdvo)
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	if (crtc_state->has_dp_encoder)
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	if (IS_PINEVIEW(dev))
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+	else {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (IS_G4X(dev) && reduced_clock)
+			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	}
+	switch (clock->p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+	if (INTEL_INFO(dev)->gen >= 4)
+		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+	if (crtc_state->sdvo_tv_clock)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	crtc_state->dpll_hw_state.dpll = dpll;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+		crtc_state->dpll_hw_state.dpll_md = dpll_md;
+	}
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpll;
+	struct dpll *clock = &crtc_state->dpll;
+
+	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	} else {
+		if (clock->p1 == 2)
+			dpll |= PLL_P1_DIVIDE_BY_TWO;
+		else
+			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (clock->p2 == 4)
+			dpll |= PLL_P2_DIVIDE_BY_4;
+	}
+
+	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+		dpll |= DPLL_DVO_2X_MODE;
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	uint32_t crtc_vtotal, crtc_vblank_end;
+	int vsyncshift = 0;
+
+	/* We need to be careful not to change the adjusted mode, as otherwise
+	 * the hw state checker will get angry at the mismatch. */
+	crtc_vtotal = adjusted_mode->crtc_vtotal;
+	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* the chip adds 2 halflines automatically */
+		crtc_vtotal -= 1;
+		crtc_vblank_end -= 1;
+
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
+		else
+			vsyncshift = adjusted_mode->crtc_hsync_start -
+				adjusted_mode->crtc_htotal / 2;
+		if (vsyncshift < 0)
+			vsyncshift += adjusted_mode->crtc_htotal;
+	}
+
+	if (INTEL_INFO(dev)->gen > 3)
+		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+	I915_WRITE(HTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_hdisplay - 1) |
+		   ((adjusted_mode->crtc_htotal - 1) << 16));
+	I915_WRITE(HBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_hblank_start - 1) |
+		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	I915_WRITE(HSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_hsync_start - 1) |
+		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+	I915_WRITE(VTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_vdisplay - 1) |
+		   ((crtc_vtotal - 1) << 16));
+	I915_WRITE(VBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_vblank_start - 1) |
+		   ((crtc_vblank_end - 1) << 16));
+	I915_WRITE(VSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_vsync_start - 1) |
+		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
+	 * bits. */
+	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+	    (pipe == PIPE_B || pipe == PIPE_C))
+		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+	/* pipesrc controls the size that is scaled from, which should
+	 * always be the user's requested size.
+	 */
+	I915_WRITE(PIPESRC(pipe),
+		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
+		   (intel_crtc->config->pipe_src_h - 1));
+}
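+
+/*
+ * Example of the register packing above, for an illustrative 1920x1080
+ * mode with htotal = 2200: HTOTAL gets (1920 - 1) | ((2200 - 1) << 16) =
+ * 0x0897077f, while PIPESRC gets ((1920 - 1) << 16) | (1080 - 1) =
+ * 0x077f0437 -- note that PIPESRC packs the width into the high half,
+ * unlike the timing registers.
+ */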
+
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	uint32_t tmp;
+
+	tmp = I915_READ(HTOTAL(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HBLANK(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HSYNC(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	tmp = I915_READ(VTOTAL(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VBLANK(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VSYNC(cpu_transcoder));
+	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
+		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+	}
+
+	tmp = I915_READ(PIPESRC(crtc->pipe));
+	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
+	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+}
+
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+				 struct intel_crtc_state *pipe_config)
+{
+	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
+	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
+	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
+	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+
+	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
+	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
+	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
+	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+
+	mode->flags = pipe_config->base.adjusted_mode.flags;
+	mode->type = DRM_MODE_TYPE_DRIVER;
+
+	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+	mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+	mode->hsync = drm_mode_hsync(mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
+}
+
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t pipeconf;
+
+	pipeconf = 0;
+
+	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+
+	if (intel_crtc->config->double_wide)
+		pipeconf |= PIPECONF_DOUBLE_WIDE;
+
+	/* only g4x and later have fancy bpc/dither controls */
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		/* Bspec claims that we can't use dithering for 30bpp pipes. */
+		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+			pipeconf |= PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+
+		switch (intel_crtc->config->pipe_bpp) {
+		case 18:
+			pipeconf |= PIPECONF_6BPC;
+			break;
+		case 24:
+			pipeconf |= PIPECONF_8BPC;
+			break;
+		case 30:
+			pipeconf |= PIPECONF_10BPC;
+			break;
+		default:
+			/* Case prevented by intel_choose_pipe_bpp_dither. */
+			BUG();
+		}
+	}
+
+	if (HAS_PIPE_CXSR(dev)) {
+		if (intel_crtc->lowfreq_avail) {
+			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+		} else {
+			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+		}
+	}
+
+	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		if (INTEL_INFO(dev)->gen < 4 ||
+		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+		else
+			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+	} else {
+		pipeconf |= PIPECONF_PROGRESSIVE;
+	}
+
+	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
+		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+
+	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+	POSTING_READ(PIPECONF(intel_crtc->pipe));
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+				   struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int refclk, num_connectors = 0;
+	intel_clock_t clock;
+	bool ok;
+	bool is_dsi = false;
+	struct intel_encoder *encoder;
+	const intel_limit_t *limit;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_DSI:
+			is_dsi = true;
+			break;
+		default:
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	if (is_dsi)
+		return 0;
+
+	if (!crtc_state->clock_set) {
+		refclk = i9xx_get_refclk(crtc_state, num_connectors);
+
+		/*
+		 * Returns a set of divisors for the desired target clock with
+		 * the given refclk, or FALSE.  The returned values represent
+		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
+		 * 2) / p1 / p2.
+		 */
+		limit = intel_limit(crtc_state, refclk);
+		ok = dev_priv->display.find_dpll(limit, crtc_state,
+						 crtc_state->port_clock,
+						 refclk, NULL, &clock);
+		if (!ok) {
+			DRM_ERROR("Couldn't find PLL settings for mode!\n");
+			return -EINVAL;
+		}
+
+		/* Compat-code for transition, will disappear. */
+		crtc_state->dpll.n = clock.n;
+		crtc_state->dpll.m1 = clock.m1;
+		crtc_state->dpll.m2 = clock.m2;
+		crtc_state->dpll.p1 = clock.p1;
+		crtc_state->dpll.p2 = clock.p2;
+	}
+
+	if (IS_GEN2(dev)) {
+		i8xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
+	} else if (IS_CHERRYVIEW(dev)) {
+		chv_compute_dpll(crtc, crtc_state);
+	} else if (IS_VALLEYVIEW(dev)) {
+		vlv_compute_dpll(crtc, crtc_state);
+	} else {
+		i9xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
+	}
+
+	return 0;
+}
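+
+/*
+ * Worked instance of the clock equation quoted above, with illustrative
+ * divider values: refclk = 96000 kHz, m1 = 10, m2 = 8, n = 2, p1 = 2,
+ * p2 = 10 gives 96000 * (5 * (10 + 2) + (8 + 2)) / (2 + 2) / 2 / 10 =
+ * 84000 kHz.
+ */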
+
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+		return;
+
+	tmp = I915_READ(PFIT_CONTROL);
+	if (!(tmp & PFIT_ENABLE))
+		return;
+
+	/* Check whether the pfit is attached to our pipe. */
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (crtc->pipe != PIPE_B)
+			return;
+	} else {
+		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+			return;
+	}
+
+	pipe_config->gmch_pfit.control = tmp;
+	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+	if (INTEL_INFO(dev)->gen < 5)
+		pipe_config->gmch_pfit.lvds_border_bits =
+			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
+}
+
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+			       struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = pipe_config->cpu_transcoder;
+	intel_clock_t clock;
+	u32 mdiv;
+	int refclk = 100000;
+
+	/* In the case of MIPI the DPLL will not even be used */
+	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+		return;
+
+	mutex_lock(&dev_priv->sb_lock);
+	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
+	mutex_unlock(&dev_priv->sb_lock);
+
+	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+	clock.m2 = mdiv & DPIO_M2DIV_MASK;
+	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
+
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+			      struct intel_initial_plane_config *plane_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, base, offset;
+	int pipe = crtc->pipe, plane = crtc->plane;
+	int fourcc, pixel_format;
+	unsigned int aligned_height;
+	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
+
+	val = I915_READ(DSPCNTR(plane));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		DRM_DEBUG_KMS("failed to alloc fb\n");
+		return;
+	}
+
+	fb = &intel_fb->base;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (val & DISPPLANE_TILED) {
+			plane_config->tiling = I915_TILING_X;
+			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		}
+	}
+
+	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+	fourcc = i9xx_format_to_fourcc(pixel_format);
+	fb->pixel_format = fourcc;
+	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (plane_config->tiling)
+			offset = I915_READ(DSPTILEOFF(plane));
+		else
+			offset = I915_READ(DSPLINOFF(plane));
+		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+	} else {
+		base = I915_READ(DSPADDR(plane));
+	}
+	plane_config->base = base;
+
+	val = I915_READ(PIPESRC(pipe));
+	fb->width = ((val >> 16) & 0xfff) + 1;
+	fb->height = ((val >> 0) & 0xfff) + 1;
+
+	val = I915_READ(DSPSTRIDE(pipe));
+	fb->pitches[0] = val & 0xffffffc0;
+
+	aligned_height = intel_fb_align_height(dev, fb->height,
+					       fb->pixel_format,
+					       fb->modifier[0]);
+
+	plane_config->size = fb->pitches[0] * aligned_height;
+
+	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+		      pipe_name(pipe), plane, fb->width, fb->height,
+		      fb->bits_per_pixel, base, fb->pitches[0],
+		      plane_config->size);
+
+	plane_config->fb = intel_fb;
+}
+
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+			       struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = pipe_config->cpu_transcoder;
+	enum dpio_channel port = vlv_pipe_to_channel(pipe);
+	intel_clock_t clock;
+	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+	int refclk = 100000;
+
+	mutex_lock(&dev_priv->sb_lock);
+	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+	mutex_unlock(&dev_priv->sb_lock);
+
+	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+	clock.m2 = (pll_dw0 & 0xff) << 22;
+	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+		clock.m2 |= pll_dw2 & 0x3fffff;
+	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	if (!intel_display_power_is_enabled(dev_priv,
+					    POWER_DOMAIN_PIPE(crtc->pipe)))
+		return false;
+
+	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+	tmp = I915_READ(PIPECONF(crtc->pipe));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		switch (tmp & PIPECONF_BPC_MASK) {
+		case PIPECONF_6BPC:
+			pipe_config->pipe_bpp = 18;
+			break;
+		case PIPECONF_8BPC:
+			pipe_config->pipe_bpp = 24;
+			break;
+		case PIPECONF_10BPC:
+			pipe_config->pipe_bpp = 30;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
+		pipe_config->limited_color_range = true;
+
+	if (INTEL_INFO(dev)->gen < 4)
+		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	i9xx_get_pfit_config(crtc, pipe_config);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		tmp = I915_READ(DPLL_MD(crtc->pipe));
+		pipe_config->pixel_multiplier =
+			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+		pipe_config->dpll_hw_state.dpll_md = tmp;
+	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		tmp = I915_READ(DPLL(crtc->pipe));
+		pipe_config->pixel_multiplier =
+			((tmp & SDVO_MULTIPLIER_MASK)
+			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+	} else {
+		/* Note that on i915G/GM the pixel multiplier is in the sdvo
+		 * port and will be fixed up in the encoder->get_config
+		 * function. */
+		pipe_config->pixel_multiplier = 1;
+	}
+	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+	if (!IS_VALLEYVIEW(dev)) {
+		/*
+		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+		 * on 830. Filter it out here so that we don't
+		 * report errors due to that.
+		 */
+		if (IS_I830(dev))
+			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
+		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+	} else {
+		/* Mask out read-only status bits. */
+		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+						     DPLL_PORTC_READY_MASK |
+						     DPLL_PORTB_READY_MASK);
+	}
+
+	if (IS_CHERRYVIEW(dev))
+		chv_crtc_clock_get(crtc, pipe_config);
+	else if (IS_VALLEYVIEW(dev))
+		vlv_crtc_clock_get(crtc, pipe_config);
+	else
+		i9xx_crtc_clock_get(crtc, pipe_config);
+
+	/*
+	 * Normally the dotclock is filled in by the encoder .get_config()
+	 * but in case the pipe is enabled w/o any ports we need a sane
+	 * default.
+	 */
+	pipe_config->base.adjusted_mode.crtc_clock =
+		pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+	return true;
+}
+
+static void ironlake_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	int i;
+	u32 val, final;
+	bool has_lvds = false;
+	bool has_cpu_edp = false;
+	bool has_panel = false;
+	bool has_ck505 = false;
+	bool can_ssc = false;
+	bool using_ssc_source = false;
+
+	/* We need to take the global config into account */
+	for_each_intel_encoder(dev, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			has_panel = true;
+			has_lvds = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			has_panel = true;
+			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+				has_cpu_edp = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (HAS_PCH_IBX(dev)) {
+		has_ck505 = dev_priv->vbt.display_clock_mode;
+		can_ssc = has_ck505;
+	} else {
+		has_ck505 = false;
+		can_ssc = true;
+	}
+
+	/* Check if any DPLLs are using the SSC source */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		u32 temp = I915_READ(PCH_DPLL(i));
+
+		if (!(temp & DPLL_VCO_ENABLE))
+			continue;
+
+		if ((temp & PLL_REF_INPUT_MASK) ==
+		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+			using_ssc_source = true;
+			break;
+		}
+	}
+
+	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+		      has_panel, has_lvds, has_ck505, using_ssc_source);
+
+	/* Ironlake: try to set up the display reference clock before
+	 * enabling the DPLL. This is only under the driver's control
+	 * after PCH stepping B; earlier steppings ignore this setting.
+	 */
+	val = I915_READ(PCH_DREF_CONTROL);
+
+	/* As we must carefully and slowly disable/enable each source in turn,
+	 * compute the final state we want first and check if we need to
+	 * make any changes at all.
+	 */
+	final = val;
+	final &= ~DREF_NONSPREAD_SOURCE_MASK;
+	if (has_ck505)
+		final |= DREF_NONSPREAD_CK505_ENABLE;
+	else
+		final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+	final &= ~DREF_SSC_SOURCE_MASK;
+	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+	final &= ~DREF_SSC1_ENABLE;
+
+	if (has_panel) {
+		final |= DREF_SSC_SOURCE_ENABLE;
+
+		if (intel_panel_use_ssc(dev_priv) && can_ssc)
+			final |= DREF_SSC1_ENABLE;
+
+		if (has_cpu_edp) {
+			if (intel_panel_use_ssc(dev_priv) && can_ssc)
+				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+			else
+				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+		} else
+			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+	} else if (using_ssc_source) {
+		final |= DREF_SSC_SOURCE_ENABLE;
+		final |= DREF_SSC1_ENABLE;
+	}
+
+	if (final == val)
+		return;
+
+	/* Always enable nonspread source */
+	val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+	if (has_ck505)
+		val |= DREF_NONSPREAD_CK505_ENABLE;
+	else
+		val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+	if (has_panel) {
+		val &= ~DREF_SSC_SOURCE_MASK;
+		val |= DREF_SSC_SOURCE_ENABLE;
+
+		/* SSC must be turned on before enabling the CPU output */
+		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+			DRM_DEBUG_KMS("Using SSC on panel\n");
+			val |= DREF_SSC1_ENABLE;
+		} else
+			val &= ~DREF_SSC1_ENABLE;
+
+		/* Get SSC going before enabling the outputs */
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+
+		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+		/* Enable CPU source on CPU attached eDP */
+		if (has_cpu_edp) {
+			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+				DRM_DEBUG_KMS("Using SSC on eDP\n");
+				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+			} else
+				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+		} else
+			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+	} else {
+		DRM_DEBUG_KMS("Disabling CPU source output\n");
+
+		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+		/* Turn off CPU output */
+		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+
+		if (!using_ssc_source) {
+			DRM_DEBUG_KMS("Disabling SSC source\n");
+
+			/* Turn off the SSC source */
+			val &= ~DREF_SSC_SOURCE_MASK;
+			val |= DREF_SSC_SOURCE_DISABLE;
+
+			/* Turn off SSC1 */
+			val &= ~DREF_SSC1_ENABLE;
+
+			I915_WRITE(PCH_DREF_CONTROL, val);
+			POSTING_READ(PCH_DREF_CONTROL);
+			udelay(200);
+		}
+	}
+
+	BUG_ON(val != final);
+}
+
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+	uint32_t tmp;
+
+	tmp = I915_READ(SOUTH_CHICKEN2);
+	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+	I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+		DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+	tmp = I915_READ(SOUTH_CHICKEN2);
+	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+	I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+	uint32_t tmp;
+
+	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp &= ~(0xFF << 24);
+	tmp |= (0x12 << 24);
+	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+	tmp &= ~(7 << 13);
+	tmp |= (5 << 13);
+	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+	tmp &= ~(7 << 13);
+	tmp |= (5 << 13);
+	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+	tmp |= (1 << 27);
+	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+	tmp |= (1 << 27);
+	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+	tmp &= ~(0xF << 28);
+	tmp |= (4 << 28);
+	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+	tmp &= ~(0xF << 28);
+	tmp |= (4 << 28);
+	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+				 bool with_fdi)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t reg, tmp;
+
+	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+		with_spread = true;
+	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+		with_fdi = false;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_DISABLE;
+	tmp |= SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	udelay(24);
+
+	if (with_spread) {
+		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+		tmp &= ~SBI_SSCCTL_PATHALT;
+		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+		if (with_fdi) {
+			lpt_reset_fdi_mphy(dev_priv);
+			lpt_program_fdi_mphy(dev_priv);
+		}
+	}
+
+	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t reg, tmp;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	if (!(tmp & SBI_SSCCTL_DISABLE)) {
+		if (!(tmp & SBI_SSCCTL_PATHALT)) {
+			tmp |= SBI_SSCCTL_PATHALT;
+			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+			udelay(32);
+		}
+		tmp |= SBI_SSCCTL_DISABLE;
+		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+	}
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	bool has_vga = false;
+
+	for_each_intel_encoder(dev, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			has_vga = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (has_vga)
+		lpt_enable_clkout_dp(dev, true, true);
+	else
+		lpt_disable_clkout_dp(dev);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ironlake_init_pch_refclk(dev);
+	else if (HAS_PCH_LPT(dev))
+		lpt_init_pch_refclk(dev);
+}
+
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	struct intel_encoder *encoder;
+	int num_connectors = 0, i;
+	bool is_lvds = false;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		default:
+			break;
+		}
+		num_connectors++;
+	}
+
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+			      dev_priv->vbt.lvds_ssc_freq);
+		return dev_priv->vbt.lvds_ssc_freq;
+	}
+
+	return 120000;
+}
+
+static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t val;
+
+	val = 0;
+
+	switch (intel_crtc->config->pipe_bpp) {
+	case 18:
+		val |= PIPECONF_6BPC;
+		break;
+	case 24:
+		val |= PIPECONF_8BPC;
+		break;
+	case 30:
+		val |= PIPECONF_10BPC;
+		break;
+	case 36:
+		val |= PIPECONF_12BPC;
+		break;
+	default:
+		/* Case prevented by intel_choose_pipe_bpp_dither. */
+		BUG();
+	}
+
+	if (intel_crtc->config->dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	if (intel_crtc->config->limited_color_range)
+		val |= PIPECONF_COLOR_RANGE_SELECT;
+
+	I915_WRITE(PIPECONF(pipe), val);
+	POSTING_READ(PIPECONF(pipe));
+}
+
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint16_t coeff = 0x7800; /* 1.0 */
+
+	/*
+	 * TODO: Check what kind of values actually come out of the pipe
+	 * with these coeff/postoff values and adjust to get the best
+	 * accuracy. Perhaps we even need to take the bpc value into
+	 * consideration.
+	 */
+
+	if (intel_crtc->config->limited_color_range)
+		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
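+	/*
+	 * Worked example: (235 - 16) * 4096 / 255 = 3517 (0xdbd), masked
+	 * with 0xff8 to 0xdb8, i.e. the 219/255 (~0.86) scale factor that
+	 * compresses full range into the 16..235 limited range.
+	 */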
+
+	/*
+	 * GY/GU and RY/RU should be the other way around according
+	 * to BSpec, but reality doesn't agree. Just set them up in
+	 * a way that results in the correct picture.
+	 */
+	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+	if (INTEL_INFO(dev)->gen > 6) {
+		uint16_t postoff = 0;
+
+		if (intel_crtc->config->limited_color_range)
+			postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+	} else {
+		uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+		if (intel_crtc->config->limited_color_range)
+			mode |= CSC_BLACK_SCREEN_OFFSET;
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+	}
+}
+
+static void haswell_set_pipeconf(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	uint32_t val;
+
+	val = 0;
+
+	if (IS_HASWELL(dev) && intel_crtc->config->dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(cpu_transcoder), val);
+	POSTING_READ(PIPECONF(cpu_transcoder));
+
+	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+
+	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
+		val = 0;
+
+		switch (intel_crtc->config->pipe_bpp) {
+		case 18:
+			val |= PIPEMISC_DITHER_6_BPC;
+			break;
+		case 24:
+			val |= PIPEMISC_DITHER_8_BPC;
+			break;
+		case 30:
+			val |= PIPEMISC_DITHER_10_BPC;
+			break;
+		case 36:
+			val |= PIPEMISC_DITHER_12_BPC;
+			break;
+		default:
+			/* Case prevented by pipe_config_set_bpp. */
+			BUG();
+		}
+
+		if (intel_crtc->config->dither)
+			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+		I915_WRITE(PIPEMISC(pipe), val);
+	}
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+				    struct intel_crtc_state *crtc_state,
+				    intel_clock_t *clock,
+				    bool *has_reduced_clock,
+				    intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int refclk;
+	const intel_limit_t *limit;
+	bool ret;
+
+	refclk = ironlake_get_refclk(crtc_state);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc_state, refclk);
+	ret = dev_priv->display.find_dpll(limit, crtc_state,
+					  crtc_state->port_clock,
+					  refclk, NULL, clock);
+	if (!ret)
+		return false;
+
+	return true;
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+	/*
+	 * Account for spread spectrum to avoid
+	 * oversubscribing the link. Max center spread
+	 * is 2.5%; use 5% for safety's sake.
+	 */
+	u32 bps = target_clock * bpp * 21 / 20;
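+
+	/*
+	 * Worked example: a 162000 kHz mode at 24 bpp over a 270000 kHz
+	 * link gives bps = 162000 * 24 * 21 / 20 = 4082400, and
+	 * DIV_ROUND_UP(4082400, 270000 * 8) = 2 lanes.
+	 */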
+	return DIV_ROUND_UP(bps, link_bw * 8);
+}
+
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+				      struct intel_crtc_state *crtc_state,
+				      u32 *fp,
+				      intel_clock_t *reduced_clock, u32 *fp2)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	struct intel_encoder *encoder;
+	uint32_t dpll;
+	int factor, num_connectors = 0, i;
+	bool is_lvds = false, is_sdvo = false;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			break;
+		default:
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	/* Enable autotuning of the PLL clock (if permissible) */
+	factor = 21;
+	if (is_lvds) {
+		if ((intel_panel_use_ssc(dev_priv) &&
+		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
+		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+			factor = 25;
+	} else if (crtc_state->sdvo_tv_clock)
+		factor = 20;
+
+	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+		*fp |= FP_CB_TUNE;
+
+	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
+		*fp2 |= FP_CB_TUNE;
+
+	dpll = 0;
+
+	if (is_lvds)
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	dpll |= (crtc_state->pixel_multiplier - 1)
+		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+	if (is_sdvo)
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+	if (crtc_state->has_dp_encoder)
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	/* also FPA1 */
+	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
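+	/* e.g. p1 == 3 programs the bitmask 1 << 2 = 0x4 into both fields */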
+
+	switch (crtc_state->dpll.p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	return dpll | DPLL_VCO_ENABLE;
+}
+
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
+				       struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll = 0, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false;
+	struct intel_shared_dpll *pll;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
+
+	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
+				     &has_reduced_clock, &reduced_clock);
+	if (!ok && !crtc_state->clock_set) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+	/* Compat-code for transition, will disappear. */
+	if (!crtc_state->clock_set) {
+		crtc_state->dpll.n = clock.n;
+		crtc_state->dpll.m1 = clock.m1;
+		crtc_state->dpll.m2 = clock.m2;
+		crtc_state->dpll.p1 = clock.p1;
+		crtc_state->dpll.p2 = clock.p2;
+	}
+
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (crtc_state->has_pch_encoder) {
+		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+		if (has_reduced_clock)
+			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
+
+		dpll = ironlake_compute_dpll(crtc, crtc_state,
+					     &fp, &reduced_clock,
+					     has_reduced_clock ? &fp2 : NULL);
+
+		crtc_state->dpll_hw_state.dpll = dpll;
+		crtc_state->dpll_hw_state.fp0 = fp;
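+		/* Without a reduced clock FP1 gets the same divisors as
+		 * FP0, so the downclocked state leaves the frequency
+		 * unchanged. */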
+		if (has_reduced_clock)
+			crtc_state->dpll_hw_state.fp1 = fp2;
+		else
+			crtc_state->dpll_hw_state.fp1 = fp;
+
+		pll = intel_get_shared_dpll(crtc, crtc_state);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(crtc->pipe));
+			return -EINVAL;
+		}
+	}
+
+	if (is_lvds && has_reduced_clock)
+		crtc->lowfreq_avail = true;
+	else
+		crtc->lowfreq_avail = false;
+
+	return 0;
+}
+
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+		& ~TU_SIZE_MASK;
+	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+					 enum transcoder transcoder,
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+		/* Read the M2_N2 registers only on gen < 8 (where they
+		 * exist) and only if DRRS is supported, so that they are
+		 * not read unnecessarily.
+		 */
+		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+			crtc->config->has_drrs) {
+			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
+			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
+			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
+					& ~TU_SIZE_MASK;
+			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
+			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+		}
+	} else {
+		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	}
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config)
+{
+	if (pipe_config->has_pch_encoder)
+		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+	else
+		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+					     &pipe_config->dp_m_n,
+					     &pipe_config->dp_m2_n2);
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+					struct intel_crtc_state *pipe_config)
+{
+	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+				     &pipe_config->fdi_m_n, NULL);
+}
+
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+				    struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
+	uint32_t ps_ctrl = 0;
+	int id = -1;
+	int i;
+
+	/* find scaler attached to this pipe */
+	for (i = 0; i < crtc->num_scalers; i++) {
+		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
+			id = i;
+			pipe_config->pch_pfit.enabled = true;
+			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
+			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+			break;
+		}
+	}
+
+	scaler_state->scaler_id = id;
+	if (id >= 0) {
+		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+	} else {
+		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
+	}
+}
+
+static void
+skylake_get_initial_plane_config(struct intel_crtc *crtc,
+				 struct intel_initial_plane_config *plane_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, base, offset, stride_mult, tiling;
+	int pipe = crtc->pipe;
+	int fourcc, pixel_format;
+	unsigned int aligned_height;
+	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		DRM_DEBUG_KMS("failed to alloc fb\n");
+		return;
+	}
+
+	fb = &intel_fb->base;
+
+	val = I915_READ(PLANE_CTL(pipe, 0));
+	if (!(val & PLANE_CTL_ENABLE))
+		goto error;
+
+	pixel_format = val & PLANE_CTL_FORMAT_MASK;
+	fourcc = skl_format_to_fourcc(pixel_format,
+				      val & PLANE_CTL_ORDER_RGBX,
+				      val & PLANE_CTL_ALPHA_MASK);
+	fb->pixel_format = fourcc;
+	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+	tiling = val & PLANE_CTL_TILED_MASK;
+	switch (tiling) {
+	case PLANE_CTL_TILED_LINEAR:
+		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+		break;
+	case PLANE_CTL_TILED_X:
+		plane_config->tiling = I915_TILING_X;
+		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		break;
+	case PLANE_CTL_TILED_Y:
+		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+		break;
+	case PLANE_CTL_TILED_YF:
+		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+		break;
+	default:
+		MISSING_CASE(tiling);
+		goto error;
+	}
+
+	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+	plane_config->base = base;
+
+	offset = I915_READ(PLANE_OFFSET(pipe, 0));
+
+	val = I915_READ(PLANE_SIZE(pipe, 0));
+	fb->height = ((val >> 16) & 0xfff) + 1;
+	fb->width = ((val >> 0) & 0x1fff) + 1;
+
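+	/*
+	 * PLANE_STRIDE counts in units of stride_mult bytes (64 for a
+	 * linear framebuffer, the tile width in bytes for tiled
+	 * modifiers), so multiply to recover the byte pitch.
+	 */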
+	val = I915_READ(PLANE_STRIDE(pipe, 0));
+	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+						fb->pixel_format);
+	fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+	aligned_height = intel_fb_align_height(dev, fb->height,
+					       fb->pixel_format,
+					       fb->modifier[0]);
+
+	plane_config->size = fb->pitches[0] * aligned_height;
+
+	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+		      pipe_name(pipe), fb->width, fb->height,
+		      fb->bits_per_pixel, base, fb->pitches[0],
+		      plane_config->size);
+
+	plane_config->fb = intel_fb;
+	return;
+
+error:
+	kfree(fb);
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PF_CTL(crtc->pipe));
+
+	if (tmp & PF_ENABLE) {
+		pipe_config->pch_pfit.enabled = true;
+		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+
+		/* We currently do not free assignments of panel fitters on
+		 * ivb/hsw (since we don't use the higher upscaling modes which
+		 * differentiate them), so just WARN about this case for now. */
+		if (IS_GEN7(dev)) {
+			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
+				PF_PIPE_SEL_IVB(crtc->pipe));
+		}
+	}
+}
+
+static void
+ironlake_get_initial_plane_config(struct intel_crtc *crtc,
+				  struct intel_initial_plane_config *plane_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, base, offset;
+	int pipe = crtc->pipe;
+	int fourcc, pixel_format;
+	unsigned int aligned_height;
+	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
+
+	val = I915_READ(DSPCNTR(pipe));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		DRM_DEBUG_KMS("failed to alloc fb\n");
+		return;
+	}
+
+	fb = &intel_fb->base;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (val & DISPPLANE_TILED) {
+			plane_config->tiling = I915_TILING_X;
+			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		}
+	}
+
+	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+	fourcc = i9xx_format_to_fourcc(pixel_format);
+	fb->pixel_format = fourcc;
+	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		offset = I915_READ(DSPOFFSET(pipe));
+	} else {
+		if (plane_config->tiling)
+			offset = I915_READ(DSPTILEOFF(pipe));
+		else
+			offset = I915_READ(DSPLINOFF(pipe));
+	}
+	plane_config->base = base;
+
+	val = I915_READ(PIPESRC(pipe));
+	fb->width = ((val >> 16) & 0xfff) + 1;
+	fb->height = ((val >> 0) & 0xfff) + 1;
+
+	val = I915_READ(DSPSTRIDE(pipe));
+	fb->pitches[0] = val & 0xffffffc0;
+
+	aligned_height = intel_fb_align_height(dev, fb->height,
+					       fb->pixel_format,
+					       fb->modifier[0]);
+
+	plane_config->size = fb->pitches[0] * aligned_height;
+
+	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+		      pipe_name(pipe), fb->width, fb->height,
+		      fb->bits_per_pixel, base, fb->pitches[0],
+		      plane_config->size);
+
+	plane_config->fb = intel_fb;
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	if (!intel_display_power_is_enabled(dev_priv,
+					    POWER_DOMAIN_PIPE(crtc->pipe)))
+		return false;
+
+	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+	tmp = I915_READ(PIPECONF(crtc->pipe));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	switch (tmp & PIPECONF_BPC_MASK) {
+	case PIPECONF_6BPC:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case PIPECONF_8BPC:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case PIPECONF_10BPC:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case PIPECONF_12BPC:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+
+	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+		pipe_config->limited_color_range = true;
+
+	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+		struct intel_shared_dpll *pll;
+
+		pipe_config->has_pch_encoder = true;
+
+		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+
+		if (HAS_PCH_IBX(dev_priv->dev)) {
+			pipe_config->shared_dpll =
+				(enum intel_dpll_id) crtc->pipe;
+		} else {
+			tmp = I915_READ(PCH_DPLL_SEL);
+			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
+				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+			else
+				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+		}
+
+		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+		WARN_ON(!pll->get_hw_state(dev_priv, pll,
+					   &pipe_config->dpll_hw_state));
+
+		tmp = pipe_config->dpll_hw_state.dpll;
+		pipe_config->pixel_multiplier =
+			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+		ironlake_pch_clock_get(crtc, pipe_config);
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	ironlake_get_pfit_config(crtc, pipe_config);
+
+	return true;
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(dev, crtc)
+		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+		     pipe_name(crtc->pipe));
+
+	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
+	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+	     "CPU PWM1 enabled\n");
+	if (IS_HASWELL(dev))
+		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+		     "CPU PWM2 enabled\n");
+	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+	     "PCH PWM1 enabled\n");
+	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+	     "Utility pin enabled\n");
+	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+	/*
+	 * In theory we can still leave IRQs enabled, as long as only the HPD
+	 * interrupts remain enabled. We used to check for that, but since it's
+	 * gen-specific and since we only disable LCPLL after we fully disable
+	 * the interrupts, the check below should be enough.
+	 */
+	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (IS_HASWELL(dev))
+		return I915_READ(D_COMP_HSW);
+	else
+		return I915_READ(D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (IS_HASWELL(dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
+					    val))
+			DRM_ERROR("Failed to write to D_COMP\n");
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	} else {
+		I915_WRITE(D_COMP_BDW, val);
+		POSTING_READ(D_COMP_BDW);
+	}
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+			      bool switch_to_fclk, bool allow_power_down)
+{
+	uint32_t val;
+
+	assert_can_disable_lcpll(dev_priv);
+
+	val = I915_READ(LCPLL_CTL);
+
+	if (switch_to_fclk) {
+		val |= LCPLL_CD_SOURCE_FCLK;
+		I915_WRITE(LCPLL_CTL, val);
+
+		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+			DRM_ERROR("Switching to FCLK failed\n");
+
+		val = I915_READ(LCPLL_CTL);
+	}
+
+	val |= LCPLL_PLL_DISABLE;
+	I915_WRITE(LCPLL_CTL, val);
+	POSTING_READ(LCPLL_CTL);
+
+	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+		DRM_ERROR("LCPLL still locked\n");
+
+	val = hsw_read_dcomp(dev_priv);
+	val |= D_COMP_COMP_DISABLE;
+	hsw_write_dcomp(dev_priv, val);
+	ndelay(100);
+
+	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+		     1))
+		DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+	if (allow_power_down) {
+		val = I915_READ(LCPLL_CTL);
+		val |= LCPLL_POWER_DOWN_ALLOW;
+		I915_WRITE(LCPLL_CTL, val);
+		POSTING_READ(LCPLL_CTL);
+	}
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	val = I915_READ(LCPLL_CTL);
+
+	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+		return;
+
+	/*
+	 * Make sure we're not in PC8 state before disabling PC8, otherwise
+	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	if (val & LCPLL_POWER_DOWN_ALLOW) {
+		val &= ~LCPLL_POWER_DOWN_ALLOW;
+		I915_WRITE(LCPLL_CTL, val);
+		POSTING_READ(LCPLL_CTL);
+	}
+
+	val = hsw_read_dcomp(dev_priv);
+	val |= D_COMP_COMP_FORCE;
+	val &= ~D_COMP_COMP_DISABLE;
+	hsw_write_dcomp(dev_priv, val);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_PLL_DISABLE;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+		DRM_ERROR("LCPLL not locked yet\n");
+
+	if (val & LCPLL_CD_SOURCE_FCLK) {
+		val = I915_READ(LCPLL_CTL);
+		val &= ~LCPLL_CD_SOURCE_FCLK;
+		I915_WRITE(LCPLL_CTL, val);
+
+		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+			DRM_ERROR("Switching back to LCPLL failed\n");
+	}
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_update_cdclk(dev_priv->dev);
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually
+ * take care of the rest: we disable the interrupts and clocks and switch the
+ * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt,
+ * we can hard hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	DRM_DEBUG_KMS("Enabling package C8+\n");
+
+	if (HAS_PCH_LPT_LP(dev)) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	lpt_disable_clkout_dp(dev);
+	hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	DRM_DEBUG_KMS("Disabling package C8+\n");
+
+	hsw_restore_lcpll(dev_priv);
+	lpt_init_pch_refclk(dev);
+
+	if (HAS_PCH_LPT_LP(dev)) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	intel_prepare_ddi(dev);
+}
+
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max pixel rate for the new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	int max_pixel_rate = 0;
+
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		int pixel_rate;
+
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->base.enable)
+			continue;
+
+		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
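+		/* e.g. 450000 kHz becomes DIV_ROUND_UP(45000000, 95) = 473685 */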
+
+		max_pixel_rate = max(max_pixel_rate, pixel_rate);
+	}
+
+	return max_pixel_rate;
+}
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val, data;
+	int ret;
+
+	if (WARN((I915_READ(LCPLL_CTL) &
+		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+		 "trying to change cdclk frequency with cdclk not enabled\n"))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_write(dev_priv,
+				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("failed to inform pcode about cdclk change\n");
+		return;
+	}
+
+	val = I915_READ(LCPLL_CTL);
+	val |= LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+		DRM_ERROR("Switching to FCLK failed\n");
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CLK_FREQ_MASK;
+
+	switch (cdclk) {
+	case 450000:
+		val |= LCPLL_CLK_FREQ_450;
+		data = 0;
+		break;
+	case 540000:
+		val |= LCPLL_CLK_FREQ_54O_BDW;
+		data = 1;
+		break;
+	case 337500:
+		val |= LCPLL_CLK_FREQ_337_5_BDW;
+		data = 2;
+		break;
+	case 675000:
+		val |= LCPLL_CLK_FREQ_675_BDW;
+		data = 3;
+		break;
+	default:
+		WARN(1, "invalid cdclk frequency\n");
+		return;
+	}
+
+	I915_WRITE(LCPLL_CTL, val);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+		DRM_ERROR("Switching back to LCPLL failed\n");
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
+	intel_update_cdclk(dev);
+
+	WARN(cdclk != dev_priv->cdclk_freq,
+	     "cdclk requested %d kHz but got %d kHz\n",
+	     cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	int max_pixclk = ilk_max_pixel_rate(state);
+	int cdclk;
+
+	/*
+	 * FIXME should also account for plane ratio
+	 * once 64bpp pixel formats are supported.
+	 */
+	if (max_pixclk > 540000)
+		cdclk = 675000;
+	else if (max_pixclk > 450000)
+		cdclk = 540000;
+	else if (max_pixclk > 337500)
+		cdclk = 450000;
+	else
+		cdclk = 337500;
+
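+	/*
+	 * Example: max_pixclk = 500000 kHz exceeds 450000 but not 540000,
+	 * so the 540000 kHz cdclk step is selected above.
+	 */
+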
+	/*
+	 * FIXME move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+	 */
+	if (cdclk > dev_priv->max_cdclk_freq) {
+		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			  cdclk, dev_priv->max_cdclk_freq);
+		cdclk = dev_priv->max_cdclk_freq;
+	}
+
+	to_intel_atomic_state(state)->cdclk = cdclk;
+
+	return 0;
+}
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broadwell_set_cdclk(dev, req_cdclk);
+}
+
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+				      struct intel_crtc_state *crtc_state)
+{
+	if (!intel_ddi_pll_select(crtc, crtc_state))
+		return -EINVAL;
+
+	crtc->lowfreq_avail = false;
+
+	return 0;
+}
+
+static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+				enum port port,
+				struct intel_crtc_state *pipe_config)
+{
+	switch (port) {
+	case PORT_A:
+		pipe_config->ddi_pll_sel = SKL_DPLL0;
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+		break;
+	case PORT_B:
+		pipe_config->ddi_pll_sel = SKL_DPLL1;
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+		break;
+	case PORT_C:
+		pipe_config->ddi_pll_sel = SKL_DPLL2;
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+		break;
+	default:
+		DRM_ERROR("Incorrect port type\n");
+	}
+}
+
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+				enum port port,
+				struct intel_crtc_state *pipe_config)
+{
+	u32 temp, dpll_ctl1;
+
+	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+
+	switch (pipe_config->ddi_pll_sel) {
+	case SKL_DPLL0:
+		/*
+		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
+		 * of the shared DPLL framework and thus needs to be read out
+		 * separately
+		 */
+		dpll_ctl1 = I915_READ(DPLL_CTRL1);
+		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+		break;
+	case SKL_DPLL1:
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+		break;
+	case SKL_DPLL2:
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+		break;
+	case SKL_DPLL3:
+		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+		break;
+	}
+}
+
+static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+				enum port port,
+				struct intel_crtc_state *pipe_config)
+{
+	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+	switch (pipe_config->ddi_pll_sel) {
+	case PORT_CLK_SEL_WRPLL1:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+		break;
+	case PORT_CLK_SEL_SPLL:
+		pipe_config->shared_dpll = DPLL_ID_SPLL;
+	}
+}
+
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+				       struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll;
+	enum port port;
+	uint32_t tmp;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+	if (IS_SKYLAKE(dev))
+		skylake_get_ddi_pll(dev_priv, port, pipe_config);
+	else if (IS_BROXTON(dev))
+		bxt_get_ddi_pll(dev_priv, port, pipe_config);
+	else
+		haswell_get_ddi_pll(dev_priv, port, pipe_config);
+
+	if (pipe_config->shared_dpll >= 0) {
+		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+		WARN_ON(!pll->get_hw_state(dev_priv, pll,
+					   &pipe_config->dpll_hw_state));
+	}
+
+	/*
+	 * Haswell has only one FDI/PCH transcoder, A, which is connected to
+	 * DDI E. So just check whether this pipe is wired to DDI E and whether
+	 * the PCH transcoder is on.
+	 */
+	if (INTEL_INFO(dev)->gen < 9 &&
+	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+		pipe_config->has_pch_encoder = true;
+
+		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+	}
+}
+
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+				    struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain pfit_domain;
+	uint32_t tmp;
+
+	if (!intel_display_power_is_enabled(dev_priv,
+					 POWER_DOMAIN_PIPE(crtc->pipe)))
+		return false;
+
+	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+	if (tmp & TRANS_DDI_FUNC_ENABLE) {
+		enum pipe trans_edp_pipe;
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		default:
+			WARN(1, "unknown pipe linked to edp transcoder\n");
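+			/* fall through - treat an unknown setting as pipe A */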
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+		case TRANS_DDI_EDP_INPUT_A_ON:
+			trans_edp_pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			trans_edp_pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			trans_edp_pipe = PIPE_C;
+			break;
+		}
+
+		if (trans_edp_pipe == crtc->pipe)
+			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+	}
+
+	if (!intel_display_power_is_enabled(dev_priv,
+			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
+		return false;
+
+	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	haswell_get_ddi_port_state(crtc, pipe_config);
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		skl_init_scalers(dev, crtc, pipe_config);
+	}
+
+	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		pipe_config->scaler_state.scaler_id = -1;
+		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+	}
+
+	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+		if (INTEL_INFO(dev)->gen >= 9)
+			skylake_get_pfit_config(crtc, pipe_config);
+		else
+			ironlake_get_pfit_config(crtc, pipe_config);
+	}
+
+	if (IS_HASWELL(dev))
+		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+			(I915_READ(IPS_CTL) & IPS_ENABLE);
+
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
+
+	return true;
+}
+
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t cntl = 0, size = 0;
+
+	if (on) {
+		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
+		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
+		unsigned int stride = roundup_pow_of_two(width) * 4;
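+		/* e.g. a 100 pixel wide cursor rounds up to 128 pixels,
+		 * i.e. a 512 byte stride, one of the legal values below */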
+
+		switch (stride) {
+		default:
+			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+				  width, stride);
+			stride = 256;
+			/* fallthrough */
+		case 256:
+		case 512:
+		case 1024:
+		case 2048:
+			break;
+		}
+
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB |
+			CURSOR_STRIDE(stride);
+
+		size = (height << 12) | width;
+	}
+
+	if (intel_crtc->cursor_cntl != 0 &&
+	    (intel_crtc->cursor_base != base ||
+	     intel_crtc->cursor_size != size ||
+	     intel_crtc->cursor_cntl != cntl)) {
+		/* On these chipsets we can only modify the base/size/stride
+		 * whilst the cursor is disabled.
+		 */
+		I915_WRITE(CURCNTR(PIPE_A), 0);
+		POSTING_READ(CURCNTR(PIPE_A));
+		intel_crtc->cursor_cntl = 0;
+	}
+
+	if (intel_crtc->cursor_base != base) {
+		I915_WRITE(CURBASE(PIPE_A), base);
+		intel_crtc->cursor_base = base;
+	}
+
+	if (intel_crtc->cursor_size != size) {
+		I915_WRITE(CURSIZE, size);
+		intel_crtc->cursor_size = size;
+	}
+
+	if (intel_crtc->cursor_cntl != cntl) {
+		I915_WRITE(CURCNTR(PIPE_A), cntl);
+		POSTING_READ(CURCNTR(PIPE_A));
+		intel_crtc->cursor_cntl = cntl;
+	}
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t cntl = 0;
+
+	if (on) {
+		cntl = MCURSOR_GAMMA_ENABLE;
+		switch (intel_crtc->base.cursor->state->crtc_w) {
+			case 64:
+				cntl |= CURSOR_MODE_64_ARGB_AX;
+				break;
+			case 128:
+				cntl |= CURSOR_MODE_128_ARGB_AX;
+				break;
+			case 256:
+				cntl |= CURSOR_MODE_256_ARGB_AX;
+				break;
+			default:
+				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
+				return;
+		}
+		cntl |= pipe << 28; /* Connect to correct pipe */
+
+		if (HAS_DDI(dev))
+			cntl |= CURSOR_PIPE_CSC_ENABLE;
+	}
+
+	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
+		cntl |= CURSOR_ROTATE_180;
+
+	if (intel_crtc->cursor_cntl != cntl) {
+		I915_WRITE(CURCNTR(pipe), cntl);
+		POSTING_READ(CURCNTR(pipe));
+		intel_crtc->cursor_cntl = cntl;
+	}
+
+	/* and commit changes on next vblank */
+	I915_WRITE(CURBASE(pipe), base);
+	POSTING_READ(CURBASE(pipe));
+
+	intel_crtc->cursor_base = base;
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+				     bool on)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	struct drm_plane_state *cursor_state = crtc->cursor->state;
+	int x = cursor_state->crtc_x;
+	int y = cursor_state->crtc_y;
+	u32 base = 0, pos = 0;
+
+	base = intel_crtc->cursor_addr;
+
+	if (x >= intel_crtc->config->pipe_src_w)
+		on = false;
+
+	if (y >= intel_crtc->config->pipe_src_h)
+		on = false;
+
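+	/*
+	 * CURPOS holds sign-magnitude coordinates: a negative position
+	 * sets the per-axis sign bit and stores the absolute value.
+	 */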
+	if (x < 0) {
+		if (x + cursor_state->crtc_w <= 0)
+			on = false;
+
+		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+		x = -x;
+	}
+	pos |= x << CURSOR_X_SHIFT;
+
+	if (y < 0) {
+		if (y + cursor_state->crtc_h <= 0)
+			on = false;
+
+		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+		y = -y;
+	}
+	pos |= y << CURSOR_Y_SHIFT;
+
+	I915_WRITE(CURPOS(pipe), pos);
+
+	/* ILK+ do this automagically */
+	if (HAS_GMCH_DISPLAY(dev) &&
+	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
+		base += (cursor_state->crtc_h *
+			 cursor_state->crtc_w - 1) * 4;
+	}
+
+	if (IS_845G(dev) || IS_I865G(dev))
+		i845_update_cursor(crtc, base, on);
+	else
+		i9xx_update_cursor(crtc, base, on);
+}
+
+static bool cursor_size_ok(struct drm_device *dev,
+			   uint32_t width, uint32_t height)
+{
+	if (width == 0 || height == 0)
+		return false;
+
+	/*
+	 * 845g/865g are special in that they are only limited by
+	 * the width of their cursors; the height is arbitrary up to
+	 * the precision of the register. Everything else requires
+	 * square cursors, limited to a few power-of-two sizes.
+	 */
+	if (IS_845G(dev) || IS_I865G(dev)) {
+		if ((width & 63) != 0)
+			return false;
+
+		if (width > (IS_845G(dev) ? 64 : 512))
+			return false;
+
+		if (height > 1023)
+			return false;
+	} else {
+		switch (width | height) {
+		case 256:
+		case 128:
+			if (IS_GEN2(dev))
+				return false;
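+			/* fall through - 256 and 128 are valid on gen3+ */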
+		case 64:
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t start, uint32_t size)
+{
+	int end = (start + size > 256) ? 256 : start + size, i;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	for (i = start; i < end; i++) {
+		intel_crtc->lut_r[i] = red[i] >> 8;
+		intel_crtc->lut_g[i] = green[i] >> 8;
+		intel_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	intel_crtc_load_lut(crtc);
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj)
+{
+	struct intel_framebuffer *intel_fb;
+	int ret;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		drm_gem_object_unreference(&obj->base);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+	if (ret)
+		goto err;
+
+	return &intel_fb->base;
+err:
+	drm_gem_object_unreference(&obj->base);
+	kfree(intel_fb);
+
+	return ERR_PTR(ret);
+}
+
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_i915_gem_object *obj)
+{
+	struct drm_framebuffer *fb;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return fb;
+}
+
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
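+	/* e.g. width 1366 at 32 bpp: 1366 * 32 / 8 = 5464 bytes, rounded
+	 * up to the next 64-byte multiple below, 5504 */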
+	return ALIGN(pitch, 64);
+}
+
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+	return PAGE_ALIGN(pitch * mode->vdisplay);
+}
+
+static struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+				  struct drm_display_mode *mode,
+				  int depth, int bpp)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+	obj = i915_gem_alloc_object(dev,
+				    intel_framebuffer_size_for_mode(mode, bpp));
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mode_cmd.width = mode->hdisplay;
+	mode_cmd.height = mode->vdisplay;
+	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+								bpp);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+static struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+		   struct drm_display_mode *mode)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_framebuffer *fb;
+
+	if (!dev_priv->fbdev)
+		return NULL;
+
+	if (!dev_priv->fbdev->fb)
+		return NULL;
+
+	obj = dev_priv->fbdev->fb->obj;
+	BUG_ON(!obj);
+
+	fb = &dev_priv->fbdev->fb->base;
+	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+							       fb->bits_per_pixel))
+		return NULL;
+
+	if (obj->base.size < mode->vdisplay * fb->pitches[0])
+		return NULL;
+
+	return fb;
+#else
+	return NULL;
+#endif
+}
+
+static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
+					   struct drm_crtc *crtc,
+					   struct drm_display_mode *mode,
+					   struct drm_framebuffer *fb,
+					   int x, int y)
+{
+	struct drm_plane_state *plane_state;
+	int hdisplay, vdisplay;
+	int ret;
+
+	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
+	if (IS_ERR(plane_state))
+		return PTR_ERR(plane_state);
+
+	if (mode)
+		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+	else
+		hdisplay = vdisplay = 0;
+
+	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+	if (ret)
+		return ret;
+	drm_atomic_set_fb_for_plane(plane_state, fb);
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
+	plane_state->crtc_w = hdisplay;
+	plane_state->crtc_h = vdisplay;
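+	/* The src rectangle is specified in 16.16 fixed point. */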
+	plane_state->src_x = x << 16;
+	plane_state->src_y = y << 16;
+	plane_state->src_w = hdisplay << 16;
+	plane_state->src_h = vdisplay << 16;
+
+	return 0;
+}
+
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old,
+				struct drm_modeset_acquire_ctx *ctx)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
+	struct drm_crtc *possible_crtc;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = NULL;
+	struct drm_device *dev = encoder->dev;
+	struct drm_framebuffer *fb;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_atomic_state *state = NULL;
+	struct drm_connector_state *connector_state;
+	struct intel_crtc_state *crtc_state;
+	int ret, i = -1;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, connector->name,
+		      encoder->base.id, encoder->name);
+
+retry:
+	ret = drm_modeset_lock(&config->connection_mutex, ctx);
+	if (ret)
+		goto fail;
+
+	/*
+	 * Algorithm gets a little messy:
+	 *
+	 *   - if the connector already has an assigned crtc, use it (but make
+	 *     sure it's on first)
+	 *
+	 *   - try to find the first unused crtc that can drive this connector,
+	 *     and use that if we find one
+	 */
+
+	/* See if we already have a CRTC for this connector */
+	if (encoder->crtc) {
+		crtc = encoder->crtc;
+
+		ret = drm_modeset_lock(&crtc->mutex, ctx);
+		if (ret)
+			goto fail;
+		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+		if (ret)
+			goto fail;
+
+		old->dpms_mode = connector->dpms;
+		old->load_detect_temp = false;
+
+		/* Make sure the crtc and connector are running */
+		if (connector->dpms != DRM_MODE_DPMS_ON)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+		return true;
+	}
+
+	/* Find an unused one (if possible) */
+	for_each_crtc(dev, possible_crtc) {
+		i++;
+		if (!(encoder->possible_crtcs & (1 << i)))
+			continue;
+		if (possible_crtc->state->enable)
+			continue;
+
+		crtc = possible_crtc;
+		break;
+	}
+
+	/*
+	 * If we didn't find an unused CRTC, don't use any.
+	 */
+	if (!crtc) {
+		DRM_DEBUG_KMS("no pipe available for load-detect\n");
+		goto fail;
+	}
+
+	ret = drm_modeset_lock(&crtc->mutex, ctx);
+	if (ret)
+		goto fail;
+	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+	if (ret)
+		goto fail;
+
+	intel_crtc = to_intel_crtc(crtc);
+	old->dpms_mode = connector->dpms;
+	old->load_detect_temp = true;
+	old->release_fb = NULL;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return false;
+
+	state->acquire_ctx = ctx;
+
+	connector_state = drm_atomic_get_connector_state(state, connector);
+	if (IS_ERR(connector_state)) {
+		ret = PTR_ERR(connector_state);
+		goto fail;
+	}
+
+	connector_state->crtc = crtc;
+	connector_state->best_encoder = &intel_encoder->base;
+
+	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	crtc_state->base.active = crtc_state->base.enable = true;
+
+	if (!mode)
+		mode = &load_detect_mode;
+
+	/* We need a framebuffer large enough to accommodate all accesses
+	 * that the plane may generate whilst we perform load detection.
+	 * We cannot rely on the fbcon either being present (we get called
+	 * during its initialisation to detect all boot displays, or it may
+	 * not even exist) or that it is large enough to satisfy the
+	 * requested mode.
+	 */
+	fb = mode_fits_in_fbdev(dev, mode);
+	if (fb == NULL) {
+		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = fb;
+	} else
+		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+		goto fail;
+	}
+
+	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+	if (ret)
+		goto fail;
+
+	drm_mode_copy(&crtc_state->base.mode, mode);
+
+	if (drm_atomic_commit(state)) {
+		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+		if (old->release_fb)
+			old->release_fb->funcs->destroy(old->release_fb);
+		goto fail;
+	}
+	crtc->primary->crtc = crtc;
+
+	/* let the connector get through one full cycle before testing */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	return true;
+
+fail:
+	drm_atomic_state_free(state);
+	state = NULL;
+
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(ctx);
+		goto retry;
+	}
+
+	return false;
+}
+
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old,
+				    struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_atomic_state *state;
+	struct drm_connector_state *connector_state;
+	struct intel_crtc_state *crtc_state;
+	int ret;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, connector->name,
+		      encoder->base.id, encoder->name);
+
+	if (old->load_detect_temp) {
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			goto fail;
+
+		state->acquire_ctx = ctx;
+
+		connector_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(connector_state))
+			goto fail;
+
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			goto fail;
+
+		connector_state->best_encoder = NULL;
+		connector_state->crtc = NULL;
+
+		crtc_state->base.enable = crtc_state->base.active = false;
+
+		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
+						      0, 0);
+		if (ret)
+			goto fail;
+
+		ret = drm_atomic_commit(state);
+		if (ret)
+			goto fail;
+
+		if (old->release_fb) {
+			drm_framebuffer_unregister_private(old->release_fb);
+			drm_framebuffer_unreference(old->release_fb);
+		}
+
+		return;
+	}
+
+	/* Switch crtc and encoder back off if necessary */
+	if (old->dpms_mode != DRM_MODE_DPMS_ON)
+		connector->funcs->dpms(connector, old->dpms_mode);
+
+	return;
+fail:
+	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+	drm_atomic_state_free(state);
+}
+
+static int i9xx_pll_refclk(struct drm_device *dev,
+			   const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+		return dev_priv->vbt.lvds_ssc_freq;
+	else if (HAS_PCH_SPLIT(dev))
+		return 120000;
+	else if (!IS_GEN2(dev))
+		return 96000;
+	else
+		return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = pipe_config->cpu_transcoder;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+	u32 fp;
+	intel_clock_t clock;
+	int port_clock;
+	int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+		fp = pipe_config->dpll_hw_state.fp0;
+	else
+		fp = pipe_config->dpll_hw_state.fp1;
+
+	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+	if (IS_PINEVIEW(dev)) {
+		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	} else {
+		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	}
+
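+	/*
+	 * The P1 post divider field is programmed as a one-hot mask,
+	 * i.e. 1 << (p1 - 1), so ffs() recovers the divisor itself.
+	 */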
+	if (!IS_GEN2(dev)) {
+		if (IS_PINEVIEW(dev))
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+		else
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+			       DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+		switch (dpll & DPLL_MODE_MASK) {
+		case DPLLB_MODE_DAC_SERIAL:
+			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+				5 : 10;
+			break;
+		case DPLLB_MODE_LVDS:
+			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+				7 : 14;
+			break;
+		default:
+			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
+			return;
+		}
+
+		if (IS_PINEVIEW(dev))
+			port_clock = pnv_calc_dpll_params(refclk, &clock);
+		else
+			port_clock = i9xx_calc_dpll_params(refclk, &clock);
+	} else {
+		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+
+		if (is_lvds) {
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+				       DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+			if (lvds & LVDS_CLKB_POWER_UP)
+				clock.p2 = 7;
+			else
+				clock.p2 = 14;
+		} else {
+			if (dpll & PLL_P1_DIVIDE_BY_TWO)
+				clock.p1 = 2;
+			else {
+				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+			}
+			if (dpll & PLL_P2_DIVIDE_BY_4)
+				clock.p2 = 4;
+			else
+				clock.p2 = 2;
+		}
+
+		port_clock = i9xx_calc_dpll_params(refclk, &clock);
+	}
+
+	/*
+	 * This value includes pixel_multiplier. We will use
+	 * port_clock to compute adjusted_mode.crtc_clock in the
+	 * encoder's get_config() function.
+	 */
+	pipe_config->port_clock = port_clock;
+}
+
+int intel_dotclock_calculate(int link_freq,
+			     const struct intel_link_m_n *m_n)
+{
+	/*
+	 * The calculation for the data clock is:
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
+	 *
+	 * and for the link M/N the dot clock is simpler:
+	 * pixel_clock = (m * link_clock) / n
+	 */
+
+	if (!m_n->link_n)
+		return 0;
+
+	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
+
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+
+	/* read out port_clock from the DPLL */
+	i9xx_crtc_clock_get(crtc, pipe_config);
+
+	/*
+	 * This value does not include pixel_multiplier.
+	 * We will check that port_clock and adjusted_mode.crtc_clock
+	 * agree once we know their relationship in the encoder's
+	 * get_config() function.
+	 */
+	pipe_config->base.adjusted_mode.crtc_clock =
+		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+					 &pipe_config->fdi_m_n);
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	struct drm_display_mode *mode;
+	struct intel_crtc_state pipe_config;
+	int htot = I915_READ(HTOTAL(cpu_transcoder));
+	int hsync = I915_READ(HSYNC(cpu_transcoder));
+	int vtot = I915_READ(VTOTAL(cpu_transcoder));
+	int vsync = I915_READ(VSYNC(cpu_transcoder));
+	enum pipe pipe = intel_crtc->pipe;
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	/*
+	 * Construct a pipe_config sufficient for getting the clock info
+	 * back out of crtc_clock_get.
+	 *
+	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+	 * to use a real value here instead.
+	 */
+	pipe_config.cpu_transcoder = (enum transcoder) pipe;
+	pipe_config.pixel_multiplier = 1;
+	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
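+	/* The timing registers store each value minus one, hence the +1. */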
+	mode->hdisplay = (htot & 0xffff) + 1;
+	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+	mode->hsync_start = (hsync & 0xffff) + 1;
+	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+	mode->vdisplay = (vtot & 0xffff) + 1;
+	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+	mode->vsync_start = (vsync & 0xffff) + 1;
+	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+	drm_mode_set_name(mode);
+
+	return mode;
+}
+
+void intel_mark_busy(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->mm.busy)
+		return;
+
+	intel_runtime_pm_get(dev_priv);
+	i915_update_gfx_val(dev_priv);
+	if (INTEL_INFO(dev)->gen >= 6)
+		gen6_rps_busy(dev_priv);
+	dev_priv->mm.busy = true;
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->mm.busy)
+		return;
+
+	dev_priv->mm.busy = false;
+
+	if (INTEL_INFO(dev)->gen >= 6)
+		gen6_rps_idle(dev->dev_private);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct intel_unpin_work *work;
+
+	spin_lock_irq(&dev->event_lock);
+	work = intel_crtc->unpin_work;
+	intel_crtc->unpin_work = NULL;
+	spin_unlock_irq(&dev->event_lock);
+
+	if (work) {
+		cancel_work_sync(&work->work);
+		kfree(work);
+	}
+
+	drm_crtc_cleanup(crtc);
+
+	kfree(intel_crtc);
+}
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+	struct intel_unpin_work *work =
+		container_of(__work, struct intel_unpin_work, work);
+	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *primary = crtc->base.primary;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_unpin_fb_obj(work->old_fb, primary->state);
+	drm_gem_object_unreference(&work->pending_flip_obj->base);
+
+	if (work->flip_queued_req)
+		i915_gem_request_assign(&work->flip_queued_req, NULL);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+	drm_framebuffer_unreference(work->old_fb);
+
+	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+	atomic_dec(&crtc->unpin_work_count);
+
+	kfree(work);
+}
+
+static void do_intel_finish_page_flip(struct drm_device *dev,
+				      struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	unsigned long flags;
+
+	/* Ignore early vblank irqs */
+	if (intel_crtc == NULL)
+		return;
+
+	/*
+	 * This is called both by irq handlers and the reset code (to complete
+	 * lost pageflips) so needs the full irqsave spinlocks.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+
+	/* Ensure we don't miss a work->pending update ... */
+	smp_rmb();
+
+	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return;
+	}
+
+	page_flip_completed(intel_crtc);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+	do_intel_finish_page_flip(dev, crtc);
+}
+
+/* Is 'a' after or equal to 'b'? */
+static bool g4x_flip_count_after_eq(u32 a, u32 b)
+{
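+	/*
+	 * The subtraction wraps modulo 2^32, so this stays correct
+	 * across counter wraparound: e.g. a = 1, b = 0xffffffff gives
+	 * a - b = 2, whose sign bit is clear.
+	 */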
+	return !((a - b) & 0x80000000);
+}
+
+static bool page_flip_finished(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+		return true;
+
+	/*
+	 * The relevant registers don't exist on pre-ctg.
+	 * As the flip done interrupt doesn't trigger for mmio
+	 * flips on gmch platforms, a flip count check isn't
+	 * really needed there. But since ctg has the registers,
+	 * include it in the check anyway.
+	 */
+	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+		return true;
+
+	/*
+	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
+	 * used the same base address. In that case the mmio flip might
+	 * have completed, but the CS hasn't even executed the flip yet.
+	 *
+	 * A flip count check isn't enough as the CS might have updated
+	 * the base address just after start of vblank, but before we
+	 * managed to process the interrupt. This means we'd complete the
+	 * CS flip too soon.
+	 *
+	 * Combining both checks should get us a good enough result. It may
+	 * still happen that the CS flip has been executed, but has not
+	 * yet actually completed. But in case the base address is the same
+	 * anyway, we don't really care.
+	 */
+	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
+		crtc->unpin_work->gtt_offset &&
+		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
+				    crtc->unpin_work->flip_count);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+	unsigned long flags;
+
+	/*
+	 * This is called both by irq handlers and the reset code (to complete
+	 * lost pageflips) so needs the full irqsave spinlocks.
+	 *
+	 * NB: An MMIO update of the plane base pointer will also
+	 * generate a page-flip completion irq, i.e. every modeset
+	 * is also accompanied by a spurious intel_prepare_page_flip().
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
+		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+{
+	/* Ensure that the work item is consistent when activating it ... */
+	smp_wmb();
+	atomic_set(&work->pending, INTEL_FLIP_PENDING);
+	/* and that it is marked active as soon as the irq could fire. */
+	smp_wmb();
+}
+
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_i915_gem_request *req,
+				 uint32_t flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	/* Can't queue multiple flips, so wait for the previous
+	 * one to finish before executing the next.
+	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+	intel_mark_page_flip_active(intel_crtc->unpin_work);
+	return 0;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_i915_gem_request *req,
+				 uint32_t flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_mark_page_flip_active(intel_crtc->unpin_work);
+	return 0;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_i915_gem_request *req,
+				 uint32_t flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	/* i965+ uses the linear or tiled offsets from the
+	 * Display Registers (which do not change across a page-flip)
+	 * so we need only reprogram the base address.
+	 */
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
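+	/* The base address is page aligned, so the tiling mode can be
+	 * carried in its low bits. */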
+	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+			obj->tiling_mode);
+
+	/* XXX Enabling the panel-fitter across page-flip is so far
+	 * untested on non-native modes, so ignore it for now.
+	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc->unpin_work);
+	return 0;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_i915_gem_request *req,
+				 uint32_t flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+	/* Contrary to the suggestions in the documentation,
+	 * "Enable Panel Fitter" does not seem to be required when page
+	 * flipping with a non-native mode, and worse, causes a normal
+	 * modeset to fail.
+	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc->unpin_work);
+	return 0;
+}
+
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_i915_gem_request *req,
+				 uint32_t flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t plane_bit = 0;
+	int len, ret;
+
+	switch (intel_crtc->plane) {
+	case PLANE_A:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+		break;
+	case PLANE_B:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+		break;
+	case PLANE_C:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+		break;
+	default:
+		WARN_ONCE(1, "unknown plane in flip command\n");
+		return -ENODEV;
+	}
+
+	len = 4;
+	if (ring->id == RCS) {
+		len += 6;
+		/*
+		 * On Gen 8, SRM takes an extra dword to accommodate
+		 * 48-bit addresses, and we need a NOOP for the batch size to
+		 * stay even.
+		 */
+		if (IS_GEN8(dev))
+			len += 2;
+	}
+
+	/*
+	 * BSpec MI_DISPLAY_FLIP for IVB:
+	 * "The full packet must be contained within the same cache line."
+	 *
+	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+	 * cacheline; if we ever start emitting more commands before
+	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
+	 * then do the cacheline alignment, and finally emit the
+	 * MI_DISPLAY_FLIP.
+	 */
+	ret = intel_ring_cacheline_align(req);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, len);
+	if (ret)
+		return ret;
+
+	/* Unmask the flip-done completion message. Note that the bspec says that
+	 * we should do this for both the BCS and RCS, and that we must not unmask
+	 * more than one flip event at any time (or ensure that one flip message
+	 * can be sent by waiting for flip-done prior to queueing new flips).
+	 * Experimentation says that BCS works despite DERRMR masking all
+	 * flip-done completion events and that unmasking all planes at once
+	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
+	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
+	 */
+	if (ring->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					DERRMR_PIPEB_PRI_FLIP_DONE |
+					DERRMR_PIPEC_PRI_FLIP_DONE));
+		if (IS_GEN8(dev))
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
+					      MI_SRM_LRM_GLOBAL_GTT);
+		else
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
+					      MI_SRM_LRM_GLOBAL_GTT);
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+		if (IS_GEN8(dev)) {
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
+		}
+	}
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_mark_page_flip_active(intel_crtc->unpin_work);
+	return 0;
+}
+
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+			  struct drm_i915_gem_object *obj)
+{
+	/*
+	 * MMIO flips are not used on older platforms, because the lack
+	 * of a flip done interrupt forces us to use CS flips there.
+	 * Older platforms derive flip done using some clever tricks
+	 * involving the flip_pending status bits and vblank irqs, so
+	 * using MMIO flips there would disrupt this mechanism.
+	 */
+
+	if (ring == NULL)
+		return true;
+
+	if (INTEL_INFO(ring->dev)->gen < 5)
+		return false;
+
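+	/* i915.use_mmio_flip: -1 never, 0 driver discretion, 1 always */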
+	if (i915.use_mmio_flip < 0)
+		return false;
+	else if (i915.use_mmio_flip > 0)
+		return true;
+	else if (i915.enable_execlists)
+		return true;
+	else
+		return ring != i915_gem_request_get_ring(obj->last_write_req);
+}
+
+static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
+			     struct intel_unpin_work *work)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
+	const enum pipe pipe = intel_crtc->pipe;
+	u32 ctl, stride;
+
+	ctl = I915_READ(PLANE_CTL(pipe, 0));
+	ctl &= ~PLANE_CTL_TILED_MASK;
+	switch (fb->modifier[0]) {
+	case DRM_FORMAT_MOD_NONE:
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		ctl |= PLANE_CTL_TILED_X;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		ctl |= PLANE_CTL_TILED_Y;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		ctl |= PLANE_CTL_TILED_YF;
+		break;
+	default:
+		MISSING_CASE(fb->modifier[0]);
+	}
+
+	/*
+	 * The stride is expressed either as a multiple of 64 byte chunks
+	 * for linear buffers or as a number of tiles for tiled buffers.
+	 */
+	stride = fb->pitches[0] /
+		 intel_fb_stride_alignment(dev, fb->modifier[0],
+					   fb->pixel_format);
+
+	/*
+	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
+	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
+	 */
+	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
+	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
+	POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
+static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
+			     struct intel_unpin_work *work)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_framebuffer *intel_fb =
+		to_intel_framebuffer(intel_crtc->base.primary->fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	u32 dspcntr;
+	u32 reg;
+
+	reg = DSPCNTR(intel_crtc->plane);
+	dspcntr = I915_READ(reg);
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dspcntr |= DISPPLANE_TILED;
+	else
+		dspcntr &= ~DISPPLANE_TILED;
+
+	I915_WRITE(reg, dspcntr);
+
+	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
+	POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+/*
+ * XXX: This is the temporary way to update the plane registers until we get
+ * around to using the usual plane update functions for MMIO flips
+ */
+static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
+{
+	struct intel_crtc *crtc = mmio_flip->crtc;
+	struct intel_unpin_work *work;
+
+	spin_lock_irq(&crtc->base.dev->event_lock);
+	work = crtc->unpin_work;
+	spin_unlock_irq(&crtc->base.dev->event_lock);
+	if (work == NULL)
+		return;
+
+	intel_mark_page_flip_active(work);
+
+	intel_pipe_update_start(crtc);
+
+	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
+		skl_do_mmio_flip(crtc, work);
+	else
+		/* use_mmio_flip() restricts MMIO flips to ilk+ */
+		ilk_do_mmio_flip(crtc, work);
+
+	intel_pipe_update_end(crtc);
+}
+
+static void intel_mmio_flip_work_func(struct work_struct *work)
+{
+	struct intel_mmio_flip *mmio_flip =
+		container_of(work, struct intel_mmio_flip, work);
+
+	if (mmio_flip->req) {
+		WARN_ON(__i915_wait_request(mmio_flip->req,
+					    mmio_flip->crtc->reset_counter,
+					    false, NULL,
+					    &mmio_flip->i915->rps.mmioflips));
+		i915_gem_request_unreference__unlocked(mmio_flip->req);
+	}
+
+	intel_do_mmio_flip(mmio_flip);
+	kfree(mmio_flip);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct intel_engine_cs *ring,
+				 uint32_t flags)
+{
+	struct intel_mmio_flip *mmio_flip;
+
+	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
+	if (mmio_flip == NULL)
+		return -ENOMEM;
+
+	mmio_flip->i915 = to_i915(dev);
+	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
+	mmio_flip->crtc = to_intel_crtc(crtc);
+
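+	/*
+	 * The actual register writes happen from the work item, which
+	 * first waits for any outstanding GPU write to the object.
+	 */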
+	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
+	schedule_work(&mmio_flip->work);
+
+	return 0;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj,
+				    struct drm_i915_gem_request *req,
+				    uint32_t flags)
+{
+	return -ENODEV;
+}
+
+static bool __intel_pageflip_stall_check(struct drm_device *dev,
+					 struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work = intel_crtc->unpin_work;
+	u32 addr;
+
+	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
+		return true;
+
+	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+		return false;
+
+	if (!work->enable_stall_check)
+		return false;
+
+	if (work->flip_ready_vblank == 0) {
+		if (work->flip_queued_req &&
+		    !i915_gem_request_completed(work->flip_queued_req, true))
+			return false;
+
+		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+	}
+
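+	/* Give the flip at least 3 vblanks to complete before we start
+	 * suspecting a missed interrupt. */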
+	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+		return false;
+
+	/* Potential stall - if we see that the flip has happened,
+	 * assume a missed interrupt. */
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
+	else
+		addr = I915_READ(DSPADDR(intel_crtc->plane));
+
+	/* There is a potential issue here with a false positive after a flip
+	 * to the same address. We could address this by checking for a
+	 * non-incrementing frame counter.
+	 */
+	return addr == work->gtt_offset;
+}
+
+void intel_check_page_flip(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+
+	WARN_ON(!in_interrupt());
+
+	if (crtc == NULL)
+		return;
+
+	spin_lock(&dev->event_lock);
+	work = intel_crtc->unpin_work;
+	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
+		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
+			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+		page_flip_completed(intel_crtc);
+		work = NULL;
+	}
+	if (work != NULL &&
+	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
+		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+	spin_unlock(&dev->event_lock);
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t page_flip_flags)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *primary = crtc->primary;
+	enum pipe pipe = intel_crtc->pipe;
+	struct intel_unpin_work *work;
+	struct intel_engine_cs *ring;
+	bool mmio_flip;
+	struct drm_i915_gem_request *request = NULL;
+	int ret;
+
+	/*
+	 * drm_mode_page_flip_ioctl() should already catch this, but double
+	 * check to be safe.  In the future we may enable pageflipping from
+	 * a disabled primary plane.
+	 */
+	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+		return -EBUSY;
+
+	/* Can't change pixel format via MI display flips. */
+	if (fb->pixel_format != crtc->primary->fb->pixel_format)
+		return -EINVAL;
+
+	/*
+	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these registers.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 &&
+	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
+	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
+		return -EINVAL;
+
+	if (i915_terminally_wedged(&dev_priv->gpu_error))
+		goto out_hang;
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	work->event = event;
+	work->crtc = crtc;
+	work->old_fb = old_fb;
+	INIT_WORK(&work->work, intel_unpin_work_fn);
+
+	ret = drm_crtc_vblank_get(crtc);
+	if (ret)
+		goto free_work;
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irq(&dev->event_lock);
+	if (intel_crtc->unpin_work) {
+		/* Before declaring the flip queue wedged, check if
+		 * the hardware completed the operation behind our backs.
+		 */
+		if (__intel_pageflip_stall_check(dev, crtc)) {
+			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
+			page_flip_completed(intel_crtc);
+		} else {
+			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+			spin_unlock_irq(&dev->event_lock);
+
+			drm_crtc_vblank_put(crtc);
+			kfree(work);
+			return -EBUSY;
+		}
+	}
+	intel_crtc->unpin_work = work;
+	spin_unlock_irq(&dev->event_lock);
+
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
+	/* Reference the objects for the scheduled work. */
+	drm_framebuffer_reference(work->old_fb);
+	drm_gem_object_reference(&obj->base);
+
+	crtc->primary->fb = fb;
+	update_state_fb(crtc->primary);
+
+	work->pending_flip_obj = obj;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto cleanup;
+
+	atomic_inc(&intel_crtc->unpin_work_count);
+	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
+
+	if (IS_VALLEYVIEW(dev)) {
+		ring = &dev_priv->ring[BCS];
+		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
+			/* vlv: DISPLAY_FLIP fails to change tiling */
+			ring = NULL;
+	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+		ring = &dev_priv->ring[BCS];
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		ring = i915_gem_request_get_ring(obj->last_write_req);
+		if (ring == NULL || ring->id != RCS)
+			ring = &dev_priv->ring[BCS];
+	} else {
+		ring = &dev_priv->ring[RCS];
+	}
+
+	mmio_flip = use_mmio_flip(ring, obj);
+
+	/* When using CS flips, we want to emit semaphores between rings.
+	 * However, when using mmio flips we will create a task to do the
+	 * synchronisation, so all we want here is to pin the framebuffer
+	 * into the display plane and skip any waits.
+	 */
+	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+					 crtc->primary->state,
+					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
+	if (ret)
+		goto cleanup_pending;
+
+	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
+						  obj, 0);
+	work->gtt_offset += intel_crtc->dspaddr_offset;
+
+	if (mmio_flip) {
+		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+					    page_flip_flags);
+		if (ret)
+			goto cleanup_unpin;
+
+		i915_gem_request_assign(&work->flip_queued_req,
+					obj->last_write_req);
+	} else {
+		if (!request) {
+			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
+			if (ret)
+				goto cleanup_unpin;
+		}
+
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
+						   page_flip_flags);
+		if (ret)
+			goto cleanup_unpin;
+
+		i915_gem_request_assign(&work->flip_queued_req, request);
+	}
+
+	if (request)
+		i915_add_request_no_flush(request);
+
+	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
+	work->enable_stall_check = true;
+
+	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+			  to_intel_plane(primary)->frontbuffer_bit);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_fbc_disable_crtc(intel_crtc);
+	intel_frontbuffer_flip_prepare(dev,
+				       to_intel_plane(primary)->frontbuffer_bit);
+
+	trace_i915_flip_request(intel_crtc->plane, obj);
+
+	return 0;
+
+cleanup_unpin:
+	intel_unpin_fb_obj(fb, crtc->primary->state);
+cleanup_pending:
+	if (request)
+		i915_gem_request_cancel(request);
+	atomic_dec(&intel_crtc->unpin_work_count);
+	mutex_unlock(&dev->struct_mutex);
+cleanup:
+	crtc->primary->fb = old_fb;
+	update_state_fb(crtc->primary);
+
+	drm_gem_object_unreference_unlocked(&obj->base);
+	drm_framebuffer_unreference(work->old_fb);
+
+	spin_lock_irq(&dev->event_lock);
+	intel_crtc->unpin_work = NULL;
+	spin_unlock_irq(&dev->event_lock);
+
+	drm_crtc_vblank_put(crtc);
+free_work:
+	kfree(work);
+
+	if (ret == -EIO) {
+		struct drm_atomic_state *state;
+		struct drm_plane_state *plane_state;
+
+out_hang:
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			return -ENOMEM;
+		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+		plane_state = drm_atomic_get_plane_state(state, primary);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			drm_atomic_set_fb_for_plane(plane_state, fb);
+
+			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+			if (!ret)
+				ret = drm_atomic_commit(state);
+		}
+
+		if (ret == -EDEADLK) {
+			drm_modeset_backoff(state->acquire_ctx);
+			drm_atomic_state_clear(state);
+			goto retry;
+		}
+
+		if (ret)
+			drm_atomic_state_free(state);
+
+		if (ret == 0 && event) {
+			spin_lock_irq(&dev->event_lock);
+			drm_send_vblank_event(dev, pipe, event);
+			spin_unlock_irq(&dev->event_lock);
+		}
+	}
+	return ret;
+}
+
+
+/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated.
+ */
+static bool intel_wm_need_update(struct drm_plane *plane,
+				 struct drm_plane_state *state)
+{
+	/* Update watermarks on tiling changes. */
+	if (!plane->state->fb || !state->fb ||
+	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+	    plane->state->rotation != state->rotation)
+		return true;
+
+	if (plane->state->crtc_w != state->crtc_w)
+		return true;
+
+	return false;
+}
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *plane = plane_state->plane;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(plane->state);
+	int idx = intel_crtc->base.base.id, ret;
+	int i = drm_plane_index(plane);
+	bool mode_changed = needs_modeset(crtc_state);
+	bool was_crtc_enabled = crtc->state->active;
+	bool is_crtc_enabled = crtc_state->active;
+
+	bool turn_off, turn_on, visible, was_visible;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+	    plane->type != DRM_PLANE_TYPE_CURSOR) {
+		ret = skl_update_scaler_plane(
+			to_intel_crtc_state(crtc_state),
+			to_intel_plane_state(plane_state));
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Disabling a plane is always okay; we just need to update
+	 * fb tracking in a special way since cleanup_fb() won't
+	 * get called by the plane helpers.
+	 */
+	if (old_plane_state->base.fb && !fb)
+		intel_crtc->atomic.disabled_planes |= 1 << i;
+
+	was_visible = old_plane_state->visible;
+	visible = to_intel_plane_state(plane_state)->visible;
+
+	if (!was_crtc_enabled && WARN_ON(was_visible))
+		was_visible = false;
+
+	if (!is_crtc_enabled && WARN_ON(visible))
+		visible = false;
+
+	if (!was_visible && !visible)
+		return 0;
+
+	turn_off = was_visible && (!visible || mode_changed);
+	turn_on = visible && (!was_visible || mode_changed);
+
+	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+			 plane->base.id, fb ? fb->base.id : -1);
+
+	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+			 plane->base.id, was_visible, visible,
+			 turn_off, turn_on, mode_changed);
+
+	if (turn_on) {
+		intel_crtc->atomic.update_wm_pre = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			intel_crtc->atomic.disable_cxsr = true;
+			/* to potentially re-enable cxsr */
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_wm_post = true;
+		}
+	} else if (turn_off) {
+		intel_crtc->atomic.update_wm_post = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			if (is_crtc_enabled)
+				intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.disable_cxsr = true;
+		}
+	} else if (intel_wm_need_update(plane, plane_state)) {
+		intel_crtc->atomic.update_wm_pre = true;
+	}
+
+	if (visible || was_visible)
+		intel_crtc->atomic.fb_bits |=
+			to_intel_plane(plane)->frontbuffer_bit;
+
+	switch (plane->type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		intel_crtc->atomic.wait_for_flips = true;
+		intel_crtc->atomic.pre_disable_primary = turn_off;
+		intel_crtc->atomic.post_enable_primary = turn_on;
+
+		if (turn_off) {
+			/*
+			 * FIXME: Actually if we still have any other plane
+			 * enabled on the pipe we could leave IPS enabled, but
+			 * for now assume that IPS has to be disabled whenever
+			 * we make the primary invisible by setting DSPCNTR
+			 * to 0 in the update_primary_plane function.
+			 */
+			intel_crtc->atomic.disable_ips = true;
+
+			intel_crtc->atomic.disable_fbc = true;
+		}
+
+		/*
+		 * FBC does not work on some platforms for rotated
+		 * planes, so disable it when rotation is not 0 and
+		 * update it when rotation is set back to 0.
+		 *
+		 * FIXME: This is redundant with the fbc update done in
+		 * the primary plane enable function, except that the
+		 * latter is done too late. We eventually need to unify
+		 * this.
+		 */
+
+		if (visible &&
+		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+		    dev_priv->fbc.crtc == intel_crtc &&
+		    plane_state->rotation != BIT(DRM_ROTATE_0))
+			intel_crtc->atomic.disable_fbc = true;
+
+		/*
+		 * BDW signals flip done immediately if the plane
+		 * is disabled, even if the plane enable is already
+		 * armed to occur at the next vblank :(
+		 */
+		if (turn_on && IS_BROADWELL(dev))
+			intel_crtc->atomic.wait_vblank = true;
+
+		intel_crtc->atomic.update_fbc |= visible || mode_changed;
+		break;
+	case DRM_PLANE_TYPE_CURSOR:
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+		if (turn_off && !mode_changed) {
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_sprite_watermarks |=
+				1 << i;
+		}
+	}
+	return 0;
+}
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+			       const struct intel_encoder *b)
+{
+	/* masks could be asymmetric, so check both ways */
+	return a == b || (a->cloneable & (1 << b->type) &&
+			  b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+					 struct intel_crtc *crtc,
+					 struct intel_encoder *encoder)
+{
+	struct intel_encoder *source_encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		source_encoder =
+			to_intel_encoder(connector_state->best_encoder);
+		if (!encoders_cloneable(encoder, source_encoder))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	struct intel_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+		if (!check_single_encoder_cloning(state, crtc, encoder))
+			return false;
+	}
+
+	return true;
+}
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc_state);
+	struct drm_atomic_state *state = crtc_state->state;
+	int ret;
+	bool mode_changed = needs_modeset(crtc_state);
+
+	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+		return -EINVAL;
+	}
+
+	if (mode_changed && !crtc_state->active)
+		intel_crtc->atomic.update_wm_post = true;
+
+	if (mode_changed && crtc_state->enable &&
+	    dev_priv->display.crtc_compute_clock &&
+	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+							   pipe_config);
+		if (ret)
+			return ret;
+	}
+
+	ret = 0;
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (mode_changed)
+			ret = skl_update_scaler_crtc(pipe_config);
+
+		if (!ret)
+			ret = intel_atomic_setup_scalers(dev, intel_crtc,
+							 pipe_config);
+	}
+
+	return ret;
+}
+
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+	.load_lut = intel_crtc_load_lut,
+	.atomic_begin = intel_begin_crtc_commit,
+	.atomic_flush = intel_finish_crtc_commit,
+	.atomic_check = intel_crtc_atomic_check,
+};
+
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder) {
+			connector->base.state->best_encoder =
+				connector->base.encoder;
+			connector->base.state->crtc =
+				connector->base.encoder->crtc;
+		} else {
+			connector->base.state->best_encoder = NULL;
+			connector->base.state->crtc = NULL;
+		}
+	}
+}
+
+static void
+connected_sink_compute_bpp(struct intel_connector *connector,
+			   struct intel_crtc_state *pipe_config)
+{
+	int bpp = pipe_config->pipe_bpp;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
+		connector->base.base.id,
+		connector->base.name);
+
+	/* Don't use an invalid EDID bpc value */
+	if (connector->base.display_info.bpc &&
+	    connector->base.display_info.bpc * 3 < bpp) {
+		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+			      bpp, connector->base.display_info.bpc*3);
+		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+	}
+
+	/* Clamp bpp to 8 on screens without EDID 1.4 */
+	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+			      bpp);
+		pipe_config->pipe_bpp = 24;
+	}
+}
+
+static int
+compute_baseline_pipe_bpp(struct intel_crtc *crtc,
+			  struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_atomic_state *state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int bpp, i;
+
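+	/* Start from the deepest pipe bpp the hardware supports (10 bpc
+	 * on g4x/vlv, 12 bpc on gen5+, otherwise 8 bpc); the connected
+	 * sinks then clamp it down below. */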
+	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
+		bpp = 10*3;
+	else if (INTEL_INFO(dev)->gen >= 5)
+		bpp = 12*3;
+	else
+		bpp = 8*3;
+
+	pipe_config->pipe_bpp = bpp;
+
+	state = pipe_config->base.state;
+
+	/* Clamp display bpp to EDID value */
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		connected_sink_compute_bpp(to_intel_connector(connector),
+					   pipe_config);
+	}
+
+	return bpp;
+}
+
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+			"type: 0x%x flags: 0x%x\n",
+		mode->crtc_clock,
+		mode->crtc_hdisplay, mode->crtc_hsync_start,
+		mode->crtc_hsync_end, mode->crtc_htotal,
+		mode->crtc_vdisplay, mode->crtc_vsync_start,
+		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
+static void intel_dump_pipe_config(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config,
+				   const char *context)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *plane;
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *state;
+	struct drm_framebuffer *fb;
+
+	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+		      context, pipe_config, pipe_name(crtc->pipe));
+
+	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+		      pipe_config->pipe_bpp, pipe_config->dither);
+	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		      pipe_config->has_pch_encoder,
+		      pipe_config->fdi_lanes,
+		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
+		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
+		      pipe_config->fdi_m_n.tu);
+	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		      pipe_config->has_dp_encoder,
+		      pipe_config->lane_count,
+		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+		      pipe_config->dp_m_n.tu);
+
+	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
+		      pipe_config->has_dp_encoder,
+		      pipe_config->lane_count,
+		      pipe_config->dp_m2_n2.gmch_m,
+		      pipe_config->dp_m2_n2.gmch_n,
+		      pipe_config->dp_m2_n2.link_m,
+		      pipe_config->dp_m2_n2.link_n,
+		      pipe_config->dp_m2_n2.tu);
+
+	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
+		      pipe_config->has_audio,
+		      pipe_config->has_infoframe);
+
+	DRM_DEBUG_KMS("requested mode:\n");
+	drm_mode_debug_printmodeline(&pipe_config->base.mode);
+	DRM_DEBUG_KMS("adjusted mode:\n");
+	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
+	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+		      crtc->num_scalers,
+		      pipe_config->scaler_state.scaler_users,
+		      pipe_config->scaler_state.scaler_id);
+	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+		      pipe_config->gmch_pfit.control,
+		      pipe_config->gmch_pfit.pgm_ratios,
+		      pipe_config->gmch_pfit.lvds_border_bits);
+	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+		      pipe_config->pch_pfit.pos,
+		      pipe_config->pch_pfit.size,
+		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
+	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+
+	if (IS_BROXTON(dev)) {
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
+			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+			      pipe_config->ddi_pll_sel,
+			      pipe_config->dpll_hw_state.ebb0,
+			      pipe_config->dpll_hw_state.ebb4,
+			      pipe_config->dpll_hw_state.pll0,
+			      pipe_config->dpll_hw_state.pll1,
+			      pipe_config->dpll_hw_state.pll2,
+			      pipe_config->dpll_hw_state.pll3,
+			      pipe_config->dpll_hw_state.pll6,
+			      pipe_config->dpll_hw_state.pll8,
+			      pipe_config->dpll_hw_state.pll9,
+			      pipe_config->dpll_hw_state.pll10,
+			      pipe_config->dpll_hw_state.pcsdw12);
+	} else if (IS_SKYLAKE(dev)) {
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+			      pipe_config->ddi_pll_sel,
+			      pipe_config->dpll_hw_state.ctrl1,
+			      pipe_config->dpll_hw_state.cfgcr1,
+			      pipe_config->dpll_hw_state.cfgcr2);
+	} else if (HAS_DDI(dev)) {
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+			      pipe_config->ddi_pll_sel,
+			      pipe_config->dpll_hw_state.wrpll,
+			      pipe_config->dpll_hw_state.spll);
+	} else {
+		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+			      "fp0: 0x%x, fp1: 0x%x\n",
+			      pipe_config->dpll_hw_state.dpll,
+			      pipe_config->dpll_hw_state.dpll_md,
+			      pipe_config->dpll_hw_state.fp0,
+			      pipe_config->dpll_hw_state.fp1);
+	}
+
+	DRM_DEBUG_KMS("planes on this crtc\n");
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		intel_plane = to_intel_plane(plane);
+		if (intel_plane->pipe != crtc->pipe)
+			continue;
+
+		state = to_intel_plane_state(plane->state);
+		fb = state->base.fb;
+		if (!fb) {
+			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
+				"disabled, scaler_id = %d\n",
+				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+				plane->base.id, intel_plane->pipe,
+				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
+				drm_plane_index(plane), state->scaler_id);
+			continue;
+		}
+
+		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
+			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+			plane->base.id, intel_plane->pipe,
+			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
+			drm_plane_index(plane));
+		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
+			fb->base.id, fb->width, fb->height, fb->pixel_format);
+		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
+			state->scaler_id,
+			state->src.x1 >> 16, state->src.y1 >> 16,
+			drm_rect_width(&state->src) >> 16,
+			drm_rect_height(&state->src) >> 16,
+			state->dst.x1, state->dst.y1,
+			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+	}
+}
+
+static bool check_digital_port_conflicts(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_connector *connector;
+	unsigned int used_ports = 0;
+
+	/*
+	 * Walk the connector list instead of the encoder
+	 * list to detect the problem on ddi platforms
+	 * where there's just one encoder per digital port.
+	 */
+	drm_for_each_connector(connector, dev) {
+		struct drm_connector_state *connector_state;
+		struct intel_encoder *encoder;
+
+		connector_state = drm_atomic_get_existing_connector_state(state, connector);
+		if (!connector_state)
+			connector_state = connector->state;
+
+		if (!connector_state->best_encoder)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		WARN_ON(!connector_state->crtc);
+
+		switch (encoder->type) {
+			unsigned int port_mask;
+		case INTEL_OUTPUT_UNKNOWN:
+			if (WARN_ON(!HAS_DDI(dev)))
+				break;
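+			/* fall through: on DDI an unknown output is a digital port */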
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_HDMI:
+		case INTEL_OUTPUT_EDP:
+			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+			/* the same port mustn't appear more than once */
+			if (used_ports & port_mask)
+				return false;
+
+			used_ports |= port_mask;
+		default:
+			break;
+		}
+	}
+
+	return true;
+}
+
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+	struct drm_crtc_state tmp_state;
+	struct intel_crtc_scaler_state scaler_state;
+	struct intel_dpll_hw_state dpll_hw_state;
+	enum intel_dpll_id shared_dpll;
+	uint32_t ddi_pll_sel;
+	bool force_thru;
+
+	/* FIXME: before the switch to atomic started, a new pipe_config was
+	 * kzalloc'd. Code that depends on any field being zero should be
+	 * fixed, so that the crtc_state can be safely duplicated. For now,
+	 * only fields that are known not to cause problems are preserved. */
+
+	tmp_state = crtc_state->base;
+	scaler_state = crtc_state->scaler_state;
+	shared_dpll = crtc_state->shared_dpll;
+	dpll_hw_state = crtc_state->dpll_hw_state;
+	ddi_pll_sel = crtc_state->ddi_pll_sel;
+	force_thru = crtc_state->pch_pfit.force_thru;
+
+	memset(crtc_state, 0, sizeof(*crtc_state));
+
+	crtc_state->base = tmp_state;
+	crtc_state->scaler_state = scaler_state;
+	crtc_state->shared_dpll = shared_dpll;
+	crtc_state->dpll_hw_state = dpll_hw_state;
+	crtc_state->ddi_pll_sel = ddi_pll_sel;
+	crtc_state->pch_pfit.force_thru = force_thru;
+}
+
+static int
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+			  struct intel_crtc_state *pipe_config)
+{
+	struct drm_atomic_state *state = pipe_config->base.state;
+	struct intel_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int base_bpp, ret = -EINVAL;
+	int i;
+	bool retry = true;
+
+	clear_intel_crtc_state(pipe_config);
+
+	pipe_config->cpu_transcoder =
+		(enum transcoder) to_intel_crtc(crtc)->pipe;
+
+	/*
+	 * Sanitize sync polarity flags based on requested ones. If neither
+	 * positive nor negative polarity is requested, treat this as meaning
+	 * negative polarity.
+	 */
+	if (!(pipe_config->base.adjusted_mode.flags &
+	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (!(pipe_config->base.adjusted_mode.flags &
+	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+					     pipe_config);
+	if (base_bpp < 0)
+		goto fail;
+
+	/*
+	 * Determine the real pipe dimensions. Note that stereo modes can
+	 * increase the actual pipe size due to the frame doubling and
+	 * insertion of additional space for blanks between the frames. This
+	 * is stored in the crtc timings. We use the requested mode to do this
+	 * computation to clearly distinguish it from the adjusted mode, which
+	 * can be changed by the connectors in the below retry loop.
+	 */
+	drm_crtc_get_hv_timing(&pipe_config->base.mode,
+			       &pipe_config->pipe_src_w,
+			       &pipe_config->pipe_src_h);
+
+encoder_retry:
+	/* Ensure the port clock defaults are reset when retrying. */
+	pipe_config->port_clock = 0;
+	pipe_config->pixel_multiplier = 1;
+
+	/* Fill in default crtc timings, allow encoders to overwrite them. */
+	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+			      CRTC_STEREO_DOUBLE);
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
+	 */
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		if (!(encoder->compute_config(encoder, pipe_config))) {
+			DRM_DEBUG_KMS("Encoder config failure\n");
+			goto fail;
+		}
+	}
+
+	/* Set default port clock if not overwritten by the encoder. Needs to be
+	 * done afterwards in case the encoder adjusts the mode. */
+	if (!pipe_config->port_clock)
+		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+			* pipe_config->pixel_multiplier;
+
+	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+
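+	/*
+	 * A positive RETRY return asks for one more pass through the encoder
+	 * ->compute_config() hooks, typically after pipe_bpp was lowered to
+	 * fit the available link bandwidth.
+	 */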
+	if (ret == RETRY) {
+		if (WARN(!retry, "loop in pipe configuration computation\n")) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+		retry = false;
+		goto encoder_retry;
+	}
+
+	/* Dithering seems not to pass bits through correctly when it should,
+	 * so only enable it on 6bpc panels. */
+	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
+	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+fail:
+	return ret;
+}
+
+static void
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	/* Double check state. */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
+
+		/* Update hwmode for vblank functions */
+		if (crtc->state->active)
+			crtc->hwmode = crtc->state->adjusted_mode;
+		else
+			crtc->hwmode.crtc_clock = 0;
+	}
+}
+
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
+{
+	int diff;
+
+	if (clock1 == clock2)
+		return true;
+
+	if (!clock1 || !clock2)
+		return false;
+
+	diff = abs(clock1 - clock2);
+
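+	/*
+	 * The check below is, up to integer rounding, diff < 5% of
+	 * (clock1 + clock2), i.e. the clocks may differ by roughly 10% of
+	 * their average. E.g. clock1 = 100000, clock2 = 104000: diff = 4000
+	 * and 5% of 204000 is 10200, so the clocks are considered equal.
+	 */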
+	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
+		return true;
+
+	return false;
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+	list_for_each_entry((intel_crtc), \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		if (mask & (1 << (intel_crtc)->pipe))
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+		  unsigned int m2, unsigned int n2,
+		  bool exact)
+{
+	if (m == m2 && n == n2)
+		return true;
+
+	if (exact || !m || !n || !m2 || !n2)
+		return false;
+
+	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
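+	/*
+	 * Bring the two ratios to a comparable scale by doubling the smaller
+	 * one until the m values meet; equal ratios then compare exactly.
+	 * E.g. 1/2 vs 4/8: 1/2 is doubled twice to 4/8 and matches.
+	 */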
+	if (m > m2) {
+		while (m > m2) {
+			m2 <<= 1;
+			n2 <<= 1;
+		}
+	} else if (m < m2) {
+		while (m < m2) {
+			m <<= 1;
+			n <<= 1;
+		}
+	}
+
+	return m == m2 && n == n2;
+}
+
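+/*
+ * Compare two link M/N value sets. With adjust set, a scaled (fuzzy) match is
+ * accepted and m2_n2 is overwritten with m_n so that later exact comparisons
+ * of the adjusted state succeed.
+ */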
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+		       struct intel_link_m_n *m2_n2,
+		       bool adjust)
+{
+	if (m_n->tu == m2_n2->tu &&
+	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+	    intel_compare_m_n(m_n->link_m, m_n->link_n,
+			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
+		if (adjust)
+			*m2_n2 = *m_n;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool
+intel_pipe_config_compare(struct drm_device *dev,
+			  struct intel_crtc_state *current_config,
+			  struct intel_crtc_state *pipe_config,
+			  bool adjust)
+{
+	bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+	do { \
+		if (!adjust) \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else \
+			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+	} while (0)
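+/*
+ * With adjust set the compare runs as part of the fastset path and mismatches
+ * are only debug-logged; otherwise this is the state checker and a mismatch
+ * is a driver error.
+ */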
+
+#define PIPE_CONF_CHECK_X(name)	\
+	if (current_config->name != pipe_config->name) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected 0x%08x, found 0x%08x)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_I(name)	\
+	if (current_config->name != pipe_config->name) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N(name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name,\
+				    adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name, adjust) && \
+	    !intel_compare_link_m_n(&current_config->alt_name, \
+				    &pipe_config->name, adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "or tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  current_config->alt_name.tu, \
+			  current_config->alt_name.gmch_m, \
+			  current_config->alt_name.gmch_n, \
+			  current_config->alt_name.link_m, \
+			  current_config->alt_name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
+	}
+
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low refresh rates (RR).
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
+#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
+	if ((current_config->name != pipe_config->name) && \
+		(current_config->alt_name != pipe_config->name)) { \
+			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+				  "(expected %i or %i, found %i)\n", \
+				  current_config->name, \
+				  current_config->alt_name, \
+				  pipe_config->name); \
+			ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
+	if ((current_config->name ^ pipe_config->name) & (mask)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name & (mask), \
+			  pipe_config->name & (mask)); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_QUIRK(quirk)	\
+	((current_config->quirks | pipe_config->quirks) & (quirk))
+
+	PIPE_CONF_CHECK_I(cpu_transcoder);
+
+	PIPE_CONF_CHECK_I(has_pch_encoder);
+	PIPE_CONF_CHECK_I(fdi_lanes);
+	PIPE_CONF_CHECK_M_N(fdi_m_n);
+
+	PIPE_CONF_CHECK_I(has_dp_encoder);
+	PIPE_CONF_CHECK_I(lane_count);
+
+	if (INTEL_INFO(dev)->gen < 8) {
+		PIPE_CONF_CHECK_M_N(dp_m_n);
+
+		if (current_config->has_drrs)
+			PIPE_CONF_CHECK_M_N(dp_m2_n2);
+	} else
+		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
+	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+
+	PIPE_CONF_CHECK_I(pixel_multiplier);
+	PIPE_CONF_CHECK_I(has_hdmi_sink);
+	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
+	    IS_VALLEYVIEW(dev))
+		PIPE_CONF_CHECK_I(limited_color_range);
+	PIPE_CONF_CHECK_I(has_infoframe);
+
+	PIPE_CONF_CHECK_I(has_audio);
+
+	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+			      DRM_MODE_FLAG_INTERLACE);
+
+	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
+		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+				      DRM_MODE_FLAG_PHSYNC);
+		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+				      DRM_MODE_FLAG_NHSYNC);
+		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+				      DRM_MODE_FLAG_PVSYNC);
+		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+				      DRM_MODE_FLAG_NVSYNC);
+	}
+
+	PIPE_CONF_CHECK_X(gmch_pfit.control);
+	/* pfit ratios are autocomputed by the hw on gen4+ */
+	if (INTEL_INFO(dev)->gen < 4)
+		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+
+	if (!adjust) {
+		PIPE_CONF_CHECK_I(pipe_src_w);
+		PIPE_CONF_CHECK_I(pipe_src_h);
+
+		PIPE_CONF_CHECK_I(pch_pfit.enabled);
+		if (current_config->pch_pfit.enabled) {
+			PIPE_CONF_CHECK_X(pch_pfit.pos);
+			PIPE_CONF_CHECK_X(pch_pfit.size);
+		}
+
+		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+	}
+
+	/* BDW+ doesn't expose a synchronous way to read the state */
+	if (IS_HASWELL(dev))
+		PIPE_CONF_CHECK_I(ips_enabled);
+
+	PIPE_CONF_CHECK_I(double_wide);
+
+	PIPE_CONF_CHECK_X(ddi_pll_sel);
+
+	PIPE_CONF_CHECK_I(shared_dpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
+	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
+	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+		PIPE_CONF_CHECK_I(pipe_bpp);
+
+	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+
+#undef PIPE_CONF_CHECK_X
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_M_N
+#undef PIPE_CONF_CHECK_M_N_ALT
+#undef PIPE_CONF_CHECK_I_ALT
+#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
+
+	return ret;
+}
+
+static void check_wm_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_ddb_allocation hw_ddb, *sw_ddb;
+	struct intel_crtc *intel_crtc;
+	int plane;
+
+	if (INTEL_INFO(dev)->gen < 9)
+		return;
+
+	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+	sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		struct skl_ddb_entry *hw_entry, *sw_entry;
+		const enum pipe pipe = intel_crtc->pipe;
+
+		if (!intel_crtc->active)
+			continue;
+
+		/* planes */
+		for_each_plane(dev_priv, pipe, plane) {
+			hw_entry = &hw_ddb.plane[pipe][plane];
+			sw_entry = &sw_ddb->plane[pipe][plane];
+
+			if (skl_ddb_entry_equal(hw_entry, sw_entry))
+				continue;
+
+			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+				  "(expected (%u,%u), found (%u,%u))\n",
+				  pipe_name(pipe), plane + 1,
+				  sw_entry->start, sw_entry->end,
+				  hw_entry->start, hw_entry->end);
+		}
+
+		/* cursor */
+		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+		if (skl_ddb_entry_equal(hw_entry, sw_entry))
+			continue;
+
+		DRM_ERROR("mismatch in DDB state pipe %c cursor "
+			  "(expected (%u,%u), found (%u,%u))\n",
+			  pipe_name(pipe),
+			  sw_entry->start, sw_entry->end,
+			  hw_entry->start, hw_entry->end);
+	}
+}
+
+static void
+check_connector_state(struct drm_device *dev,
+		      struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct drm_encoder *encoder = connector->encoder;
+		struct drm_connector_state *state = connector->state;
+
+		/* This also checks the encoder/connector hw state with the
+		 * ->get_hw_state callbacks. */
+		intel_connector_check_state(to_intel_connector(connector));
+
+		I915_STATE_WARN(state->best_encoder != encoder,
+		     "connector's atomic encoder doesn't match legacy encoder\n");
+	}
+}
+
+static void
+check_encoder_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	for_each_intel_encoder(dev, encoder) {
+		bool enabled = false;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+			      encoder->base.base.id,
+			      encoder->base.name);
+
+		for_each_intel_connector(dev, connector) {
+			if (connector->base.state->best_encoder != &encoder->base)
+				continue;
+			enabled = true;
+
+			I915_STATE_WARN(connector->base.state->crtc !=
+					encoder->base.crtc,
+			     "connector's crtc doesn't match encoder crtc\n");
+		}
+
+		I915_STATE_WARN(!!encoder->base.crtc != enabled,
+		     "encoder's enabled state mismatch "
+		     "(expected %i, found %i)\n",
+		     !!encoder->base.crtc, enabled);
+
+		if (!encoder->base.crtc) {
+			bool active;
+
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active,
+			     "encoder detached but still enabled on pipe %c.\n",
+			     pipe_name(pipe));
+		}
+	}
+}
+
+static void
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_crtc_state *pipe_config, *sw_config;
+		bool active;
+
+		if (!needs_modeset(crtc->state) &&
+		    !to_intel_crtc_state(crtc->state)->update_pipe)
+			continue;
+
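+		/*
+		 * The old crtc state is no longer needed, so reuse it as
+		 * scratch space for reading back the hardware state.
+		 */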
+		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+		pipe_config = to_intel_crtc_state(old_crtc_state);
+		memset(pipe_config, 0, sizeof(*pipe_config));
+		pipe_config->base.crtc = crtc;
+		pipe_config->base.state = old_state;
+
+		DRM_DEBUG_KMS("[CRTC:%d]\n",
+			      crtc->base.id);
+
+		active = dev_priv->display.get_pipe_config(intel_crtc,
+							   pipe_config);
+
+		/* hw state is inconsistent with the pipe quirk */
+		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+			active = crtc->state->active;
+
+		I915_STATE_WARN(crtc->state->active != active,
+		     "crtc active state doesn't match with hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, active);
+
+		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+		     "transitional active state does not match atomic hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+
+		for_each_encoder_on_crtc(dev, crtc, encoder) {
+			enum pipe pipe;
+
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active != crtc->state->active,
+				"[ENCODER:%i] active %i with crtc active %i\n",
+				encoder->base.base.id, active, crtc->state->active);
+
+			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+					"Encoder connected to wrong pipe %c\n",
+					pipe_name(pipe));
+
+			if (active)
+				encoder->get_config(encoder, pipe_config);
+		}
+
+		if (!crtc->state->active)
+			continue;
+
+		sw_config = to_intel_crtc_state(crtc->state);
+		if (!intel_pipe_config_compare(dev, sw_config,
+					       pipe_config, false)) {
+			I915_STATE_WARN(1, "pipe state doesn't match!\n");
+			intel_dump_pipe_config(intel_crtc, pipe_config,
+					       "[hw state]");
+			intel_dump_pipe_config(intel_crtc, sw_config,
+					       "[sw state]");
+		}
+	}
+}
+
+static void
+check_shared_dpll_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	struct intel_dpll_hw_state dpll_hw_state;
+	int i;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+		int enabled_crtcs = 0, active_crtcs = 0;
+		bool active;
+
+		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+		DRM_DEBUG_KMS("%s\n", pll->name);
+
+		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
+		     "more active pll users than references: %i vs %i\n",
+		     pll->active, hweight32(pll->config.crtc_mask));
+		I915_STATE_WARN(pll->active && !pll->on,
+		     "pll in active use but not on in sw tracking\n");
+		I915_STATE_WARN(pll->on && !pll->active,
+		     "pll is on but not in use in sw tracking\n");
+		I915_STATE_WARN(pll->on != active,
+		     "pll on state mismatch (expected %i, found %i)\n",
+		     pll->on, active);
+
+		for_each_intel_crtc(dev, crtc) {
+			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
+				enabled_crtcs++;
+			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+				active_crtcs++;
+		}
+		I915_STATE_WARN(pll->active != active_crtcs,
+		     "pll active crtcs mismatch (expected %i, found %i)\n",
+		     pll->active, active_crtcs);
+		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
+		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
+		     hweight32(pll->config.crtc_mask), enabled_crtcs);
+
+		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
+				       sizeof(dpll_hw_state)),
+		     "pll hw state mismatch\n");
+	}
+}
+
+static void
+intel_modeset_check_state(struct drm_device *dev,
+			  struct drm_atomic_state *old_state)
+{
+	check_wm_state(dev);
+	check_connector_state(dev, old_state);
+	check_encoder_state(dev);
+	check_crtc_state(dev, old_state);
+	check_shared_dpll_state(dev);
+}
+
+void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
+				     int dotclock)
+{
+	/*
+	 * FDI already provided one idea for the dotclock.
+	 * Yell if the encoder disagrees.
+	 */
+	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
+	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
+}
+
+static void update_scanline_offset(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+
+	/*
+	 * The scanline counter increments at the leading edge of hsync.
+	 *
+	 * On most platforms it starts counting from vtotal-1 on the
+	 * first active line. That means the scanline counter value is
+	 * always one less than what we would expect. I.e. just after
+	 * start of vblank, which also occurs at start of hsync (on the
+	 * last active line), the scanline counter will read vblank_start-1.
+	 *
+	 * On gen2 the scanline counter starts counting from 1 instead
+	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+	 * to keep the value positive), instead of adding one.
+	 *
+	 * On HSW+ the behaviour of the scanline counter depends on the output
+	 * type. For DP ports it behaves like most other platforms, but on HDMI
+	 * there's an extra 1 line difference. So we need to add two instead of
+	 * one to the value.
+	 */
+	if (IS_GEN2(dev)) {
+		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+		int vtotal;
+
+		vtotal = adjusted_mode->crtc_vtotal;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+			vtotal /= 2;
+
+		crtc->scanline_offset = vtotal - 1;
+	} else if (HAS_DDI(dev) &&
+		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+		crtc->scanline_offset = 2;
+	} else
+		crtc->scanline_offset = 1;
+}
+
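+/*
+ * Drop the shared DPLL reference of every crtc that undergoes a modeset so
+ * the plls can be recomputed and reassigned for the new configuration.
+ */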
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_shared_dpll_config *shared_dpll = NULL;
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *intel_crtc_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	if (!dev_priv->display.crtc_compute_clock)
+		return;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		int dpll;
+
+		intel_crtc = to_intel_crtc(crtc);
+		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		dpll = intel_crtc_state->shared_dpll;
+
+		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+			continue;
+
+		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+
+		if (!shared_dpll)
+			shared_dpll = intel_atomic_get_shared_dpll_state(state);
+
+		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+	}
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct intel_crtc *intel_crtc;
+	struct drm_crtc *crtc;
+	struct intel_crtc_state *first_crtc_state = NULL;
+	struct intel_crtc_state *other_crtc_state = NULL;
+	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
+	int i;
+
+	/* look at all crtc's that are going to be enabled during the modeset */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc_state->active || !needs_modeset(crtc_state))
+			continue;
+
+		if (first_crtc_state) {
+			other_crtc_state = to_intel_crtc_state(crtc_state);
+			break;
+		} else {
+			first_crtc_state = to_intel_crtc_state(crtc_state);
+			first_pipe = intel_crtc->pipe;
+		}
+	}
+
+	/* No workaround needed? */
+	if (!first_crtc_state)
+		return 0;
+
+	/* w/a possibly needed, check how many crtc's are already enabled. */
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		struct intel_crtc_state *pipe_config;
+
+		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(pipe_config))
+			return PTR_ERR(pipe_config);
+
+		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
+
+		if (!pipe_config->base.active ||
+		    needs_modeset(&pipe_config->base))
+			continue;
+
+		/* 2 or more enabled crtcs means no need for w/a */
+		if (enabled_pipe != INVALID_PIPE)
+			return 0;
+
+		enabled_pipe = intel_crtc->pipe;
+	}
+
+	if (enabled_pipe != INVALID_PIPE)
+		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+	else if (other_crtc_state)
+		other_crtc_state->hsw_workaround_pipe = first_pipe;
+
+	return 0;
+}
+
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	/* add all active pipes to the state */
+	for_each_crtc(state->dev, crtc) {
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->active || needs_modeset(crtc_state))
+			continue;
+
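+		/*
+		 * Flag the pipe for a full modeset; the caller uses this when
+		 * a global change such as a new cdclk must be reprogrammed
+		 * with every active pipe taken down.
+		 */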
+		crtc_state->mode_changed = true;
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			break;
+
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!check_digital_port_conflicts(state)) {
+		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * See if the config requires any additional preparation, e.g.
+	 * to adjust global state with pipes off.  We need to do this
+	 * here so we can get the modeset_pipe updated config for the new
+	 * mode set on this crtc.  For other crtcs we need to use the
+	 * adjusted_mode bits in the crtc directly.
+	 */
+	if (dev_priv->display.modeset_calc_cdclk) {
+		unsigned int cdclk;
+
+		ret = dev_priv->display.modeset_calc_cdclk(state);
+
+		cdclk = to_intel_atomic_state(state)->cdclk;
+		if (!ret && cdclk != dev_priv->cdclk_freq)
+			ret = intel_modeset_all_pipes(state);
+
+		if (ret < 0)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+
+	intel_modeset_clear_plls(state);
+
+	if (IS_HASWELL(dev))
+		return haswell_mode_set_planes_workaround(state);
+
+	return 0;
+}
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+			      struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret, i;
+	bool any_ms = false;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc_state *pipe_config =
+			to_intel_crtc_state(crtc_state);
+
+		memset(&to_intel_crtc(crtc)->atomic, 0,
+		       sizeof(struct intel_crtc_atomic_commit));
+
+		/* Catch I915_MODE_FLAG_INHERITED */
+		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+			crtc_state->mode_changed = true;
+
+		if (!crtc_state->enable) {
+			if (needs_modeset(crtc_state))
+				any_ms = true;
+			continue;
+		}
+
+		if (!needs_modeset(crtc_state))
+			continue;
+
+		/* FIXME: For only active_changed we shouldn't need to do any
+		 * state recomputation at all. */
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			return ret;
+
+		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret)
+			return ret;
+
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
+					to_intel_crtc_state(crtc->state),
+					pipe_config, true)) {
+			crtc_state->mode_changed = false;
+			to_intel_crtc_state(crtc_state)->update_pipe = true;
+		}
+
+		if (needs_modeset(crtc_state)) {
+			any_ms = true;
+
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				return ret;
+		}
+
+		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+				       needs_modeset(crtc_state) ?
+				       "[modeset]" : "[fastset]");
+	}
+
+	if (any_ms) {
+		ret = intel_modeset_checks(state);
+
+		if (ret)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk =
+			to_i915(state->dev)->cdclk_freq;
+
+	return drm_atomic_helper_check_planes(state->dev, state);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state,
+			       bool async)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+	int i;
+	bool any_ms = false;
+
+	if (async) {
+		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+		return -EINVAL;
+	}
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret)
+		return ret;
+
+	drm_atomic_helper_swap_state(dev, state);
+
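+	/*
+	 * From here on crtc->state is the new state, while the crtc_state
+	 * iterated below is the old state that was just swapped out.
+	 */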
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		if (!needs_modeset(crtc->state))
+			continue;
+
+		any_ms = true;
+		intel_pre_plane_update(intel_crtc);
+
+		if (crtc_state->active) {
+			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+			dev_priv->display.crtc_disable(crtc);
+			intel_crtc->active = false;
+			intel_disable_shared_dpll(intel_crtc);
+		}
+	}
+
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_crtc_state(state);
+
+	if (any_ms) {
+		intel_shared_dpll_commit(state);
+
+		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+		modeset_update_crtc_power_domains(state);
+	}
+
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		bool modeset = needs_modeset(crtc->state);
+		bool update_pipe = !modeset &&
+			to_intel_crtc_state(crtc->state)->update_pipe;
+		unsigned long put_domains = 0;
+
+		if (modeset && crtc->state->active) {
+			update_scanline_offset(to_intel_crtc(crtc));
+			dev_priv->display.crtc_enable(crtc);
+		}
+
+		if (update_pipe) {
+			put_domains = modeset_get_crtc_power_domains(crtc);
+
+			/* make sure intel_modeset_check_state runs */
+			any_ms = true;
+		}
+
+		if (!modeset)
+			intel_pre_plane_update(intel_crtc);
+
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+
+		if (put_domains)
+			modeset_put_power_domains(dev_priv, put_domains);
+
+		intel_post_plane_update(intel_crtc);
+	}
+
+	/* FIXME: add subpixel order */
+
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	if (any_ms)
+		intel_modeset_check_state(dev, state);
+
+	drm_atomic_state_free(state);
+
+	return 0;
+}
+
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
+			      crtc->base.id);
+		return;
+	}
+
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	ret = PTR_ERR_OR_ZERO(crtc_state);
+	if (!ret) {
+		if (!crtc_state->active)
+			goto out;
+
+		crtc_state->mode_changed = true;
+		ret = drm_atomic_commit(state);
+	}
+
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(state->acquire_ctx);
+		goto retry;
+	}
+
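+	/*
+	 * On success drm_atomic_commit() consumed the state; free it on any
+	 * error and on the inactive-crtc early exit above.
+	 */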
+	if (ret)
+out:
+		drm_atomic_state_free(state);
+}
+
+#undef for_each_intel_crtc_masked
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.gamma_set = intel_crtc_gamma_set,
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = intel_crtc_destroy,
+	.page_flip = intel_crtc_page_flip,
+	.atomic_duplicate_state = intel_crtc_duplicate_state,
+	.atomic_destroy_state = intel_crtc_destroy_state,
+};
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(PCH_DPLL(pll->id));
+	hw_state->dpll = val;
+	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+	return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	/* PCH refclock must be enabled first */
+	ibx_assert_pch_refclk_enabled(dev_priv);
+
+	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(150);
+
+	/* The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+
+	/* Make sure no transcoder is still depending on us. */
+	for_each_intel_crtc(dev, crtc) {
+		if (intel_crtc_to_shared_dpll(crtc) == pll)
+			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+	}
+
+	I915_WRITE(PCH_DPLL(pll->id), 0);
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(200);
+}
+
+static const char *ibx_pch_dpll_names[] = {
+	"PCH DPLL A",
+	"PCH DPLL B",
+};
+
+static void ibx_pch_dpll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->num_shared_dpll = 2;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
+		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
+		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			ibx_pch_dpll_get_hw_state;
+	}
+}
+
+static void intel_shared_dpll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_DDI(dev))
+		intel_ddi_pll_init(dev);
+	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ibx_pch_dpll_init(dev);
+	else
+		dev_priv->num_shared_dpll = 0;
+
+	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+}
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @plane: drm plane to prepare for
+ * @fb: framebuffer to prepare for presentation
+ *
+ * Prepares a framebuffer for usage on a display plane.  Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits.  Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *plane,
+		       const struct drm_plane_state *new_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_framebuffer *fb = new_state->fb;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+	int ret = 0;
+
+	if (!obj)
+		return 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+	    INTEL_INFO(dev)->cursor_needs_physical) {
+		int align = IS_I830(dev) ? 16 * 1024 : 256;
+		ret = i915_gem_object_attach_phys(obj, align);
+		if (ret)
+			DRM_DEBUG_KMS("failed to attach phys object\n");
+	} else {
+		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
+	}
+
+	if (ret == 0)
+		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @fb: old framebuffer that was on plane
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+		       const struct drm_plane_state *old_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
+
+	if (!obj)
+		return;
+
+	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
+	    !INTEL_INFO(dev)->cursor_needs_physical) {
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(old_state->fb, old_state);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+int
+skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
+{
+	int max_scale;
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	int crtc_clock, cdclk;
+
+	if (!intel_crtc || !crtc_state)
+		return DRM_PLANE_HELPER_NO_SCALING;
+
+	dev = intel_crtc->base.dev;
+	dev_priv = dev->dev_private;
+	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
+	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
+
+	if (!crtc_clock || !cdclk)
+		return DRM_PLANE_HELPER_NO_SCALING;
+
+	/*
+	 * The SKL max scale (16.16 fixed point) is the lower of:
+	 *    just under 3.0 (the -1 keeps it strictly below 3)
+	 *            or
+	 *    cdclk / crtc_clock
+	 */
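+	/*
+	 * Example (16.16 fixed point): cdclk = 540000, crtc_clock = 270000
+	 * gives (1 << 8) * ((540000 << 8) / 270000) = 131072, i.e. a maximum
+	 * downscale factor of 2.0, below the 3.0 cap of 196607.
+	 */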
+	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
+
+	return max_scale;
+}
+
+static int
+intel_check_primary_plane(struct drm_plane *plane,
+			  struct intel_crtc_state *crtc_state,
+			  struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_framebuffer *fb = state->base.fb;
+	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	bool can_position = false;
+
+	if (INTEL_INFO(plane->dev)->gen >= 9) {
+		/* use scaler when colorkey is not required */
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+			min_scale = 1;
+			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+		}
+		can_position = true;
+	}
+
+	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					     &state->dst, &state->clip,
+					     min_scale, max_scale,
+					     can_position, true,
+					     &state->visible);
+}
+
+static void
+intel_commit_primary_plane(struct drm_plane *plane,
+			   struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_framebuffer *fb = state->base.fb;
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct drm_rect *src = &state->src;
+
+	crtc = crtc ? crtc : plane->crtc;
+	intel_crtc = to_intel_crtc(crtc);
+
+	plane->fb = fb;
+	crtc->x = src->x1 >> 16;
+	crtc->y = src->y1 >> 16;
+
+	if (!crtc->state->active)
+		return;
+
+	dev_priv->display.update_primary_plane(crtc, fb,
+					       state->src.x1 >> 16,
+					       state->src.y1 >> 16);
+}
+
+static void
+intel_disable_primary_plane(struct drm_plane *plane,
+			    struct drm_crtc *crtc)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
+}
+
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *old_intel_state =
+		to_intel_crtc_state(old_crtc_state);
+	bool modeset = needs_modeset(crtc->state);
+
+	if (intel_crtc->atomic.update_wm_pre)
+		intel_update_watermarks(crtc);
+
+	/* Perform vblank evasion around commit operation */
+	if (crtc->state->active)
+		intel_pipe_update_start(intel_crtc);
+
+	if (modeset)
+		return;
+
+	if (to_intel_crtc_state(crtc->state)->update_pipe)
+		intel_update_pipe_config(intel_crtc, old_intel_state);
+	else if (INTEL_INFO(dev)->gen >= 9)
+		skl_detach_scalers(intel_crtc);
+}
+
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (crtc->state->active)
+		intel_pipe_update_end(intel_crtc);
+}
+
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(intel_plane);
+}
+
+const struct drm_plane_funcs intel_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.set_property = drm_atomic_helper_plane_set_property,
+	.atomic_get_property = intel_plane_atomic_get_property,
+	.atomic_set_property = intel_plane_atomic_set_property,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+						    int pipe)
+{
+	struct intel_plane *primary;
+	struct intel_plane_state *state;
+	const uint32_t *intel_primary_formats;
+	unsigned int num_formats;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (primary == NULL)
+		return NULL;
+
+	state = intel_create_plane_state(&primary->base);
+	if (!state) {
+		kfree(primary);
+		return NULL;
+	}
+	primary->base.state = &state->base;
+
+	primary->can_scale = false;
+	primary->max_downscale = 1;
+	if (INTEL_INFO(dev)->gen >= 9) {
+		primary->can_scale = true;
+		state->scaler_id = -1;
+	}
+	primary->pipe = pipe;
+	primary->plane = pipe;
+	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
+	primary->check_plane = intel_check_primary_plane;
+	primary->commit_plane = intel_commit_primary_plane;
+	primary->disable_plane = intel_disable_primary_plane;
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+		primary->plane = !pipe;
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		intel_primary_formats = skl_primary_formats;
+		num_formats = ARRAY_SIZE(skl_primary_formats);
+	} else if (INTEL_INFO(dev)->gen >= 4) {
+		intel_primary_formats = i965_primary_formats;
+		num_formats = ARRAY_SIZE(i965_primary_formats);
+	} else {
+		intel_primary_formats = i8xx_primary_formats;
+		num_formats = ARRAY_SIZE(i8xx_primary_formats);
+	}
+
+	drm_universal_plane_init(dev, &primary->base, 0,
+				 &intel_plane_funcs,
+				 intel_primary_formats, num_formats,
+				 DRM_PLANE_TYPE_PRIMARY);
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		intel_create_rotation_property(dev, primary);
+
+	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
+
+	return &primary->base;
+}
+
+void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
+{
+	if (!dev->mode_config.rotation_property) {
+		unsigned long flags = BIT(DRM_ROTATE_0) |
+			BIT(DRM_ROTATE_180);
+
+		if (INTEL_INFO(dev)->gen >= 9)
+			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+
+		dev->mode_config.rotation_property =
+			drm_mode_create_rotation_property(dev, flags);
+	}
+	if (dev->mode_config.rotation_property)
+		drm_object_attach_property(&plane->base.base,
+				dev->mode_config.rotation_property,
+				plane->base.state->rotation);
+}
+
+static int
+intel_check_cursor_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
+			 struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct drm_framebuffer *fb = state->base.fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	enum pipe pipe = to_intel_plane(plane)->pipe;
+	unsigned stride;
+	int ret;
+
+	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					    &state->dst, &state->clip,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    true, true, &state->visible);
+	if (ret)
+		return ret;
+
+	/* if we want to turn off the cursor ignore width and height */
+	if (!obj)
+		return 0;
+
+	/* Check for which cursor types we support */
+	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+			  state->base.crtc_w, state->base.crtc_h);
+		return -EINVAL;
+	}
+
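+	/*
+	 * Cursor planes scan out at 4 bytes per pixel (ARGB8888) with a
+	 * power-of-two stride, so the backing object must cover crtc_h
+	 * such rows.
+	 */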
+	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
+	if (obj->base.size < stride * state->base.crtc_h) {
+		DRM_DEBUG_KMS("buffer is too small\n");
+		return -ENOMEM;
+	}
+
+	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
+		DRM_DEBUG_KMS("cursor cannot be tiled\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * There's something wrong with the cursor on CHV pipe C.
+	 * If it straddles the left edge of the screen then
+	 * moving it away from the edge or disabling it often
+	 * results in a pipe underrun, and often that can lead to
+	 * dead pipe (constant underrun reported, and it scans
+	 * out just a solid color). To recover from that, the
+	 * display power well must be turned off and on again.
+	 * Refuse to put the cursor into that compromised position.
+	 */
+	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+	    state->visible && state->base.crtc_x < 0) {
+		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+intel_disable_cursor_plane(struct drm_plane *plane,
+			   struct drm_crtc *crtc)
+{
+	intel_crtc_update_cursor(crtc, false);
+}
+
+static void
+intel_commit_cursor_plane(struct drm_plane *plane,
+			  struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_device *dev = plane->dev;
+	struct intel_crtc *intel_crtc;
+	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
+	uint32_t addr;
+
+	crtc = crtc ? crtc : plane->crtc;
+	intel_crtc = to_intel_crtc(crtc);
+
+	if (!obj)
+		addr = 0;
+	else if (!INTEL_INFO(dev)->cursor_needs_physical)
+		addr = i915_gem_obj_ggtt_offset(obj);
+	else
+		addr = obj->phys_handle->busaddr;
+
+	intel_crtc->cursor_addr = addr;
+
+	if (crtc->state->active)
+		intel_crtc_update_cursor(crtc, state->visible);
+}
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+						   int pipe)
+{
+	struct intel_plane *cursor;
+	struct intel_plane_state *state;
+
+	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+	if (cursor == NULL)
+		return NULL;
+
+	state = intel_create_plane_state(&cursor->base);
+	if (!state) {
+		kfree(cursor);
+		return NULL;
+	}
+	cursor->base.state = &state->base;
+
+	cursor->can_scale = false;
+	cursor->max_downscale = 1;
+	cursor->pipe = pipe;
+	cursor->plane = pipe;
+	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
+	cursor->check_plane = intel_check_cursor_plane;
+	cursor->commit_plane = intel_commit_cursor_plane;
+	cursor->disable_plane = intel_disable_cursor_plane;
+
+	drm_universal_plane_init(dev, &cursor->base, 0,
+				 &intel_plane_funcs,
+				 intel_cursor_formats,
+				 ARRAY_SIZE(intel_cursor_formats),
+				 DRM_PLANE_TYPE_CURSOR);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (!dev->mode_config.rotation_property)
+			dev->mode_config.rotation_property =
+				drm_mode_create_rotation_property(dev,
+							BIT(DRM_ROTATE_0) |
+							BIT(DRM_ROTATE_180));
+		if (dev->mode_config.rotation_property)
+			drm_object_attach_property(&cursor->base.base,
+				dev->mode_config.rotation_property,
+				state->base.rotation);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		state->scaler_id = -1;
+
+	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+	return &cursor->base;
+}
+
+static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+	struct intel_crtc_state *crtc_state)
+{
+	int i;
+	struct intel_scaler *intel_scaler;
+	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+
+	for (i = 0; i < intel_crtc->num_scalers; i++) {
+		intel_scaler = &scaler_state->scalers[i];
+		intel_scaler->in_use = 0;
+		intel_scaler->mode = PS_SCALER_MODE_DYN;
+	}
+
+	scaler_state->scaler_id = -1;
+}
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state = NULL;
+	struct drm_plane *primary = NULL;
+	struct drm_plane *cursor = NULL;
+	int i, ret;
+
+	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
+	if (intel_crtc == NULL)
+		return;
+
+	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+	if (!crtc_state)
+		goto fail;
+	intel_crtc->config = crtc_state;
+	intel_crtc->base.state = &crtc_state->base;
+	crtc_state->base.crtc = &intel_crtc->base;
+
+	/* initialize shared scalers */
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (pipe == PIPE_C)
+			intel_crtc->num_scalers = 1;
+		else
+			intel_crtc->num_scalers = SKL_NUM_SCALERS;
+
+		skl_init_scalers(dev, intel_crtc, crtc_state);
+	}
+
+	primary = intel_primary_plane_create(dev, pipe);
+	if (!primary)
+		goto fail;
+
+	cursor = intel_cursor_plane_create(dev, pipe);
+	if (!cursor)
+		goto fail;
+
+	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+					cursor, &intel_crtc_funcs);
+	if (ret)
+		goto fail;
+
+	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+	for (i = 0; i < 256; i++) {
+		intel_crtc->lut_r[i] = i;
+		intel_crtc->lut_g[i] = i;
+		intel_crtc->lut_b[i] = i;
+	}
+
+	/*
+	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
+	 */
+	intel_crtc->pipe = pipe;
+	intel_crtc->plane = pipe;
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
+		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+		intel_crtc->plane = !pipe;
+	}
+
+	intel_crtc->cursor_base = ~0;
+	intel_crtc->cursor_cntl = ~0;
+	intel_crtc->cursor_size = ~0;
+
+	intel_crtc->wm.cxsr_allowed = true;
+
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
+	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+	return;
+
+fail:
+	if (primary)
+		drm_plane_cleanup(primary);
+	if (cursor)
+		drm_plane_cleanup(cursor);
+	kfree(crtc_state);
+	kfree(intel_crtc);
+}
+
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
+{
+	struct drm_encoder *encoder = connector->base.encoder;
+	struct drm_device *dev = connector->base.dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	if (!encoder || WARN_ON(!encoder->crtc))
+		return INVALID_PIPE;
+
+	return to_intel_crtc(encoder->crtc)->pipe;
+}
+
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+	struct drm_crtc *drmmode_crtc;
+	struct intel_crtc *crtc;
+
+	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
+
+	if (!drmmode_crtc) {
+		DRM_ERROR("no such CRTC id\n");
+		return -ENOENT;
+	}
+
+	crtc = to_intel_crtc(drmmode_crtc);
+	pipe_from_crtc_id->pipe = crtc->pipe;
+
+	return 0;
+}
+
+static int intel_encoder_clones(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
+	int index_mask = 0;
+	int entry = 0;
+
+	for_each_intel_encoder(dev, source_encoder) {
+		if (encoders_cloneable(encoder, source_encoder))
+			index_mask |= (1 << entry);
+
+		entry++;
+	}
+
+	return index_mask;
+}
+
+static bool has_edp_a(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_MOBILE(dev))
+		return false;
+
+	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+		return false;
+
+	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+		return false;
+
+	return true;
+}
+
+static bool intel_crt_present(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		return false;
+
+	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+		return false;
+
+	if (IS_CHERRYVIEW(dev))
+		return false;
+
+	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+		return false;
+
+	return true;
+}
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	bool dpd_is_edp = false;
+
+	intel_lvds_init(dev);
+
+	if (intel_crt_present(dev))
+		intel_crt_init(dev);
+
+	if (IS_BROXTON(dev)) {
+		/*
+		 * FIXME: Broxton doesn't support port detection via the
+		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
+		 * detect the ports.
+		 */
+		intel_ddi_init(dev, PORT_A);
+		intel_ddi_init(dev, PORT_B);
+		intel_ddi_init(dev, PORT_C);
+	} else if (HAS_DDI(dev)) {
+		int found;
+
+		/*
+		 * Haswell uses DDI functions to detect digital outputs.
+		 * On SKL pre-D0 the strap isn't connected, so we assume
+		 * it's there.
+		 */
+		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+		/* WaIgnoreDDIAStrap: skl */
+		if (found || IS_SKYLAKE(dev))
+			intel_ddi_init(dev, PORT_A);
+
+		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
+		 * register */
+		found = I915_READ(SFUSE_STRAP);
+
+		if (found & SFUSE_STRAP_DDIB_DETECTED)
+			intel_ddi_init(dev, PORT_B);
+		if (found & SFUSE_STRAP_DDIC_DETECTED)
+			intel_ddi_init(dev, PORT_C);
+		if (found & SFUSE_STRAP_DDID_DETECTED)
+			intel_ddi_init(dev, PORT_D);
+		/*
+		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+		 */
+		if (IS_SKYLAKE(dev) &&
+		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+			intel_ddi_init(dev, PORT_E);
+
+	} else if (HAS_PCH_SPLIT(dev)) {
+		int found;
+		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+
+		if (has_edp_a(dev))
+			intel_dp_init(dev, DP_A, PORT_A);
+
+		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+			/* PCH SDVOB multiplex with HDMIB */
+			found = intel_sdvo_init(dev, PCH_SDVOB, true);
+			if (!found)
+				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
+			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+				intel_dp_init(dev, PCH_DP_B, PORT_B);
+		}
+
+		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
+
+		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
+
+		if (I915_READ(PCH_DP_C) & DP_DETECTED)
+			intel_dp_init(dev, PCH_DP_C, PORT_C);
+
+		if (I915_READ(PCH_DP_D) & DP_DETECTED)
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
+	} else if (IS_VALLEYVIEW(dev)) {
+		bool has_edp, has_port;
+
+		/*
+		 * The DP_DETECTED bit is the latched state of the DDC
+		 * SDA pin at boot. However since eDP doesn't require DDC
+		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
+		 * eDP ports may have been muxed to an alternate function.
+		 * Thus we can't rely on the DP_DETECTED bit alone to detect
+		 * eDP ports. Consult the VBT as well as DP_DETECTED to
+		 * detect eDP ports.
+		 *
+		 * Sadly the straps seem to be missing sometimes even for HDMI
+		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+		 * and VBT for the presence of the port. Additionally we can't
+		 * trust the port type the VBT declares as we've seen at least
+		 * HDMI ports that the VBT claims are DP or eDP.
+		 */
+		has_edp = intel_dp_is_edp(dev, PORT_B);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
+
+		has_edp = intel_dp_is_edp(dev, PORT_C);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
+
+		if (IS_CHERRYVIEW(dev)) {
+			/*
+			 * eDP not supported on port D,
+			 * so no need to worry about it
+			 */
+			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+				intel_dp_init(dev, CHV_DP_D, PORT_D);
+			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
+		}
+
+		intel_dsi_init(dev);
+	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+		bool found = false;
+
+		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOB\n");
+			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
+			if (!found && IS_G4X(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
+			}
+
+			if (!found && IS_G4X(dev))
+				intel_dp_init(dev, DP_B, PORT_B);
+		}
+
+		/* Before G4X, SDVOC doesn't have its own detect register */
+
+		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOC\n");
+			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
+		}
+
+		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+
+			if (IS_G4X(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
+			}
+			if (IS_G4X(dev))
+				intel_dp_init(dev, DP_C, PORT_C);
+		}
+
+		if (IS_G4X(dev) &&
+		    (I915_READ(DP_D) & DP_DETECTED))
+			intel_dp_init(dev, DP_D, PORT_D);
+	} else if (IS_GEN2(dev))
+		intel_dvo_init(dev);
+
+	if (SUPPORTS_TV(dev))
+		intel_tv_init(dev);
+
+	intel_psr_init(dev);
+
+	for_each_intel_encoder(dev, encoder) {
+		encoder->base.possible_crtcs = encoder->crtc_mask;
+		encoder->base.possible_clones =
+			intel_encoder_clones(encoder);
+	}
+
+	intel_init_pch_refclk(dev);
+
+	drm_helper_move_panel_connectors_to_head(dev);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+	drm_framebuffer_cleanup(fb);
+	mutex_lock(&dev->struct_mutex);
+	WARN_ON(!intel_fb->obj->framebuffer_references--);
+	drm_gem_object_unreference(&intel_fb->obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						struct drm_file *file,
+						unsigned int *handle)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
+	if (obj->userptr.mm) {
+		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+		return -EINVAL;
+	}
+
+	return drm_gem_handle_create(file, &obj->base, handle);
+}
+
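+/*
+ * The DIRTYFB ioctl lands here: flush the object's frontbuffer writes so
+ * that consumers of frontbuffer tracking (e.g. PSR, FBC) pick up the update.
+ */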
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+					struct drm_file *file,
+					unsigned flags, unsigned color,
+					struct drm_clip_rect *clips,
+					unsigned num_clips)
+{
+	struct drm_device *dev = fb->dev;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+	.destroy = intel_user_framebuffer_destroy,
+	.create_handle = intel_user_framebuffer_create_handle,
+	.dirty = intel_user_framebuffer_dirty,
+};
+
+static
+u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
+			 uint32_t pixel_format)
+{
+	u32 gen = INTEL_INFO(dev)->gen;
+
+	if (gen >= 9) {
+		/* "The stride in bytes must not exceed the of the size of 8K
+		 *  pixels and 32K bytes."
+		 */
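+		/* e.g. a 4 byte/px format hits both limits at 32768, while a
+		 * 2 byte/px format is pixel-limited to 8192*2 == 16384 bytes.
+		 */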
+		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
+		return 32*1024;
+	} else if (gen >= 4) {
+		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+			return 16*1024;
+		else
+			return 32*1024;
+	} else if (gen >= 3) {
+		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+			return 8*1024;
+		else
+			return 16*1024;
+	} else {
+		/* XXX DSPC is limited to 4k tiled */
+		return 8*1024;
+	}
+}
+
+static int intel_framebuffer_init(struct drm_device *dev,
+				  struct intel_framebuffer *intel_fb,
+				  struct drm_mode_fb_cmd2 *mode_cmd,
+				  struct drm_i915_gem_object *obj)
+{
+	unsigned int aligned_height;
+	int ret;
+	u32 pitch_limit, stride_alignment;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		/* Enforce that fb modifier and tiling mode match, but only for
+		 * X-tiled. This is needed for FBC. */
+		if (!!(obj->tiling_mode == I915_TILING_X) !=
+		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+			return -EINVAL;
+		}
+	} else {
+		if (obj->tiling_mode == I915_TILING_X)
+			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		else if (obj->tiling_mode == I915_TILING_Y) {
+			DRM_DEBUG("No Y tiling for legacy addfb\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Sanity check the modifier passed in. */
+	switch (mode_cmd->modifier[0]) {
+	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (INTEL_INFO(dev)->gen < 9) {
+			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
+				  mode_cmd->modifier[0]);
+			return -EINVAL;
+		}
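+		/* fall through */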
+	case DRM_FORMAT_MOD_NONE:
+	case I915_FORMAT_MOD_X_TILED:
+		break;
+	default:
+		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
+			  mode_cmd->modifier[0]);
+		return -EINVAL;
+	}
+
+	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+						     mode_cmd->pixel_format);
+	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
+			  mode_cmd->pitches[0], stride_alignment);
+		return -EINVAL;
+	}
+
+	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+					   mode_cmd->pixel_format);
+	if (mode_cmd->pitches[0] > pitch_limit) {
+		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
+			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+			  "tiled" : "linear",
+			  mode_cmd->pitches[0], pitch_limit);
+		return -EINVAL;
+	}
+
+	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
+	    mode_cmd->pitches[0] != obj->stride) {
+		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+			  mode_cmd->pitches[0], obj->stride);
+		return -EINVAL;
+	}
+
+	/* Reject formats not supported by any plane early. */
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		break;
+	case DRM_FORMAT_XRGB1555:
+		if (INTEL_INFO(dev)->gen > 3) {
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_ABGR8888:
+		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+		if (INTEL_INFO(dev)->gen < 4) {
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		if (!IS_VALLEYVIEW(dev)) {
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_VYUY:
+		if (INTEL_INFO(dev)->gen < 5) {
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_DEBUG("unsupported pixel format: %s\n",
+			  drm_get_format_name(mode_cmd->pixel_format));
+		return -EINVAL;
+	}
+
+	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+	if (mode_cmd->offsets[0] != 0)
+		return -EINVAL;
+
+	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
+					       mode_cmd->pixel_format,
+					       mode_cmd->modifier[0]);
+	/* FIXME drm helper for size checks (especially planar formats)? */
+	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+		return -EINVAL;
+
+	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+	intel_fb->obj = obj;
+	intel_fb->obj->framebuffer_references++;
+
+	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+	if (ret) {
+		DRM_ERROR("framebuffer init failed %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+			      struct drm_file *filp,
+			      struct drm_mode_fb_cmd2 *user_mode_cmd)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+						mode_cmd.handles[0]));
+	if (&obj->base == NULL)
+		return ERR_PTR(-ENOENT);
+
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+#ifndef CONFIG_DRM_FBDEV_EMULATION
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+	.fb_create = intel_user_framebuffer_create,
+	.output_poll_changed = intel_fbdev_output_poll_changed,
+	.atomic_check = intel_atomic_check,
+	.atomic_commit = intel_atomic_commit,
+	.atomic_state_alloc = intel_atomic_state_alloc,
+	.atomic_state_clear = intel_atomic_state_clear,
+};
+
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
+		dev_priv->display.find_dpll = g4x_find_best_dpll;
+	else if (IS_CHERRYVIEW(dev))
+		dev_priv->display.find_dpll = chv_find_best_dpll;
+	else if (IS_VALLEYVIEW(dev))
+		dev_priv->display.find_dpll = vlv_find_best_dpll;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.find_dpll = pnv_find_best_dpll;
+	else
+		dev_priv->display.find_dpll = i9xx_find_best_dpll;
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			skylake_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock =
+			haswell_crtc_compute_clock;
+		dev_priv->display.crtc_enable = haswell_crtc_enable;
+		dev_priv->display.crtc_disable = haswell_crtc_disable;
+		dev_priv->display.update_primary_plane =
+			skylake_update_primary_plane;
+	} else if (HAS_DDI(dev)) {
+		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			ironlake_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock =
+			haswell_crtc_compute_clock;
+		dev_priv->display.crtc_enable = haswell_crtc_enable;
+		dev_priv->display.crtc_disable = haswell_crtc_disable;
+		dev_priv->display.update_primary_plane =
+			ironlake_update_primary_plane;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			ironlake_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock =
+			ironlake_crtc_compute_clock;
+		dev_priv->display.crtc_enable = ironlake_crtc_enable;
+		dev_priv->display.crtc_disable = ironlake_crtc_disable;
+		dev_priv->display.update_primary_plane =
+			ironlake_update_primary_plane;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+		dev_priv->display.crtc_enable = valleyview_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		dev_priv->display.update_primary_plane =
+			i9xx_update_primary_plane;
+	} else {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		dev_priv->display.update_primary_plane =
+			i9xx_update_primary_plane;
+	}
+
+	/* Returns the core display clock speed */
+	if (IS_SKYLAKE(dev))
+		dev_priv->display.get_display_clock_speed =
+			skylake_get_display_clock_speed;
+	else if (IS_BROXTON(dev))
+		dev_priv->display.get_display_clock_speed =
+			broxton_get_display_clock_speed;
+	else if (IS_BROADWELL(dev))
+		dev_priv->display.get_display_clock_speed =
+			broadwell_get_display_clock_speed;
+	else if (IS_HASWELL(dev))
+		dev_priv->display.get_display_clock_speed =
+			haswell_get_display_clock_speed;
+	else if (IS_VALLEYVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			valleyview_get_display_clock_speed;
+	else if (IS_GEN5(dev))
+		dev_priv->display.get_display_clock_speed =
+			ilk_get_display_clock_speed;
+	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
+		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+		dev_priv->display.get_display_clock_speed =
+			i945_get_display_clock_speed;
+	else if (IS_GM45(dev))
+		dev_priv->display.get_display_clock_speed =
+			gm45_get_display_clock_speed;
+	else if (IS_CRESTLINE(dev))
+		dev_priv->display.get_display_clock_speed =
+			i965gm_get_display_clock_speed;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			pnv_get_display_clock_speed;
+	else if (IS_G33(dev) || IS_G4X(dev))
+		dev_priv->display.get_display_clock_speed =
+			g33_get_display_clock_speed;
+	else if (IS_I915G(dev))
+		dev_priv->display.get_display_clock_speed =
+			i915_get_display_clock_speed;
+	else if (IS_I945GM(dev) || IS_845G(dev))
+		dev_priv->display.get_display_clock_speed =
+			i9xx_misc_get_display_clock_speed;
+	else if (IS_I915GM(dev))
+		dev_priv->display.get_display_clock_speed =
+			i915gm_get_display_clock_speed;
+	else if (IS_I865G(dev))
+		dev_priv->display.get_display_clock_speed =
+			i865_get_display_clock_speed;
+	else if (IS_I85X(dev))
+		dev_priv->display.get_display_clock_speed =
+			i85x_get_display_clock_speed;
+	else { /* 830 */
+		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
+		dev_priv->display.get_display_clock_speed =
+			i830_get_display_clock_speed;
+	}
+
+	if (IS_GEN5(dev)) {
+		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+	} else if (IS_GEN6(dev)) {
+		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+	} else if (IS_IVYBRIDGE(dev)) {
+		/* FIXME: detect B0+ stepping and use auto training */
+		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		if (IS_BROADWELL(dev)) {
+			dev_priv->display.modeset_commit_cdclk =
+				broadwell_modeset_commit_cdclk;
+			dev_priv->display.modeset_calc_cdclk =
+				broadwell_modeset_calc_cdclk;
+		}
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.modeset_commit_cdclk =
+			valleyview_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			valleyview_modeset_calc_cdclk;
+	} else if (IS_BROXTON(dev)) {
+		dev_priv->display.modeset_commit_cdclk =
+			broxton_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			broxton_modeset_calc_cdclk;
+	}
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	case 9:
+		/* Drop through - unsupported since execlist only. */
+	default:
+		/* Default just returns -ENODEV to indicate unsupported */
+		dev_priv->display.queue_flip = intel_default_queue_flip;
+	}
+
+	mutex_init(&dev_priv->pps_mutex);
+}
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times.  This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+	DRM_INFO("applying pipe a force quirk\n");
+}
+
+static void quirk_pipeb_force(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
+	DRM_INFO("applying pipe b force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+	DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+	DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+	DRM_INFO("applying backlight present quirk\n");
+}
+
+struct intel_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	void (*hook)(struct drm_device *dev);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+	void (*hook)(struct drm_device *dev);
+	const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+	return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+	{
+		.dmi_id_list = &(const struct dmi_system_id[]) {
+			{
+				.callback = intel_dmi_reverse_brightness,
+				.ident = "NCR Corporation",
+				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
+				},
+			},
+			{ }  /* terminating entry */
+		},
+		.hook = quirk_invert_brightness,
+	},
+};
+
+static struct intel_quirk intel_quirks[] = {
+	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+	/* 830 needs to leave pipe A & dpll A up */
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+	/* 830 needs to leave pipe B & dpll B up */
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
+
+	/* Lenovo U160 cannot use SSC on LVDS */
+	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+	/* Sony Vaio Y cannot use SSC on LVDS */
+	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Acer Aspire 5336 */
+	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Acer C720 Chromebook (Core i3 4005U) */
+	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Apple Macbook 2,1 (Core 2 T7400) */
+	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+	/* Toshiba CB35 Chromebook (Celeron 2955U) */
+	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+	/* HP Chromebook 14 (Celeron 2955U) */
+	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+	/* Dell Chromebook 11 */
+	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+	/* Dell Chromebook 11 (2015 version) */
+	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+	struct pci_dev *d = dev->pdev;
+	int i;
+
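+	/* PCI_ANY_ID in a quirk entry acts as a wildcard for that field */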
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+		struct intel_quirk *q = &intel_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID))
+			q->hook(dev);
+	}
+	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+			intel_dmi_quirks[i].hook(dev);
+	}
+}
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 sr1;
+	u32 vga_reg = i915_vgacntrl_reg(dev);
+
+	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
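+	/* Set the "screen off" bit (bit 5) in VGA sequencer register SR01 */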
+	outb(SR01, VGA_SR_INDEX);
+	sr1 = inb(VGA_SR_DATA);
+	outb(sr1 | 1<<5, VGA_SR_DATA);
+	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	udelay(300);
+
+	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+	POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+	intel_update_cdclk(dev);
+	intel_prepare_ddi(dev);
+	intel_init_clock_gating(dev);
+	intel_enable_gt_powersave(dev);
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int sprite, ret;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	dev->mode_config.allow_fb_modifiers = true;
+
+	dev->mode_config.funcs = &intel_mode_funcs;
+
+	intel_init_quirks(dev);
+
+	intel_init_pm(dev);
+
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return;
+
+	/*
+	 * There may be no VBT; and if the BIOS enabled SSC we can
+	 * just keep using it to avoid unnecessary flicker.  Whereas if the
+	 * BIOS isn't using it, don't assume it will work even if the VBT
+	 * indicates as much.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+					    DREF_SSC1_ENABLE);
+
+		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+				     bios_lvds_use_ssc ? "en" : "dis",
+				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+		}
+	}
+
+	intel_init_display(dev);
+	intel_init_audio(dev);
+
+	if (IS_GEN2(dev)) {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	} else if (IS_GEN3(dev)) {
+		dev->mode_config.max_width = 4096;
+		dev->mode_config.max_height = 4096;
+	} else {
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
+	}
+
+	if (IS_845G(dev) || IS_I865G(dev)) {
+		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+		dev->mode_config.cursor_height = 1023;
+	} else if (IS_GEN2(dev)) {
+		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
+		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+	} else {
+		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
+		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+	}
+
+	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+
+	DRM_DEBUG_KMS("%d display pipe%s available.\n",
+		      INTEL_INFO(dev)->num_pipes,
+		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+
+	for_each_pipe(dev_priv, pipe) {
+		intel_crtc_init(dev, pipe);
+		for_each_sprite(dev_priv, pipe, sprite) {
+			ret = intel_plane_init(dev, pipe, sprite);
+			if (ret)
+				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
+					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
+		}
+	}
+
+	intel_update_czclk(dev_priv);
+	intel_update_cdclk(dev);
+
+	intel_shared_dpll_init(dev);
+
+	/* Just disable it once at startup */
+	i915_disable_vga(dev);
+	intel_setup_outputs(dev);
+
+	/* Just in case the BIOS is doing something questionable. */
+	intel_fbc_disable(dev_priv);
+
+	drm_modeset_lock_all(dev);
+	intel_modeset_setup_hw_state(dev);
+	drm_modeset_unlock_all(dev);
+
+	for_each_intel_crtc(dev, crtc) {
+		struct intel_initial_plane_config plane_config = {};
+
+		if (!crtc->active)
+			continue;
+
+		/*
+		 * Note that reserving the BIOS fb up front prevents us
+		 * from stuffing other stolen allocations like the ring
+		 * on top.  This prevents some ugliness at boot time, and
+		 * can even allow for smooth boot transitions if the BIOS
+		 * fb is large enough for the active pipe configuration.
+		 */
+		dev_priv->display.get_initial_plane_config(crtc,
+							   &plane_config);
+
+		/*
+		 * If the fb is shared between multiple heads, we'll
+		 * just get the first one.
+		 */
+		intel_find_initial_plane_obj(crtc, &plane_config);
+	}
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector *crt = NULL;
+	struct intel_load_detect_pipe load_detect_temp;
+	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
+
+	/* We can't just switch on pipe A; we need to set things up with a
+	 * proper mode and output configuration. As a gross hack, enable pipe A
+	 * by enabling the load detect pipe once. */
+	for_each_intel_connector(dev, connector) {
+		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+			crt = &connector->base;
+			break;
+		}
+	}
+
+	if (!crt)
+		return;
+
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
+}
+
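+/*
+ * Gen2/3 have a configurable plane -> pipe mapping. Returns false when the
+ * *other* plane is enabled and selects this crtc's pipe, i.e. when the
+ * recorded mapping must be wrong.
+ */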
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (INTEL_INFO(dev)->num_pipes == 1)
+		return true;
+
+	val = I915_READ(DSPCNTR(!crtc->plane));
+
+	if ((val & DISPLAY_PLANE_ENABLE) &&
+	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+		return false;
+
+	return true;
+}
+
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct intel_encoder *encoder;
+
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		return true;
+
+	return false;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+
+	/* Clear any frame start delays used for debugging left by the BIOS */
+	reg = PIPECONF(crtc->config->cpu_transcoder);
+	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+	/* restore vblank interrupts to correct state */
+	drm_crtc_vblank_reset(&crtc->base);
+	if (crtc->active) {
+		struct intel_plane *plane;
+
+		drm_crtc_vblank_on(&crtc->base);
+
+		/* Disable everything but the primary plane */
+		for_each_intel_plane_on_crtc(dev, crtc, plane) {
+			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+				continue;
+
+			plane->disable_plane(&plane->base, &crtc->base);
+		}
+	}
+
+	/* We need to sanitize the plane -> pipe mapping first because this will
+	 * disable the crtc (and hence change the state) if it is wrong. Note
+	 * that gen4+ has a fixed plane -> pipe mapping.  */
+	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+		bool plane;
+
+		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+			      crtc->base.base.id);
+
+		/* Pipe has the wrong plane attached and the plane is active.
+		 * Temporarily change the plane mapping and disable everything
+		 * ...  */
+		plane = crtc->plane;
+		to_intel_plane_state(crtc->base.primary->state)->visible = true;
+		crtc->plane = !plane;
+		intel_crtc_disable_noatomic(&crtc->base);
+		crtc->plane = plane;
+	}
+
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A, this mostly happens after
+		 * resume. Force-enable the pipe to fix this; in the update_dpms
+		 * call below we restore the pipe to the right state, but leave
+		 * the required bits on. */
+		intel_enable_pipe_a(dev);
+	}
+
+	/* Adjust the state of the output pipe according to whether we
+	 * have active connectors/encoders. */
+	if (!intel_crtc_has_encoders(crtc))
+		intel_crtc_disable_noatomic(&crtc->base);
+
+	if (crtc->active != crtc->base.state->active) {
+		struct intel_encoder *encoder;
+
+		/* This can happen either due to bugs in the get_hw_state
+		 * functions or because of calls to intel_crtc_disable_noatomic,
+		 * or because the pipe is force-enabled due to the
+		 * pipe A quirk. */
+		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+			      crtc->base.base.id,
+			      crtc->base.state->enable ? "enabled" : "disabled",
+			      crtc->active ? "enabled" : "disabled");
+
+		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
+		crtc->base.state->active = crtc->active;
+		crtc->base.enabled = crtc->active;
+
+		/* Because we only establish the connector -> encoder ->
+		 * crtc links if something is active, this means the
+		 * crtc is now deactivated. Break the links. connector
+		 * -> encoder links are only established when things are
+		 *  actually up, hence no need to break them. */
+		WARN_ON(crtc->active);
+
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+			encoder->base.crtc = NULL;
+	}
+
+	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+		/*
+		 * We start out with underrun reporting disabled to avoid races.
+		 * For correct bookkeeping mark this on active crtcs.
+		 *
+		 * Also on gmch platforms we don't have any hardware bits to
+		 * disable the underrun reporting. Which means we need to start
+		 * out with underrun reporting disabled also on inactive pipes,
+		 * since otherwise we'll complain about the garbage we read when
+		 * e.g. coming up after runtime pm.
+		 *
+		 * No protection against concurrent access is required - at
+		 * worst a fifo underrun happens which also sets this to false.
+		 */
+		crtc->cpu_fifo_underrun_disabled = true;
+		crtc->pch_fifo_underrun_disabled = true;
+	}
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+	struct intel_connector *connector;
+	struct drm_device *dev = encoder->base.dev;
+	bool active = false;
+
+	/* We need to check both for a crtc link (meaning that the
+	 * encoder is active and trying to read from a pipe) and the
+	 * pipe itself being active. */
+	bool has_active_crtc = encoder->base.crtc &&
+		to_intel_crtc(encoder->base.crtc)->active;
+
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder != &encoder->base)
+			continue;
+
+		active = true;
+		break;
+	}
+
+	if (active && !has_active_crtc) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+			      encoder->base.base.id,
+			      encoder->base.name);
+
+		/* Connector is active, but has no active pipe. This is
+		 * fallout from our resume register restoring. Disable
+		 * the encoder manually again. */
+		if (encoder->base.crtc) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+				      encoder->base.base.id,
+				      encoder->base.name);
+			encoder->disable(encoder);
+			if (encoder->post_disable)
+				encoder->post_disable(encoder);
+		}
+		encoder->base.crtc = NULL;
+
+		/* Inconsistent output/port/pipe state happens presumably due to
+		 * a bug in one of the get_hw_state functions. Or someplace else
+		 * in our code, like the register restore mess on resume. Clamp
+		 * things to off as a safer default. */
+		for_each_intel_connector(dev, connector) {
+			if (connector->encoder != encoder)
+				continue;
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+	}
+	/* Enabled encoders without active connectors will be fixed in
+	 * the crtc fixup. */
+}
+
+void i915_redisable_vga_power_on(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 vga_reg = i915_vgacntrl_reg(dev);
+
+	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+		i915_disable_vga(dev);
+	}
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* This function can be called both from intel_modeset_setup_hw_state or
+	 * at a very early point in our resume sequence, where the power well
+	 * structures are not yet restored. Since this function is at a very
+	 * paranoid "someone might have enabled VGA while we were not looking"
+	 * level, just check if the power well is enabled instead of trying to
+	 * follow the "don't touch the power well if we don't need it" policy
+	 * the rest of the driver uses. */
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+		return;
+
+	i915_redisable_vga_power_on(dev);
+}
+
+static bool primary_get_hw_state(struct intel_plane *plane)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
+}
+
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct intel_crtc *crtc)
+{
+	struct drm_plane *primary = crtc->base.primary;
+	struct intel_plane_state *plane_state =
+		to_intel_plane_state(primary->state);
+
+	plane_state->visible =
+		primary_get_hw_state(to_intel_plane(primary));
+
+	if (plane_state->visible)
+		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
+}
+
+static void intel_modeset_readout_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	int i;
+
+	for_each_intel_crtc(dev, crtc) {
+		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
+		memset(crtc->config, 0, sizeof(*crtc->config));
+		crtc->config->base.crtc = &crtc->base;
+
+		crtc->active = dev_priv->display.get_pipe_config(crtc,
+								 crtc->config);
+
+		crtc->base.state->active = crtc->active;
+		crtc->base.enabled = crtc->active;
+
+		readout_plane_state(crtc);
+
+		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+			      crtc->base.base.id,
+			      crtc->active ? "enabled" : "disabled");
+	}
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		pll->on = pll->get_hw_state(dev_priv, pll,
+					    &pll->config.hw_state);
+		pll->active = 0;
+		pll->config.crtc_mask = 0;
+		for_each_intel_crtc(dev, crtc) {
+			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
+				pll->active++;
+				pll->config.crtc_mask |= 1 << crtc->pipe;
+			}
+		}
+
+		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+			      pll->name, pll->config.crtc_mask, pll->on);
+
+		if (pll->config.crtc_mask)
+			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+	}
+
+	for_each_intel_encoder(dev, encoder) {
+		pipe = 0;
+
+		if (encoder->get_hw_state(encoder, &pipe)) {
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			encoder->base.crtc = &crtc->base;
+			encoder->get_config(encoder, crtc->config);
+		} else {
+			encoder->base.crtc = NULL;
+		}
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+			      encoder->base.base.id,
+			      encoder->base.name,
+			      encoder->base.crtc ? "enabled" : "disabled",
+			      pipe_name(pipe));
+	}
+
+	for_each_intel_connector(dev, connector) {
+		if (connector->get_hw_state(connector)) {
+			connector->base.dpms = DRM_MODE_DPMS_ON;
+			connector->base.encoder = &connector->encoder->base;
+		} else {
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+			      connector->base.base.id,
+			      connector->base.name,
+			      connector->base.encoder ? "enabled" : "disabled");
+	}
+
+	for_each_intel_crtc(dev, crtc) {
+		crtc->base.hwmode = crtc->config->base.adjusted_mode;
+
+		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+		if (crtc->base.state->active) {
+			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+			/*
+			 * The initial mode needs to be set in order to keep
+			 * the atomic core happy. It wants a valid mode if the
+			 * crtc's enabled, so we do the above call.
+			 *
+			 * At this point some state updated by the connectors
+			 * in their ->detect() callback has not run yet, so
+			 * no recalculation can be done yet.
+			 *
+			 * Even if we could do a recalculation and modeset
+			 * right now it would cause a double modeset if
+			 * fbdev or userspace chooses a different initial mode.
+			 *
+			 * If that happens, someone indicated they wanted a
+			 * mode change, which means it's safe to do a full
+			 * recalculation.
+			 */
+			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+
+			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
+			update_scanline_offset(crtc);
+		}
+	}
+}
+
+/* Scan out the current hw modeset state
+ * and sanitize it to the current state.
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	int i;
+
+	intel_modeset_readout_hw_state(dev);
+
+	/* HW state is read out, now we need to sanitize this mess. */
+	for_each_intel_encoder(dev, encoder) {
+		intel_sanitize_encoder(encoder);
+	}
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		intel_sanitize_crtc(crtc);
+		intel_dump_pipe_config(crtc, crtc->config,
+				       "[setup_hw_state]");
+	}
+
+	intel_modeset_update_connector_atomic_state(dev);
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		if (!pll->on || pll->active)
+			continue;
+
+		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+		pll->disable(dev_priv, pll);
+		pll->on = false;
+	}
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_wm_get_hw_state(dev);
+	else if (IS_GEN9(dev))
+		skl_wm_get_hw_state(dev);
+	else if (HAS_PCH_SPLIT(dev))
+		ilk_wm_get_hw_state(dev);
+
+	for_each_intel_crtc(dev, crtc) {
+		unsigned long put_domains;
+
+		put_domains = modeset_get_crtc_power_domains(&crtc->base);
+		if (WARN_ON(put_domains))
+			modeset_put_power_domains(dev_priv, put_domains);
+	}
+	intel_display_set_init_power(dev_priv, false);
+}
+
+void intel_display_resume(struct drm_device *dev)
+{
+	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+	struct intel_connector *conn;
+	struct intel_plane *plane;
+	struct drm_crtc *crtc;
+	int ret;
+
+	if (!state)
+		return;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+	/* preserve complete old state, including dpll */
+	intel_atomic_get_shared_dpll_state(state);
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
+	}
+
+	for_each_intel_plane(dev, plane) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+		if (ret)
+			goto err;
+	}
+
+	for_each_intel_connector(dev, conn) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+		if (ret)
+			goto err;
+	}
+
+	intel_modeset_setup_hw_state(dev);
+
+	i915_redisable_vga(dev);
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_free(state);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+	struct drm_crtc *c;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_init_gt_powersave(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_modeset_init_hw(dev);
+
+	intel_setup_overlay(dev);
+
+	/*
+	 * Make sure any fbs we allocated at startup are properly
+	 * pinned & fenced.  When we do the allocation it's too early
+	 * for this.
+	 */
+	for_each_crtc(dev, c) {
+		obj = intel_fb_obj(c->primary->fb);
+		if (obj == NULL)
+			continue;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = intel_pin_and_fence_fb_obj(c->primary,
+						 c->primary->fb,
+						 c->primary->state,
+						 NULL, NULL);
+		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			DRM_ERROR("failed to pin boot fb on pipe %d\n",
+				  to_intel_crtc(c)->pipe);
+			drm_framebuffer_unreference(c->primary->fb);
+			c->primary->fb = NULL;
+			c->primary->crtc = c->primary->state->crtc = NULL;
+			update_state_fb(c->primary);
+			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
+		}
+	}
+
+	intel_backlight_register(dev);
+}
+
+void intel_connector_unregister(struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+
+	intel_panel_destroy_backlight(connector);
+	drm_connector_unregister(connector);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
+
+	intel_disable_gt_powersave(dev);
+
+	intel_backlight_unregister(dev);
+
+	/*
+	 * Disable interrupts and polling first, to avoid creating havoc.
+	 * Too much stuff here (turning off connectors, ...) would
+	 * experience fancy races otherwise.
+	 */
+	intel_irq_uninstall(dev_priv);
+
+	/*
+	 * Due to the hpd irq storm handling the hotplug work can re-arm the
+	 * poll handlers. Hence disable polling after hpd handling is shut down.
+	 */
+	drm_kms_helper_poll_fini(dev);
+
+	intel_unregister_dsm_handler();
+
+	intel_fbc_disable(dev_priv);
+
+	/* flush any delayed tasks or pending work */
+	flush_scheduled_work();
+
+	/* destroy the backlight and sysfs files before encoders/connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct intel_connector *intel_connector;
+
+		intel_connector = to_intel_connector(connector);
+		intel_connector->unregister(intel_connector);
+	}
+
+	drm_mode_config_cleanup(dev);
+
+	intel_cleanup_overlay(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_cleanup_gt_powersave(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_teardown_gmbus(dev);
+}
+
+/*
+ * Return which encoder is currently attached to the connector.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+	return &intel_attached_encoder(connector)->base;
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_mode_connector_attach_encoder(&connector->base,
+					  &encoder->base);
+}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+	u16 gmch_ctrl;
+
+	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+		DRM_ERROR("failed to read control word\n");
+		return -EIO;
+	}
+
+	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
+		return 0;
+
+	if (state)
+		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+	else
+		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+		DRM_ERROR("failed to write control word\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+struct intel_display_error_state {
+
+	u32 power_well_driver;
+
+	int num_transcoders;
+
+	struct intel_cursor_error_state {
+		u32 control;
+		u32 position;
+		u32 base;
+		u32 size;
+	} cursor[I915_MAX_PIPES];
+
+	struct intel_pipe_error_state {
+		bool power_domain_on;
+		u32 source;
+		u32 stat;
+	} pipe[I915_MAX_PIPES];
+
+	struct intel_plane_error_state {
+		u32 control;
+		u32 stride;
+		u32 size;
+		u32 pos;
+		u32 addr;
+		u32 surface;
+		u32 tile_offset;
+	} plane[I915_MAX_PIPES];
+
+	struct intel_transcoder_error_state {
+		bool power_domain_on;
+		enum transcoder cpu_transcoder;
+
+		u32 conf;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} transcoder[4];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_display_error_state *error;
+	int transcoders[] = {
+		TRANSCODER_A,
+		TRANSCODER_B,
+		TRANSCODER_C,
+		TRANSCODER_EDP,
+	};
+	int i;
+
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return NULL;
+
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+
+	for_each_pipe(dev_priv, i) {
+		error->pipe[i].power_domain_on =
+			__intel_display_power_is_enabled(dev_priv,
+							 POWER_DOMAIN_PIPE(i));
+		if (!error->pipe[i].power_domain_on)
+			continue;
+
+		error->cursor[i].control = I915_READ(CURCNTR(i));
+		error->cursor[i].position = I915_READ(CURPOS(i));
+		error->cursor[i].base = I915_READ(CURBASE(i));
+
+		error->plane[i].control = I915_READ(DSPCNTR(i));
+		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		if (INTEL_INFO(dev)->gen <= 3) {
+			error->plane[i].size = I915_READ(DSPSIZE(i));
+			error->plane[i].pos = I915_READ(DSPPOS(i));
+		}
+		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+			error->plane[i].addr = I915_READ(DSPADDR(i));
+		if (INTEL_INFO(dev)->gen >= 4) {
+			error->plane[i].surface = I915_READ(DSPSURF(i));
+			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+		}
+
+		error->pipe[i].source = I915_READ(PIPESRC(i));
+
+		if (HAS_GMCH_DISPLAY(dev))
+			error->pipe[i].stat = I915_READ(PIPESTAT(i));
+	}
+
+	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	if (HAS_DDI(dev_priv->dev))
+		error->num_transcoders++; /* Account for eDP. */
+
+	for (i = 0; i < error->num_transcoders; i++) {
+		enum transcoder cpu_transcoder = transcoders[i];
+
+		error->transcoder[i].power_domain_on =
+			__intel_display_power_is_enabled(dev_priv,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+		if (!error->transcoder[i].power_domain_on)
+			continue;
+
+		error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+	}
+
+	return error;
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+void
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
+				struct drm_device *dev,
+				struct intel_display_error_state *error)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!error)
+		return;
+
+	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		err_printf(m, "PWR_WELL_CTL2: %08x\n",
+			   error->power_well_driver);
+	for_each_pipe(dev_priv, i) {
+		err_printf(m, "Pipe [%d]:\n", i);
+		err_printf(m, "  Power: %s\n",
+			   error->pipe[i].power_domain_on ? "on" : "off");
+		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
+
+		err_printf(m, "Plane [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		if (INTEL_INFO(dev)->gen <= 3) {
+			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
+		}
+		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+		}
+
+		err_printf(m, "Cursor [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+	}
+
+	for (i = 0; i < error->num_transcoders; i++) {
+		err_printf(m, "CPU transcoder: %c\n",
+			   transcoder_name(error->transcoder[i].cpu_transcoder));
+		err_printf(m, "  Power: %s\n",
+			   error->transcoder[i].power_domain_on ? "on" : "off");
+		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+	}
+}
+
+void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct intel_unpin_work *work;
+
+		spin_lock_irq(&dev->event_lock);
+
+		work = crtc->unpin_work;
+
+		if (work && work->event &&
+		    work->event->base.file_priv == file) {
+			kfree(work->event);
+			work->event = NULL;
+		}
+
+		spin_unlock_irq(&dev->event_lock);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 0000000..0f8367d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,6234 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+/* Compliance test status bits  */
+#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
+#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+
+struct dp_link_dpll {
+	int clock;
+	struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+	{ 162000,
+		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+	{ 270000,
+		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+	{ 162000,
+		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+	{ 270000,
+		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+	{ 162000,
+		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+	{ 270000,
+		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
+/*
+ * CHV supports eDP 1.4, which has more link rates.
+ * Below we only provide the fixed rates and exclude the variable rates.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+	/*
+	 * CHV requires programming fractional division for m2.
+	 * m2 is stored in fixed point format using the formula below:
+	 * (m2_int << 22) | m2_fraction
+	 */
+	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
+		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
+		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
+		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+};
+
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+				  324000, 432000, 540000 };
+static const int skl_rates[] = { 162000, 216000, 270000,
+				  324000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
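+
+/*
+ * The rates above are link symbol clocks in kHz: 162000, 270000 and
+ * 540000 correspond to the RBR, HBR and HBR2 rates of 1.62, 2.7 and
+ * 5.4 Gbit/s per lane (10 bits per symbol). See the unit discussion
+ * above intel_dp_link_required() for how these values are used.
+ */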
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true; otherwise it returns false.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+}
+
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.base.dev;
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+}
+
+static void intel_dp_link_down(struct intel_dp *intel_dp);
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+				      enum pipe pipe);
+
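+/*
+ * A quick worked example for the helper below: with lane_count == 2,
+ * (1 << 2) - 1 == 0x3 marks the used lanes, so the unused-lane mask
+ * over the four possible lanes is ~0x3 & 0xf == 0xc (lanes 2 and 3).
+ */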
+static unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+	return ~((1 << lane_count) - 1) & 0xf;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
+{
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+	case DP_LINK_BW_5_4:
+		break;
+	default:
+		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+		     max_link_bw);
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	u8 source_max, sink_max;
+
+	source_max = 4;
+	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+		source_max = 2;
+
+	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+	return min(source_max, sink_max);
+}
+
+/*
+ * The units on the numbers in the next two are... bizarre.  Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ *     270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000.  At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
+static int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 9) / 10;
+}
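+
+/*
+ * Continuing the example above: intel_dp_link_required(119000, 18) ==
+ * (119000 * 18 + 9) / 10 == 214200 decakilobits, which just fits in the
+ * 216000 provided by a single 2.7GHz lane.
+ */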
+
+static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 8) / 10;
+}
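+
+/*
+ * And at the other end of the scale: four lanes of HBR2 provide
+ * intel_dp_max_data_rate(540000, 4) == 540000 * 4 * 8 / 10 == 1728000
+ * decakilobits, enough for a 148500 kHz mode at 24bpp, which needs
+ * intel_dp_link_required(148500, 24) == 356400.
+ */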
+
+static enum drm_mode_status
+intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int target_clock = mode->clock;
+	int max_rate, mode_rate, max_lanes, max_link_clock;
+
+	if (is_edp(intel_dp) && fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+
+		target_clock = fixed_mode->clock;
+	}
+
+	max_link_clock = intel_dp_max_link_rate(intel_dp);
+	max_lanes = intel_dp_max_lane_count(intel_dp);
+
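+	/*
+	 * Check the mode against the link at 18bpp (6 bpc), the lowest bpp
+	 * that intel_dp_compute_config() can fall back to; a mode that does
+	 * not fit even at 18bpp can never be driven.
+	 */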
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+	mode_rate = intel_dp_link_required(target_clock, 18);
+
+	if (mode_rate > max_rate)
+		return MODE_CLOCK_HIGH;
+
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
+	return MODE_OK;
+}
+
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
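+
+/*
+ * For example, packing the bytes { 0x11, 0x22, 0x33, 0x44 } yields
+ * 0x11223344: the first byte lands in the most significant position,
+ * mirroring how intel_dp_unpack_aux() below extracts them again.
+ */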
+
+static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp);
+
+static void pps_lock(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+
+	/*
+	 * See vlv_power_sequencer_reset() for why we need
+	 * a power domain reference here.
+	 */
+	power_domain = intel_display_port_aux_power_domain(encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	mutex_lock(&dev_priv->pps_mutex);
+}
+
+static void pps_unlock(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+
+	mutex_unlock(&dev_priv->pps_mutex);
+
+	power_domain = intel_display_port_aux_power_domain(encoder);
+	intel_display_power_put(dev_priv, power_domain);
+}
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_dp->pps_pipe;
+	bool pll_enabled, release_cl_override = false;
+	enum dpio_phy phy = DPIO_PHY(pipe);
+	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+	uint32_t DP;
+
+	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
+		 "skipping pipe %c power sequencer kick due to port %c being active\n",
+		 pipe_name(pipe), port_name(intel_dig_port->port)))
+		return;
+
+	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
+		      pipe_name(pipe), port_name(intel_dig_port->port));
+
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	DP |= DP_PORT_WIDTH(1);
+	DP |= DP_LINK_TRAIN_PAT_1;
+
+	if (IS_CHERRYVIEW(dev))
+		DP |= DP_PIPE_SELECT_CHV(pipe);
+	else if (pipe == PIPE_B)
+		DP |= DP_PIPEB_SELECT;
+
+	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+	/*
+	 * The DPLL for the pipe must be enabled for this to work.
+	 * So enable it temporarily if it's not already enabled.
+	 */
+	if (!pll_enabled) {
+		release_cl_override = IS_CHERRYVIEW(dev) &&
+			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+	}
+
+	/*
+	 * Similar magic as in intel_dp_enable_port().
+	 * We _must_ do this port enable + disable trick
+	 * to make this power sequencer lock onto the port.
+	 * Otherwise even VDD force bit won't work.
+	 */
+	I915_WRITE(intel_dp->output_reg, DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
+	POSTING_READ(intel_dp->output_reg);
+
+	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	POSTING_READ(intel_dp->output_reg);
+
+	if (!pll_enabled) {
+		vlv_force_pll_off(dev, pipe);
+
+		if (release_cl_override)
+			chv_phy_powergate_ch(dev_priv, phy, ch, false);
+	}
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+	enum pipe pipe;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* We should never land here with regular DP ports */
+	WARN_ON(!is_edp(intel_dp));
+
+	if (intel_dp->pps_pipe != INVALID_PIPE)
+		return intel_dp->pps_pipe;
+
+	/*
+	 * We don't have a power sequencer currently.
+	 * Pick one that's not used by other ports.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		struct intel_dp *tmp;
+
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		tmp = enc_to_intel_dp(&encoder->base);
+
+		if (tmp->pps_pipe != INVALID_PIPE)
+			pipes &= ~(1 << tmp->pps_pipe);
+	}
+
+	/*
+	 * Didn't find one. This should not happen since there
+	 * are two power sequencers and up to two eDP ports.
+	 */
+	if (WARN_ON(pipes == 0))
+		pipe = PIPE_A;
+	else
+		pipe = ffs(pipes) - 1;
+
+	vlv_steal_power_sequencer(dev, pipe);
+	intel_dp->pps_pipe = pipe;
+
+	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+		      pipe_name(intel_dp->pps_pipe),
+		      port_name(intel_dig_port->port));
+
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+	/*
+	 * Even vdd force doesn't work until we've made
+	 * the power sequencer lock in on the port.
+	 */
+	vlv_power_sequencer_kick(intel_dp);
+
+	return intel_dp->pps_pipe;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+			       enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+			       enum pipe pipe)
+{
+	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+				enum pipe pipe)
+{
+	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+			 enum pipe pipe)
+{
+	return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+		     enum port port,
+		     vlv_pipe_check pipe_check)
+{
+	enum pipe pipe;
+
+	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+			PANEL_PORT_SELECT_MASK;
+
+		if (port_sel != PANEL_PORT_SELECT_VLV(port))
+			continue;
+
+		if (!pipe_check(dev_priv, pipe))
+			continue;
+
+		return pipe;
+	}
+
+	return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* try to find a pipe with this port selected */
+	/* first pick one where the panel is on */
+	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+						  vlv_pipe_has_pp_on);
+	/* didn't find one? pick one where vdd is on */
+	if (intel_dp->pps_pipe == INVALID_PIPE)
+		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+							  vlv_pipe_has_vdd_on);
+	/* didn't find one? pick one with just the correct port */
+	if (intel_dp->pps_pipe == INVALID_PIPE)
+		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+							  vlv_pipe_any);
+
+	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+	if (intel_dp->pps_pipe == INVALID_PIPE) {
+		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
+			      port_name(port));
+		return;
+	}
+
+	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
+		      port_name(port), pipe_name(intel_dp->pps_pipe));
+
+	intel_dp_init_panel_power_sequencer(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+}
+
+void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_encoder *encoder;
+
+	if (WARN_ON(!IS_VALLEYVIEW(dev)))
+		return;
+
+	/*
+	 * We can't grab pps_mutex here due to deadlock with power_domain
+	 * mutex when power_domain functions are called while holding pps_mutex.
+	 * That also means that in order to use pps_pipe the code needs to
+	 * hold both a power domain reference and pps_mutex, and the power domain
+	 * reference get/put must be done while _not_ holding pps_mutex.
+	 * pps_{lock,unlock}() do these steps in the correct order, so they
+	 * should always be used.
+	 */
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		struct intel_dp *intel_dp;
+
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+		intel_dp->pps_pipe = INVALID_PIPE;
+	}
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (IS_BROXTON(dev))
+		return BXT_PP_CONTROL(0);
+	else if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_CONTROL;
+	else
+		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (IS_BROXTON(dev))
+		return BXT_PP_STATUS(0);
+	else if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_STATUS;
+	else
+		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
+/*
+ * Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
+ * This function is only applicable when the panel PM state is not tracked.
+ */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+			      void *unused)
+{
+	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
+						 edp_notifier);
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!is_edp(intel_dp) || code != SYS_RESTART)
+		return 0;
+
+	pps_lock(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+		u32 pp_ctrl_reg, pp_div_reg;
+		u32 pp_div;
+
+		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
+		pp_div = I915_READ(pp_div_reg);
+		pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+		/* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
+		I915_WRITE(pp_div_reg, pp_div | 0x1F);
+		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+		msleep(intel_dp->panel_power_cycle_delay);
+	}
+
+	pps_unlock(intel_dp);
+
+	return 0;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (IS_VALLEYVIEW(dev) &&
+	    intel_dp->pps_pipe == INVALID_PIPE)
+		return false;
+
+	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (IS_VALLEYVIEW(dev) &&
+	    intel_dp->pps_pipe == INVALID_PIPE)
+		return false;
+
+	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+		WARN(1, "eDP powered off while attempting aux channel communication.\n");
+		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+			      I915_READ(_pp_stat_reg(intel_dp)),
+			      I915_READ(_pp_ctrl_reg(intel_dp)));
+	}
+}
+
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+	uint32_t status;
+	bool done;
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+	if (has_aux_irq)
+		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+					  msecs_to_jiffies_timeout(10));
+	else
+		done = wait_for_atomic(C, 10) == 0;
+	if (!done)
+		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+			  has_aux_irq);
+#undef C
+
+	return status;
+}
+
+static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+
+	/*
+	 * The clock divider is based on the hrawclk and should run at 2MHz.
+	 * So take the hrawclk value, divide it by 2, and use that.
+	 */
+	return index ? 0 : intel_hrawclk(dev) / 2;
+}
+
+static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (index)
+		return 0;
+
+	if (intel_dig_port->port == PORT_A) {
+		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
+	} else {
+		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+	}
+}
+
+static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (intel_dig_port->port == PORT_A) {
+		if (index)
+			return 0;
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
+	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+		/* Workaround for non-ULT HSW */
+		switch (index) {
+		case 0: return 63;
+		case 1: return 72;
+		default: return 0;
+		}
+	} else {
+		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+	}
+}
+
+static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	return index ? 0 : 100;
+}
+
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+	/*
+	 * SKL doesn't need us to program the AUX clock divider (the hardware
+	 * derives the clock from CDCLK automatically). We still implement the
+	 * get_aux_clock_divider vfunc to plug into the existing code.
+	 */
+	return index ? 0 : 1;
+}
+
+static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+				      bool has_aux_irq,
+				      int send_bytes,
+				      uint32_t aux_clock_divider)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	uint32_t precharge, timeout;
+
+	if (IS_GEN6(dev))
+		precharge = 3;
+	else
+		precharge = 5;
+
+	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+	else
+		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+	return DP_AUX_CH_CTL_SEND_BUSY |
+	       DP_AUX_CH_CTL_DONE |
+	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
+	       timeout |
+	       DP_AUX_CH_CTL_RECEIVE_ERROR |
+	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+				      bool has_aux_irq,
+				      int send_bytes,
+				      uint32_t unused)
+{
+	return DP_AUX_CH_CTL_SEND_BUSY |
+	       DP_AUX_CH_CTL_DONE |
+	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
+	       DP_AUX_CH_CTL_TIME_OUT_1600us |
+	       DP_AUX_CH_CTL_RECEIVE_ERROR |
+	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+		const uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+	uint32_t ch_data = ch_ctl + 4;
+	uint32_t aux_clock_divider;
+	int i, ret, recv_bytes;
+	uint32_t status;
+	int try, clock = 0;
+	bool has_aux_irq = HAS_AUX_IRQ(dev);
+	bool vdd;
+
+	pps_lock(intel_dp);
+
+	/*
+	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
+	 * In such cases we want to leave VDD enabled and it's up to the upper
+	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
+	 * it on/off ourselves.
+	 */
+	vdd = edp_panel_vdd_on(intel_dp);
+
+	/* DP AUX is extremely sensitive to irq latency, hence request the
+	 * lowest possible wakeup latency to prevent the CPU from going into
+	 * deep sleep states.
+	 */
+	pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+	intel_dp_check_edp(intel_dp);
+
+	/* Try to wait for any previous AUX channel activity */
+	for (try = 0; try < 3; try++) {
+		status = I915_READ_NOTRACE(ch_ctl);
+		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+			break;
+		msleep(1);
+	}
+
+	if (try == 3) {
+		static u32 last_status = -1;
+		const u32 status = I915_READ(ch_ctl);
+
+		if (status != last_status) {
+			WARN(1, "dp_aux_ch not started status 0x%08x\n",
+			     status);
+			last_status = status;
+		}
+
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Only 5 data registers! */
+	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+		ret = -E2BIG;
+		goto out;
+	}
+
+	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+							  has_aux_irq,
+							  send_bytes,
+							  aux_clock_divider);
+
+		/* Must try at least 3 times according to DP spec */
+		for (try = 0; try < 5; try++) {
+			/* Load the send data into the aux channel data registers */
+			for (i = 0; i < send_bytes; i += 4)
+				I915_WRITE(ch_data + i,
+					   intel_dp_pack_aux(send + i,
+							     send_bytes - i));
+
+			/* Send the command and wait for it to complete */
+			I915_WRITE(ch_ctl, send_ctl);
+
+			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+			/* Clear done status and any errors */
+			I915_WRITE(ch_ctl,
+				   status |
+				   DP_AUX_CH_CTL_DONE |
+				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+				   DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+				continue;
+
+			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+			 *   400us delay required for errors and timeouts
+			 *   Timeout errors from the HW already meet this
+			 *   requirement so skip to next iteration
+			 */
+			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+				usleep_range(400, 500);
+				continue;
+			}
+			if (status & DP_AUX_CH_CTL_DONE)
+				goto done;
+		}
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		ret = -EBUSY;
+		goto out;
+	}
+
+done:
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	for (i = 0; i < recv_bytes; i += 4)
+		intel_dp_unpack_aux(I915_READ(ch_data + i),
+				    recv + i, recv_bytes - i);
+
+	ret = recv_bytes;
+out:
+	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	if (vdd)
+		edp_panel_vdd_off(intel_dp, false);
+
+	pps_unlock(intel_dp);
+
+	return ret;
+}
+
+#define BARE_ADDRESS_SIZE	3
+#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+	uint8_t txbuf[20], rxbuf[20];
+	size_t txsize, rxsize;
+	int ret;
+
+	txbuf[0] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
+	txbuf[1] = (msg->address >> 8) & 0xff;
+	txbuf[2] = msg->address & 0xff;
+	txbuf[3] = msg->size - 1;
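+	/*
+	 * The four bytes above form the DP AUX request header: a 4-bit
+	 * command, a 20-bit address and the transfer length minus one.
+	 * For instance, a 16-byte native read of address 0 sends
+	 * txbuf[] = { 0x90, 0x00, 0x00, 0x0f }.
+	 */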
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+		rxsize = 2; /* 0 or 1 data bytes */
+
+		if (WARN_ON(txsize > 20))
+			return -E2BIG;
+
+		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+		if (ret > 0) {
+			msg->reply = rxbuf[0] >> 4;
+
+			if (ret > 1) {
+				/* Number of bytes written in a short write. */
+				ret = clamp_t(int, rxbuf[1], 0, msg->size);
+			} else {
+				/* Return payload size. */
+				ret = msg->size;
+			}
+		}
+		break;
+
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+		rxsize = msg->size + 1;
+
+		if (WARN_ON(rxsize > 20))
+			return -E2BIG;
+
+		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+		if (ret > 0) {
+			msg->reply = rxbuf[0] >> 4;
+			/*
+			 * Assume happy day, and copy the data. The caller is
+			 * expected to check msg->reply before touching it.
+			 *
+			 * Return payload size.
+			 */
+			ret--;
+			memcpy(msg->buffer, rxbuf + 1, ret);
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static void
+intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	const char *name = NULL;
+	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+	int ret;
+
+	/* On SKL we don't have an AUX channel for port E, so we rely on VBT
+	 * to set a proper alternate AUX channel.
+	 */
+	if (IS_SKYLAKE(dev) && port == PORT_E) {
+		switch (info->alternate_aux_channel) {
+		case DP_AUX_B:
+			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+			break;
+		case DP_AUX_C:
+			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+			break;
+		case DP_AUX_D:
+			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+			break;
+		case DP_AUX_A:
+		default:
+			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+		}
+	}
+
+	switch (port) {
+	case PORT_A:
+		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+		name = "DPDDC-A";
+		break;
+	case PORT_B:
+		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+		name = "DPDDC-B";
+		break;
+	case PORT_C:
+		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+		name = "DPDDC-C";
+		break;
+	case PORT_D:
+		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+		name = "DPDDC-D";
+		break;
+	case PORT_E:
+		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+		name = "DPDDC-E";
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * The AUX_CTL register is usually DP_CTL + 0x10.
+	 *
+	 * On Haswell and Broadwell though:
+	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+	 *
+	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+	 */
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
+		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+
+	intel_dp->aux.name = name;
+	intel_dp->aux.dev = dev->dev;
+	intel_dp->aux.transfer = intel_dp_aux_transfer;
+
+	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
+		      connector->base.kdev->kobj.name);
+
+	ret = drm_dp_aux_register(&intel_dp->aux);
+	if (ret < 0) {
+		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
+			  name, ret);
+		return;
+	}
+
+	ret = sysfs_create_link(&connector->base.kdev->kobj,
+				&intel_dp->aux.ddc.dev.kobj,
+				intel_dp->aux.ddc.dev.kobj.name);
+	if (ret < 0) {
+		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
+		drm_dp_aux_unregister(&intel_dp->aux);
+	}
+}
+
+static void
+intel_dp_connector_unregister(struct intel_connector *intel_connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
+
+	if (!intel_connector->mst_port)
+		sysfs_remove_link(&intel_connector->base.kdev->kobj,
+				  intel_dp->aux.ddc.dev.kobj.name);
+	intel_connector_unregister(intel_connector);
+}
+
+static void
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
+{
+	u32 ctrl1;
+
+	memset(&pipe_config->dpll_hw_state, 0,
+	       sizeof(pipe_config->dpll_hw_state));
+
+	pipe_config->ddi_pll_sel = SKL_DPLL0;
+	pipe_config->dpll_hw_state.cfgcr1 = 0;
+	pipe_config->dpll_hw_state.cfgcr2 = 0;
+
+	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+	switch (pipe_config->port_clock / 2) {
+	case 81000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+					      SKL_DPLL0);
+		break;
+	case 135000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
+					      SKL_DPLL0);
+		break;
+	case 270000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
+					      SKL_DPLL0);
+		break;
+	case 162000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
+					      SKL_DPLL0);
+		break;
+	/*
+	 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640,
+	 * which results in a CDCLK change. The CDCLK change needs to be
+	 * handled by disabling the pipes and re-enabling them.
+	 */
+	case 108000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
+					      SKL_DPLL0);
+		break;
+	case 216000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
+					      SKL_DPLL0);
+		break;
+
+	}
+	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+}
+
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
+{
+	memset(&pipe_config->dpll_hw_state, 0,
+	       sizeof(pipe_config->dpll_hw_state));
+
+	switch (pipe_config->port_clock / 2) {
+	case 81000:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+		break;
+	case 135000:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+		break;
+	case 270000:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+		break;
+	}
+}
+
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+	if (intel_dp->num_sink_rates) {
+		*sink_rates = intel_dp->sink_rates;
+		return intel_dp->num_sink_rates;
+	}
+
+	*sink_rates = default_rates;
+
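+	/*
+	 * The DPCD link BW codes are 0x06, 0x0a and 0x14 for 1.62, 2.7 and
+	 * 5.4 GHz, so shifting right by 3 maps them to 0, 1 and 2; adding
+	 * one yields the number of usable entries in default_rates.
+	 */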
+	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+	/* WaDisableHBR2:skl */
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+		return false;
+
+	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+	    (INTEL_INFO(dev)->gen >= 9))
+		return true;
+	else
+		return false;
+}
+
+static int
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+{
+	int size;
+
+	if (IS_BROXTON(dev)) {
+		*source_rates = bxt_rates;
+		size = ARRAY_SIZE(bxt_rates);
+	} else if (IS_SKYLAKE(dev)) {
+		*source_rates = skl_rates;
+		size = ARRAY_SIZE(skl_rates);
+	} else {
+		*source_rates = default_rates;
+		size = ARRAY_SIZE(default_rates);
+	}
+
+	/* This depends on the fact that 5.4 is the last value in the array */
+	if (!intel_dp_source_supports_hbr2(dev))
+		size--;
+
+	return size;
+}
+
+static void
+intel_dp_set_clock(struct intel_encoder *encoder,
+		   struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	const struct dp_link_dpll *divisor = NULL;
+	int i, count = 0;
+
+	if (IS_G4X(dev)) {
+		divisor = gen4_dpll;
+		count = ARRAY_SIZE(gen4_dpll);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		divisor = pch_dpll;
+		count = ARRAY_SIZE(pch_dpll);
+	} else if (IS_CHERRYVIEW(dev)) {
+		divisor = chv_dpll;
+		count = ARRAY_SIZE(chv_dpll);
+	} else if (IS_VALLEYVIEW(dev)) {
+		divisor = vlv_dpll;
+		count = ARRAY_SIZE(vlv_dpll);
+	}
+
+	if (divisor && count) {
+		for (i = 0; i < count; i++) {
+			if (pipe_config->port_clock == divisor[i].clock) {
+				pipe_config->dpll = divisor[i].dpll;
+				pipe_config->clock_set = true;
+				break;
+			}
+		}
+	}
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+			   const int *sink_rates, int sink_len,
+			   int *common_rates)
+{
+	int i = 0, j = 0, k = 0;
+
+	while (i < source_len && j < sink_len) {
+		if (source_rates[i] == sink_rates[j]) {
+			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+				return k;
+			common_rates[k] = source_rates[i];
+			++k;
+			++i;
+			++j;
+		} else if (source_rates[i] < sink_rates[j]) {
+			++i;
+		} else {
+			++j;
+		}
+	}
+	return k;
+}
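+
+/*
+ * Illustrative merge: with source_rates = { 162000, 216000, 270000 } and
+ * sink_rates = { 162000, 270000, 540000 }, both sorted ascending, the
+ * two-pointer walk above produces common_rates = { 162000, 270000 } and
+ * returns 2.
+ */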
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+				 int *common_rates)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	const int *source_rates, *sink_rates;
+	int source_len, sink_len;
+
+	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+	source_len = intel_dp_source_rates(dev, &source_rates);
+
+	return intersect_rates(source_rates, source_len,
+			       sink_rates, sink_len,
+			       common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+			       const int *array, int nelem)
+{
+	int i;
+
+	str[0] = '\0';
+
+	for (i = 0; i < nelem; i++) {
+		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
+		if (r >= len)
+			return;
+		str += r;
+		len -= r;
+	}
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	const int *source_rates, *sink_rates;
+	int source_len, sink_len, common_len;
+	int common_rates[DP_MAX_SUPPORTED_RATES];
+	char str[128]; /* FIXME: too big for stack? */
+
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	source_len = intel_dp_source_rates(dev, &source_rates);
+	snprintf_int_array(str, sizeof(str), source_rates, source_len);
+	DRM_DEBUG_KMS("source rates: %s\n", str);
+
+	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+	DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+	common_len = intel_dp_common_rates(intel_dp, common_rates);
+	snprintf_int_array(str, sizeof(str), common_rates, common_len);
+	DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
+{
+	int i = 0;
+
+	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
+		if (find == rates[i])
+			break;
+
+	return i;
+}
+
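+/*
+ * Note for the helper below: common_rates[] is zero-initialized and
+ * filled in ascending order, so rate_to_index(0, rates) lands on the
+ * first empty slot, i.e. the number of valid entries, making
+ * rates[rate_to_index(0, rates) - 1] the highest common rate.
+ */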
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+	int rates[DP_MAX_SUPPORTED_RATES] = {};
+	int len;
+
+	len = intel_dp_common_rates(intel_dp, rates);
+	if (WARN_ON(len <= 0))
+		return 162000;
+
+	return rates[rate_to_index(0, rates) - 1];
+}
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+	return rate_to_index(rate, intel_dp->sink_rates);
+}
+
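+/*
+ * For example, a sink that lists explicit rates in DPCD gets link_bw = 0
+ * and rate_select set to the index of port_clock in sink_rates[], while
+ * a legacy sink asked for 270000 gets link_bw = 0x0a (DP_LINK_BW_2_7)
+ * and rate_select = 0.
+ */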
+static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+				  uint8_t *link_bw, uint8_t *rate_select)
+{
+	if (intel_dp->num_sink_rates) {
+		*link_bw = 0;
+		*rate_select =
+			intel_dp_rate_select(intel_dp, port_clock);
+	} else {
+		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
+		*rate_select = 0;
+	}
+}
+
+bool
+intel_dp_compute_config(struct intel_encoder *encoder,
+			struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	int lane_count, clock;
+	int min_lane_count = 1;
+	int max_lane_count = intel_dp_max_lane_count(intel_dp);
+	/* Conveniently, the link BW constants become indices with a shift...*/
+	int min_clock = 0;
+	int max_clock;
+	int bpp, mode_rate;
+	int link_avail, link_clock;
+	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+	int common_len;
+	uint8_t link_bw, rate_select;
+
+	common_len = intel_dp_common_rates(intel_dp, common_rates);
+
+	/* No common link rates between source and sink */
+	WARN_ON(common_len <= 0);
+
+	max_clock = common_len - 1;
+
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+		pipe_config->has_pch_encoder = true;
+
+	pipe_config->has_dp_encoder = true;
+	pipe_config->has_drrs = false;
+	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
+
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+				       adjusted_mode);
+
+		if (INTEL_INFO(dev)->gen >= 9) {
+			int ret;
+			ret = skl_update_scaler_crtc(pipe_config);
+			if (ret)
+				/* this function returns bool, not errno */
+				return false;
+		}
+
+		if (!HAS_PCH_SPLIT(dev))
+			intel_gmch_panel_fitting(intel_crtc, pipe_config,
+						 intel_connector->panel.fitting_mode);
+		else
+			intel_pch_panel_fitting(intel_crtc, pipe_config,
+						intel_connector->panel.fitting_mode);
+	}
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
+	DRM_DEBUG_KMS("DP link computation with max lane count %i "
+		      "max bw %d pixel clock %iKHz\n",
+		      max_lane_count, common_rates[max_clock],
+		      adjusted_mode->crtc_clock);
+
+	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
+	 * bpc in between. */
+	bpp = pipe_config->pipe_bpp;
+	if (is_edp(intel_dp)) {
+
+		/* Get bpp from VBT only for panels that don't have bpp in EDID */
+		if (intel_connector->base.display_info.bpc == 0 &&
+			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
+			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+				      dev_priv->vbt.edp_bpp);
+			bpp = dev_priv->vbt.edp_bpp;
+		}
+
+		/*
+		 * Use the maximum clock and number of lanes the eDP panel
+		 * advertises being capable of. The panels are generally
+		 * designed to support only a single clock and lane
+		 * configuration, and typically these values correspond to the
+		 * native resolution of the panel.
+		 */
+		min_lane_count = max_lane_count;
+		min_clock = max_clock;
+	}
+
+	for (; bpp >= 6*3; bpp -= 2*3) {
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
+
+		for (clock = min_clock; clock <= max_clock; clock++) {
+			for (lane_count = min_lane_count;
+				lane_count <= max_lane_count;
+				lane_count <<= 1) {
+
+				link_clock = common_rates[clock];
+				link_avail = intel_dp_max_data_rate(link_clock,
+								    lane_count);
+
+				if (mode_rate <= link_avail) {
+					goto found;
+				}
+			}
+		}
+	}
+
+	return false;
+
+found:
+	if (intel_dp->color_range_auto) {
+		/*
+		 * See:
+		 * CEA-861-E - 5.1 Default Encoding Parameters
+		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+		 */
+		pipe_config->limited_color_range =
+			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
+	} else {
+		pipe_config->limited_color_range =
+			intel_dp->limited_color_range;
+	}
+
+	pipe_config->lane_count = lane_count;
+
+	pipe_config->pipe_bpp = bpp;
+	pipe_config->port_clock = common_rates[clock];
+
+	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
+			      &link_bw, &rate_select);
+
+	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
+		      link_bw, rate_select, pipe_config->lane_count,
+		      pipe_config->port_clock, bpp);
+	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+		      mode_rate, link_avail);
+
+	intel_link_compute_m_n(bpp, lane_count,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
+			       &pipe_config->dp_m_n);
+
+	if (intel_connector->panel.downclock_mode != NULL &&
+		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
+			pipe_config->has_drrs = true;
+			intel_link_compute_m_n(bpp, lane_count,
+				intel_connector->panel.downclock_mode->clock,
+				pipe_config->port_clock,
+				&pipe_config->dp_m2_n2);
+	}
+
+	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+		skl_edp_set_pll_config(pipe_config);
+	else if (IS_BROXTON(dev))
+		/* handled in ddi */;
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config);
+	else
+		intel_dp_set_clock(encoder, pipe_config);
+
+	return true;
+}
+
+static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
+		      crtc->config->port_clock);
+	dpa_ctl = I915_READ(DP_A);
+	dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+	if (crtc->config->port_clock == 162000) {
+		/* For a long time we've carried around an ILK-DevA w/a for the
+		 * 160MHz clock. If we're really unlucky, it's still required.
+		 */
+		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+		dpa_ctl |= DP_PLL_FREQ_160MHZ;
+		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+	} else {
+		dpa_ctl |= DP_PLL_FREQ_270MHZ;
+		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	}
+
+	I915_WRITE(DP_A, dpa_ctl);
+
+	POSTING_READ(DP_A);
+	udelay(500);
+}
+
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+			      const struct intel_crtc_state *pipe_config)
+{
+	intel_dp->link_rate = pipe_config->port_clock;
+	intel_dp->lane_count = pipe_config->lane_count;
+}
+
+static void intel_dp_prepare(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+
+	intel_dp_set_link_params(intel_dp, crtc->config);
+
+	/*
+	 * There are four kinds of DP registers:
+	 *
+	 * 	IBX PCH
+	 * 	SNB CPU
+	 *	IVB CPU
+	 * 	CPT PCH
+	 *
+	 * IBX PCH and CPU are the same for almost everything,
+	 * except that the CPU DP PLL is configured in this
+	 * register
+	 *
+	 * CPT PCH is quite different, having many bits moved
+	 * to the TRANS_DP_CTL register instead. That
+	 * configuration happens (oddly) in ironlake_pch_enable
+	 */
+
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+
+	/* Handle DP bits in common between all three register formats */
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+
+	if (crtc->config->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+	/* Split out the IBX/CPU vs CPT settings */
+
+	if (IS_GEN7(dev) && port == PORT_A) {
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		intel_dp->DP |= crtc->pipe << 29;
+	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+		u32 trans_dp;
+
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			trans_dp |= TRANS_DP_ENH_FRAMING;
+		else
+			trans_dp &= ~TRANS_DP_ENH_FRAMING;
+		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+	} else {
+		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+		    crtc->config->limited_color_range)
+			intel_dp->DP |= DP_COLOR_RANGE_16_235;
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		if (IS_CHERRYVIEW(dev))
+			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
+		else if (crtc->pipe == PIPE_B)
+			intel_dp->DP |= DP_PIPEB_SELECT;
+	}
+}
+
+#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
+#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
+
+#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+				       u32 mask,
+				       u32 value)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+			mask, value,
+			I915_READ(pp_stat_reg),
+			I915_READ(pp_ctrl_reg));
+
+	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+				I915_READ(pp_stat_reg),
+				I915_READ(pp_ctrl_reg));
+	}
+
+	DRM_DEBUG_KMS("Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power on\n");
+	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power cycle\n");
+
+	/* When we disable the VDD override bit last, we have to do the
+	 * wait manually. */
+	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+				       intel_dp->panel_power_cycle_delay);
+
+	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+				       intel_dp->backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+				       intel_dp->backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked.
+ */
+
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 control;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	control = I915_READ(_pp_ctrl_reg(intel_dp));
+	if (!IS_BROXTON(dev)) {
+		control &= ~PANEL_UNLOCK_MASK;
+		control |= PANEL_UNLOCK_REGS;
+	}
+	return control;
+}
+
+/*
+ * Must be paired with edp_panel_vdd_off().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
+ */
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	u32 pp;
+	u32 pp_stat_reg, pp_ctrl_reg;
+	bool need_to_disable = !intel_dp->want_panel_vdd;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!is_edp(intel_dp))
+		return false;
+
+	cancel_delayed_work(&intel_dp->panel_vdd_work);
+	intel_dp->want_panel_vdd = true;
+
+	if (edp_have_panel_vdd(intel_dp))
+		return need_to_disable;
+
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
+		      port_name(intel_dig_port->port));
+
+	if (!edp_have_panel_power(intel_dp))
+		wait_panel_power_cycle(intel_dp);
+
+	pp = ironlake_get_pp_control(intel_dp);
+	pp |= EDP_FORCE_VDD;
+
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+	/*
+	 * If the panel wasn't on, delay before accessing aux channel
+	 */
+	if (!edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
+			      port_name(intel_dig_port->port));
+		msleep(intel_dp->panel_power_up_delay);
+	}
+
+	return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_edp_panel_vdd_off() or
+ * intel_edp_panel_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+	bool vdd;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	pps_lock(intel_dp);
+	vdd = edp_panel_vdd_on(intel_dp);
+	pps_unlock(intel_dp);
+
+	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
+	     port_name(dp_to_dig_port(intel_dp)->port));
+}
+
+static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port =
+		dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	enum intel_display_power_domain power_domain;
+	u32 pp;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	WARN_ON(intel_dp->want_panel_vdd);
+
+	if (!edp_have_panel_vdd(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
+		      port_name(intel_dig_port->port));
+
+	pp = ironlake_get_pp_control(intel_dp);
+	pp &= ~EDP_FORCE_VDD;
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	/* Make sure sequencer is idle before allowing subsequent activity */
+	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+
+	if ((pp & POWER_TARGET_ON) == 0)
+		intel_dp->last_power_cycle = jiffies;
+
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_put(dev_priv, power_domain);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+						 struct intel_dp, panel_vdd_work);
+
+	pps_lock(intel_dp);
+	if (!intel_dp->want_panel_vdd)
+		edp_panel_vdd_off_sync(intel_dp);
+	pps_unlock(intel_dp);
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+	unsigned long delay;
+
+	/*
+	 * Queue the timer to fire a long time from now (relative to the power
+	 * down delay) to keep the panel power up across a sequence of
+	 * operations.
+	 */
+	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
+	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with edp_panel_vdd_on().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
+ */
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+	struct drm_i915_private *dev_priv =
+		intel_dp_to_dev(intel_dp)->dev_private;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+	     port_name(dp_to_dig_port(intel_dp)->port));
+
+	intel_dp->want_panel_vdd = false;
+
+	if (sync)
+		edp_panel_vdd_off_sync(intel_dp);
+	else
+		edp_panel_vdd_schedule_off(intel_dp);
+}
+
+static void edp_panel_on(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
+		      port_name(dp_to_dig_port(intel_dp)->port));
+
+	if (WARN(edp_have_panel_power(intel_dp),
+		 "eDP port %c panel power already on\n",
+		 port_name(dp_to_dig_port(intel_dp)->port)))
+		return;
+
+	wait_panel_power_cycle(intel_dp);
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+	pp = ironlake_get_pp_control(intel_dp);
+	if (IS_GEN5(dev)) {
+		/* ILK workaround: disable reset around power sequence */
+		pp &= ~PANEL_POWER_RESET;
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
+	}
+
+	pp |= POWER_TARGET_ON;
+	if (!IS_GEN5(dev))
+		pp |= PANEL_POWER_RESET;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	wait_panel_on(intel_dp);
+	intel_dp->last_power_on = jiffies;
+
+	if (IS_GEN5(dev)) {
+		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
+	}
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+	if (!is_edp(intel_dp))
+		return;
+
+	pps_lock(intel_dp);
+	edp_panel_on(intel_dp);
+	pps_unlock(intel_dp);
+}
+
+
+static void edp_panel_off(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
+		      port_name(dp_to_dig_port(intel_dp)->port));
+
+	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
+	     port_name(dp_to_dig_port(intel_dp)->port));
+
+	pp = ironlake_get_pp_control(intel_dp);
+	/* We need to switch off panel power _and_ force vdd, for otherwise some
+	 * panels get very unhappy and cease to work. */
+	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+		EDP_BLC_ENABLE);
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	intel_dp->want_panel_vdd = false;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	intel_dp->last_power_cycle = jiffies;
+	wait_panel_off(intel_dp);
+
+	/* We got a reference when we enabled the VDD. */
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+	if (!is_edp(intel_dp))
+		return;
+
+	pps_lock(intel_dp);
+	edp_panel_off(intel_dp);
+	pps_unlock(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	wait_backlight_on(intel_dp);
+
+	pps_lock(intel_dp);
+
+	pp = ironlake_get_pp_control(intel_dp);
+	pp |= EDP_BLC_ENABLE;
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	pps_unlock(intel_dp);
+}
+
+/* Enable backlight PWM and backlight PP control. */
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
+{
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_panel_enable_backlight(intel_dp->attached_connector);
+	_intel_edp_backlight_on(intel_dp);
+}
+
+/* Disable backlight in the panel power control. */
+static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	pps_lock(intel_dp);
+
+	pp = ironlake_get_pp_control(intel_dp);
+	pp &= ~EDP_BLC_ENABLE;
+
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	pps_unlock(intel_dp);
+
+	intel_dp->last_backlight_off = jiffies;
+	edp_wait_backlight_off(intel_dp);
+}
+
+/* Disable backlight PP control and backlight PWM. */
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
+{
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	_intel_edp_backlight_off(intel_dp);
+	intel_panel_disable_backlight(intel_dp->attached_connector);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+static void intel_edp_backlight_power(struct intel_connector *connector,
+				      bool enable)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+	bool is_enabled;
+
+	pps_lock(intel_dp);
+	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+	pps_unlock(intel_dp);
+
+	if (is_enabled == enable)
+		return;
+
+	DRM_DEBUG_KMS("panel power control backlight %s\n",
+		      enable ? "enable" : "disable");
+
+	if (enable)
+		_intel_edp_backlight_on(intel_dp);
+	else
+		_intel_edp_backlight_off(intel_dp);
+}
+
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
+	DRM_DEBUG_KMS("\n");
+	dpa_ctl = I915_READ(DP_A);
+	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We don't adjust intel_dp->DP while tearing down the link, to
+	 * facilitate link retraining (e.g. after hotplug). Hence clear all
+	 * enable bits here to ensure that we don't enable too much. */
+	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	intel_dp->DP |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, intel_dp->DP);
+	POSTING_READ(DP_A);
+	udelay(200);
+}
+
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
+	dpa_ctl = I915_READ(DP_A);
+	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+	     "dp pll off, should be on\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We can't rely on the value tracked for the DP register in
+	 * intel_dp->DP because link_down must not change it (otherwise link
+	 * re-training will fail). */
+	dpa_ctl &= ~DP_PLL_ENABLE;
+	I915_WRITE(DP_A, dpa_ctl);
+	POSTING_READ(DP_A);
+	udelay(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+					 DP_SET_POWER_D3);
+	} else {
+		/*
+		 * When turning on, we need to retry for 1ms to give the sink
+		 * time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+						 DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(1);
+		}
+	}
+
+	if (ret != 1)
+		DRM_DEBUG_KMS("failed to %s sink power state\n",
+			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+				  enum pipe *pipe)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	tmp = I915_READ(intel_dp->output_reg);
+
+	if (!(tmp & DP_PORT_EN))
+		return false;
+
+	if (IS_GEN7(dev) && port == PORT_A) {
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+		enum pipe p;
+
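+		/*
+		 * On CPT the pipe<->port routing lives in the per-pipe
+		 * transcoder DP control registers rather than in the port
+		 * register itself, so scan them all for a match.
+		 */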
+		for_each_pipe(dev_priv, p) {
+			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
+			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
+				*pipe = p;
+				return true;
+			}
+		}
+
+		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+			      intel_dp->output_reg);
+	} else if (IS_CHERRYVIEW(dev)) {
+		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
+	} else {
+		*pipe = PORT_TO_PIPE(tmp);
+	}
+
+	return true;
+}
+
+static void intel_dp_get_config(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	u32 tmp, flags = 0;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	int dotclock;
+
+	tmp = I915_READ(intel_dp->output_reg);
+
+	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
+
+	if (HAS_PCH_CPT(dev) && port != PORT_A) {
+		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+
+		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+
+		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+	} else {
+		if (tmp & DP_SYNC_HS_HIGH)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+
+		if (tmp & DP_SYNC_VS_HIGH)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+	}
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+	    tmp & DP_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
+	pipe_config->has_dp_encoder = true;
+
+	pipe_config->lane_count =
+		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	if (port == PORT_A) {
+		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+			pipe_config->port_clock = 162000;
+		else
+			pipe_config->port_clock = 270000;
+	}
+
+	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+					    &pipe_config->dp_m_n);
+
+	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+		/*
+		 * This is a big fat ugly hack.
+		 *
+		 * Some machines in UEFI boot mode provide us a VBT that has 18
+		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+		 * unknown we fail to light up. Yet the same BIOS boots up with
+		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+		 * max, not what it tells us to use.
+		 *
+		 * Note: This will still be broken if the eDP panel is not lit
+		 * up by the BIOS, and thus we can't get the mode at module
+		 * load.
+		 */
+		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+	}
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	if (crtc->config->has_audio)
+		intel_audio_codec_disable(encoder);
+
+	if (HAS_PSR(dev) && !HAS_DDI(dev))
+		intel_psr_disable(intel_dp);
+
+	/* Make sure the panel is off before trying to change the mode. But also
+	 * ensure that we have vdd while we switch off the panel. */
+	intel_edp_panel_vdd_on(intel_dp);
+	intel_edp_backlight_off(intel_dp);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	intel_edp_panel_off(intel_dp);
+
+	/* disable the port before the pipe on g4x */
+	if (INTEL_INFO(dev)->gen < 5)
+		intel_dp_link_down(intel_dp);
+}
+
+static void ilk_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+
+	intel_dp_link_down(intel_dp);
+	if (port == PORT_A)
+		ironlake_edp_pll_off(intel_dp);
+}
+
+static void vlv_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_dp_link_down(intel_dp);
+}
+
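+/*
+ * Assert (reset == true) or deassert the TX lane and PCS clock soft resets
+ * on both PCS channel halves of the DPIO PHY.
+ */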
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+				     bool reset)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = crtc->pipe;
+	uint32_t val;
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+	if (reset)
+		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	else
+		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+	if (crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+		if (reset)
+			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+		else
+			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+	}
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	if (reset)
+		val &= ~DPIO_PCS_CLK_SOFT_RESET;
+	else
+		val |= DPIO_PCS_CLK_SOFT_RESET;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+	if (crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+		val |= CHV_PCS_REQ_SOFTRESET_EN;
+		if (reset)
+			val &= ~DPIO_PCS_CLK_SOFT_RESET;
+		else
+			val |= DPIO_PCS_CLK_SOFT_RESET;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+	}
+}
+
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_dp_link_down(intel_dp);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Assert data lane reset */
+	chv_data_lane_soft_reset(encoder, true);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void
+_intel_dp_set_link_train(struct intel_dp *intel_dp,
+			 uint32_t *DP,
+			 uint8_t dp_train_pat)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+
+	if (HAS_DDI(dev)) {
+		uint32_t temp = I915_READ(DP_TP_CTL(port));
+
+		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+		else
+			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+			break;
+		}
+		I915_WRITE(DP_TP_CTL(port), temp);
+
+	} else if ((IS_GEN7(dev) && port == PORT_A) ||
+		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
+		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			*DP |= DP_LINK_TRAIN_OFF_CPT;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		}
+
+	} else {
+		if (IS_CHERRYVIEW(dev))
+			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
+		else
+			*DP &= ~DP_LINK_TRAIN_MASK;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			*DP |= DP_LINK_TRAIN_OFF;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			*DP |= DP_LINK_TRAIN_PAT_1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			*DP |= DP_LINK_TRAIN_PAT_2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			if (IS_CHERRYVIEW(dev)) {
+				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
+			} else {
+				DRM_ERROR("DP training pattern 3 not supported\n");
+				*DP |= DP_LINK_TRAIN_PAT_2;
+			}
+			break;
+		}
+	}
+}
+
+static void intel_dp_enable_port(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* enable with pattern 1 (as per spec) */
+	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
+				 DP_TRAINING_PATTERN_1);
+
+	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	/*
+	 * Magic for VLV/CHV. We _must_ first set up the register
+	 * without actually enabling the port, and then do another
+	 * write to enable the port. Otherwise link training will
+	 * fail when the power sequencer is freshly used for this port.
+	 */
+	intel_dp->DP |= DP_PORT_EN;
+
+	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+	POSTING_READ(intel_dp->output_reg);
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (WARN_ON(dp_reg & DP_PORT_EN))
+		return;
+
+	pps_lock(intel_dp);
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_init_panel_power_sequencer(intel_dp);
+
+	intel_dp_enable_port(intel_dp);
+
+	edp_panel_vdd_on(intel_dp);
+	edp_panel_on(intel_dp);
+	edp_panel_vdd_off(intel_dp, true);
+
+	pps_unlock(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		unsigned int lane_mask = 0x0;
+
+		if (IS_CHERRYVIEW(dev))
+			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+
+		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+				    lane_mask);
+	}
+
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_start_link_train(intel_dp);
+	intel_dp_stop_link_train(intel_dp);
+
+	if (crtc->config->has_audio) {
+		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+				 pipe_name(crtc->pipe));
+		intel_audio_codec_enable(encoder);
+	}
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_enable_dp(encoder);
+	intel_edp_backlight_on(intel_dp);
+}
+
+static void vlv_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_edp_backlight_on(intel_dp);
+	intel_psr_enable(intel_dp);
+}
+
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+
+	intel_dp_prepare(encoder);
+
+	/* Only ilk+ has port A */
+	if (dport->port == PORT_A) {
+		ironlake_set_pll_cpu_edp(intel_dp);
+		ironlake_edp_pll_on(intel_dp);
+	}
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+	enum pipe pipe = intel_dp->pps_pipe;
+	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+
+	edp_panel_vdd_off_sync(intel_dp);
+
+	/*
+	 * VLV seems to get confused when multiple power seqeuencers
+	 * have the same port selected (even if only one has power/vdd
+	 * enabled). The failure manifests as vlv_wait_port_ready() failing
+	 * CHV on the other hand doesn't seem to mind having the same port
+	 * selected in multiple power seqeuencers, but let's clear the
+	 * port select always when logically disconnecting a power sequencer
+	 * from a port.
+	 */
+	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
+		      pipe_name(pipe), port_name(intel_dig_port->port));
+	I915_WRITE(pp_on_reg, 0);
+	POSTING_READ(pp_on_reg);
+
+	intel_dp->pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+				      enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		struct intel_dp *intel_dp;
+		enum port port;
+
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+		port = dp_to_dig_port(intel_dp)->port;
+
+		if (intel_dp->pps_pipe != pipe)
+			continue;
+
+		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
+			      pipe_name(pipe), port_name(port));
+
+		WARN(encoder->base.crtc,
+		     "stealing pipe %c power sequencer from active eDP port %c\n",
+		     pipe_name(pipe), port_name(port));
+
+		/* make sure vdd is off before we steal it */
+		vlv_detach_power_sequencer(intel_dp);
+	}
+}
+
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	if (intel_dp->pps_pipe == crtc->pipe)
+		return;
+
+	/*
+	 * If another power sequencer was being used on this
+	 * port previously make sure to turn off vdd there while
+	 * we still have control of it.
+	 */
+	if (intel_dp->pps_pipe != INVALID_PIPE)
+		vlv_detach_power_sequencer(intel_dp);
+
+	/*
+	 * We may be stealing the power
+	 * sequencer from another port.
+	 */
+	vlv_steal_power_sequencer(dev, crtc->pipe);
+
+	/* now it's all ours */
+	intel_dp->pps_pipe = crtc->pipe;
+
+	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
+		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
+
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+}
+
+static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
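+	/* Note: the value just read is discarded; DW8 is rebuilt from scratch below. */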
+	val = 0;
+	if (pipe)
+		val |= (1<<21);
+	else
+		val &= ~(1<<21);
+	val |= 0x001000c4;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	intel_enable_dp(encoder);
+}
+
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+
+	intel_dp_prepare(encoder);
+
+	/* Program Tx lane resets to default */
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+			 DPIO_PCS_TX_LANE2_RESET |
+			 DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+				 DPIO_PCS_CLK_SOFT_RESET);
+
+	/* Fix up inter-pair skew failure */
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	int data, i, stagger;
+	u32 val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* allow hardware to manage TX FIFO reset source */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+	}
+
+	/* Program the optimal Tx lane latency setting */
+	for (i = 0; i < intel_crtc->config->lane_count; i++) {
+		/* Set the upar bit */
+		if (intel_crtc->config->lane_count == 1)
+			data = 0x0;
+		else
+			data = (i == 1) ? 0x0 : 0x1;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+				data << DPIO_UPAR_SHIFT);
+	}
+
+	/* Data lane stagger programming */
+	if (intel_crtc->config->port_clock > 270000)
+		stagger = 0x18;
+	else if (intel_crtc->config->port_clock > 135000)
+		stagger = 0xd;
+	else if (intel_crtc->config->port_clock > 67500)
+		stagger = 0x7;
+	else if (intel_crtc->config->port_clock > 33750)
+		stagger = 0x4;
+	else
+		stagger = 0x2;
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val |= DPIO_TX2_STAGGER_MASK(0x1f);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+		val |= DPIO_TX2_STAGGER_MASK(0x1f);
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+	}
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+		       DPIO_LANESTAGGER_STRAP(stagger) |
+		       DPIO_LANESTAGGER_STRAP_OVRD |
+		       DPIO_TX1_STAGGER_MASK(0x1f) |
+		       DPIO_TX1_STAGGER_MULT(6) |
+		       DPIO_TX2_STAGGER_MULT(0));
+
+	if (intel_crtc->config->lane_count > 2) {
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+			       DPIO_LANESTAGGER_STRAP(stagger) |
+			       DPIO_LANESTAGGER_STRAP_OVRD |
+			       DPIO_TX1_STAGGER_MASK(0x1f) |
+			       DPIO_TX1_STAGGER_MULT(7) |
+			       DPIO_TX2_STAGGER_MULT(5));
+	}
+
+	/* Deassert data lane reset */
+	chv_data_lane_soft_reset(encoder, false);
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	intel_enable_dp(encoder);
+
+	/* Second common lane will stay alive on its own now */
+	if (dport->release_cl2_override) {
+		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+		dport->release_cl2_override = false;
+	}
+}
+
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	unsigned int lane_mask =
+		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+	u32 val;
+
+	intel_dp_prepare(encoder);
+
+	/*
+	 * Must trick the second common lane into life.
+	 * Otherwise we can't even access the PLL.
+	 */
+	if (ch == DPIO_CH0 && pipe == PIPE_B)
+		dport->release_cl2_override =
+			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
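+	/* chv_pre_enable_dp() will release this override once the port is up. */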
+
+	chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Assert data lane reset */
+	chv_data_lane_soft_reset(encoder, true);
+
+	/* program left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA1_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA1_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA2_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA2_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	/* program clock channel usage */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+		if (pipe != PIPE_B)
+			val &= ~CHV_PCS_USEDCLKCHANNEL;
+		else
+			val |= CHV_PCS_USEDCLKCHANNEL;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+	}
+
+	/*
+	 * This a a bit weird since generally CL
+	 * matches the pipe, but here we need to
+	 * pick the CL based on the port.
+	 */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+	if (pipe != PIPE_B)
+		val &= ~CHV_CMN_USEDCLKCHANNEL;
+	else
+		val |= CHV_CMN_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* disable left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/*
+	 * Leave the power down bit cleared for at least one
+	 * lane so that chv_powergate_phy_ch() will power
+	 * on something when the channel is otherwise unused.
+	 * When the port is off and the override is removed
+	 * the lanes power down anyway, so otherwise it doesn't
+	 * really matter what the state of power down bits is
+	 * after this.
+	 */
+	chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ *
+ * Sinks are *supposed* to come up within 1ms from an off state, but we're also
+ * supposed to retry 3 times per the spec.
+ */
+static ssize_t
+intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
+			void *buffer, size_t size)
+{
+	ssize_t ret;
+	int i;
+
+	/*
+	 * Sometime we just get the same incorrect byte repeated
+	 * over the entire buffer. Doing just one throw away read
+	 * initially seems to "solve" it.
+	 */
+	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
+
+	for (i = 0; i < 3; i++) {
+		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+		if (ret == size)
+			return ret;
+		msleep(1);
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	return intel_dp_dpcd_read_wake(&intel_dp->aux,
+				       DP_LANE0_1_STATUS,
+				       link_status,
+				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+}
+
+/* These are source-specific values. */
+static uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = dp_to_dig_port(intel_dp)->port;
+
+	if (IS_BROXTON(dev))
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+	else if (INTEL_INFO(dev)->gen >= 9) {
+		if (dev_priv->edp_low_vswing && port == PORT_A)
+			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+	} else if (IS_VALLEYVIEW(dev))
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+	else if (IS_GEN7(dev) && port == PORT_A)
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+	else if (HAS_PCH_CPT(dev) && port != PORT_A)
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+	else
+		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			return DP_TRAIN_PRE_EMPH_LEVEL_3;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		default:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		}
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			return DP_TRAIN_PRE_EMPH_LEVEL_3;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+		default:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		}
+	} else if (IS_VALLEYVIEW(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			return DP_TRAIN_PRE_EMPH_LEVEL_3;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+		default:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		}
+	} else if (IS_GEN7(dev) && port == PORT_A) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		default:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		}
+	} else {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			return DP_TRAIN_PRE_EMPH_LEVEL_2;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+		default:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
+		}
+	}
+}
+
+static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dport->base.base.crtc);
+	unsigned long demph_reg_value, preemph_reg_value,
+		uniqtranscale_reg_value;
+	uint8_t train_set = intel_dp->train_set[0];
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPH_LEVEL_0:
+		preemph_reg_value = 0x0004000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x552AB83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5548B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			demph_reg_value = 0x2B245555;
+			uniqtranscale_reg_value = 0x5560B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x5598DA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_1:
+		preemph_reg_value = 0x0002000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5552B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			demph_reg_value = 0x2B404848;
+			uniqtranscale_reg_value = 0x5580B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_2:
+		preemph_reg_value = 0x0000000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			demph_reg_value = 0x2B305555;
+			uniqtranscale_reg_value = 0x5570B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			demph_reg_value = 0x2B2B4040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_3:
+		preemph_reg_value = 0x0006000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			demph_reg_value = 0x1B405555;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
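+	/* Write the computed de-emphasis/swing values out to the PHY. */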
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+			 uniqtranscale_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	return 0;
+}
+
+static bool chv_need_uniq_trans_scale(uint8_t train_set)
+{
+	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
+		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+}
+
+static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+	u32 deemph_reg_value, margin_reg_value, val;
+	uint8_t train_set = intel_dp->train_set[0];
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	int i;
+
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPH_LEVEL_0:
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			deemph_reg_value = 128;
+			margin_reg_value = 52;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			deemph_reg_value = 128;
+			margin_reg_value = 77;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			deemph_reg_value = 128;
+			margin_reg_value = 102;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+			deemph_reg_value = 128;
+			margin_reg_value = 154;
+			/* FIXME extra to set for 1200 */
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_1:
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			deemph_reg_value = 85;
+			margin_reg_value = 78;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			deemph_reg_value = 85;
+			margin_reg_value = 116;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+			deemph_reg_value = 85;
+			margin_reg_value = 154;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_2:
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			deemph_reg_value = 64;
+			margin_reg_value = 104;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+			deemph_reg_value = 64;
+			margin_reg_value = 154;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_3:
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+			deemph_reg_value = 43;
+			margin_reg_value = 154;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Clear calc init */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+	}
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+	}
+
+	/* Program swing deemph */
+	for (i = 0; i < intel_crtc->config->lane_count; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+	}
+
+	/* Program swing margin */
+	for (i = 0; i < intel_crtc->config->lane_count; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+		val &= ~DPIO_SWING_MARGIN000_MASK;
+		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+		/*
+		 * Supposedly this value shouldn't matter when unique transition
+		 * scale is disabled, but in fact it does matter. Let's just
+		 * always program the same value and hope it's OK.
+		 */
+		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+	}
+
+	/*
+	 * The document said it needs to set bit 27 for ch0 and bit 26
+	 * for ch1. Might be a typo in the doc.
+	 * For now, for this unique transition scale selection, set bit
+	 * 27 for ch0 and ch1.
+	 */
+	for (i = 0; i < intel_crtc->config->lane_count; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+		if (chv_need_uniq_trans_scale(train_set))
+			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+		else
+			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+	}
+
+	/* Start swing calculation */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	if (intel_crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+	}
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	return 0;
+}
+
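+/*
+ * Compute the next training set: take the highest voltage swing and
+ * pre-emphasis requested by any lane, clamp them to the source's limits,
+ * and apply the result to all lanes.
+ */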
+static void
+intel_get_adjust_train(struct intel_dp *intel_dp,
+		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	voltage_max = intel_dp_voltage_max(intel_dp);
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
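+/* Gen4's DP voltage swing and pre-emphasis control */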
+static uint32_t
+gen4_signal_levels(uint8_t train_set)
+{
+	uint32_t	signal_levels = 0;
+
+	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+	default:
+		signal_levels |= DP_VOLTAGE_0_4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+		signal_levels |= DP_VOLTAGE_0_6;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+		signal_levels |= DP_VOLTAGE_0_8;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+		signal_levels |= DP_VOLTAGE_1_2;
+		break;
+	}
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPH_LEVEL_0:
+	default:
+		signal_levels |= DP_PRE_EMPHASIS_0;
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_1:
+		signal_levels |= DP_PRE_EMPHASIS_3_5;
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_2:
+		signal_levels |= DP_PRE_EMPHASIS_6;
+		break;
+	case DP_TRAIN_PRE_EMPH_LEVEL_3:
+		signal_levels |= DP_PRE_EMPHASIS_9_5;
+		break;
+	}
+	return signal_levels;
+}
+
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+gen6_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	}
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+gen7_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return EDP_LINK_TRAIN_400MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return EDP_LINK_TRAIN_600MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return EDP_LINK_TRAIN_800MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_500MV_0DB_IVB;
+	}
+}
+
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	uint32_t signal_levels, mask = 0;
+	uint8_t train_set = intel_dp->train_set[0];
+
+	if (HAS_DDI(dev)) {
+		signal_levels = ddi_signal_levels(intel_dp);
+
+		if (IS_BROXTON(dev))
+			signal_levels = 0;
+		else
+			mask = DDI_BUF_EMP_MASK;
+	} else if (IS_CHERRYVIEW(dev)) {
+		signal_levels = chv_signal_levels(intel_dp);
+	} else if (IS_VALLEYVIEW(dev)) {
+		signal_levels = vlv_signal_levels(intel_dp);
+	} else if (IS_GEN7(dev) && port == PORT_A) {
+		signal_levels = gen7_edp_signal_levels(train_set);
+		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+	} else if (IS_GEN6(dev) && port == PORT_A) {
+		signal_levels = gen6_edp_signal_levels(train_set);
+		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+	} else {
+		signal_levels = gen4_signal_levels(train_set);
+		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+	}
+
+	if (mask)
+		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+	DRM_DEBUG_KMS("Using vswing level %d\n",
+		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
+	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
+		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+	*DP = (*DP & ~mask) | signal_levels;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+			uint32_t *DP,
+			uint8_t dp_train_pat)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_dig_port->base.base.dev);
+	uint8_t buf[sizeof(intel_dp->train_set) + 1];
+	int ret, len;
+
+	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+
+	I915_WRITE(intel_dp->output_reg, *DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	buf[0] = dp_train_pat;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
+	    DP_TRAINING_PATTERN_DISABLE) {
+		/* don't write DP_TRAINING_LANEx_SET on disable */
+		len = 1;
+	} else {
+		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+		len = intel_dp->lane_count + 1;
+	}
+
+	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+				buf, len);
+
+	return ret == len;
+}
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			uint8_t dp_train_pat)
+{
+	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+	intel_dp_set_signal_levels(intel_dp, DP);
+	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_dig_port->base.base.dev);
+	int ret;
+
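+	/* Apply the sink's requested vswing/pre-emphasis on the source side,
+	 * then echo the new training set back via DP_TRAINING_LANEx_SET. */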
+	intel_get_adjust_train(intel_dp, link_status);
+	intel_dp_set_signal_levels(intel_dp, DP);
+
+	I915_WRITE(intel_dp->output_reg, *DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+				intel_dp->train_set, intel_dp->lane_count);
+
+	return ret == intel_dp->lane_count;
+}
+
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	uint32_t val;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	/*
+	 * On PORT_A we can only have eDP in SST mode. There the only reason
+	 * we need to set idle transmission mode is to work around a HW issue
+	 * where we enable the pipe while not in idle link-training mode.
+	 * In this case there is a requirement to wait for a minimum number of
+	 * idle patterns to be sent.
+	 */
+	if (port == PORT_A)
+		return;
+
+	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+		     1))
+		DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
+{
+	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+	struct drm_device *dev = encoder->dev;
+	int i;
+	uint8_t voltage;
+	int voltage_tries, loop_tries;
+	uint32_t DP = intel_dp->DP;
+	uint8_t link_config[2];
+	uint8_t link_bw, rate_select;
+
+	if (HAS_DDI(dev))
+		intel_ddi_prepare_link_retrain(encoder);
+
+	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
+			      &link_bw, &rate_select);
+
+	/* Write the link configuration data */
+	link_config[0] = link_bw;
+	link_config[1] = intel_dp->lane_count;
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+	if (intel_dp->num_sink_rates)
+		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+				  &rate_select, 1);
+
+	link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+
+	DP |= DP_PORT_EN;
+
+	/* clock recovery */
+	if (!intel_dp_reset_link_train(intel_dp, &DP,
+				       DP_TRAINING_PATTERN_1 |
+				       DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to enable link training\n");
+		return;
+	}
+
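+	/* 0xff can never match a masked vswing level, so the first pass
+	 * through the loop never counts as a repeated voltage. */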
+	voltage = 0xff;
+	voltage_tries = 0;
+	loop_tries = 0;
+	for (;;) {
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("clock recovery OK\n");
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count) {
+			++loop_tries;
+			if (loop_tries == 5) {
+				DRM_ERROR("too many full retries, give up\n");
+				break;
+			}
+			intel_dp_reset_link_train(intel_dp, &DP,
+						  DP_TRAINING_PATTERN_1 |
+						  DP_LINK_SCRAMBLING_DISABLE);
+			voltage_tries = 0;
+			continue;
+		}
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++voltage_tries;
+			if (voltage_tries == 5) {
+				DRM_ERROR("too many voltage retries, give up\n");
+				break;
+			}
+		} else
+			voltage_tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
+	}
+
+	intel_dp->DP = DP;
+}
+
+static void
+intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	bool channel_eq = false;
+	int tries, cr_tries;
+	uint32_t DP = intel_dp->DP;
+	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+	/*
+	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
+	 *
+	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+	 * also mandatory for downstream devices that support HBR2.
+	 *
+	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
+	 * supported but still not enabled.
+	 */
+	if (intel_dp_source_supports_hbr2(dev) &&
+	    drm_dp_tps3_supported(intel_dp->dpcd))
+		training_pattern = DP_TRAINING_PATTERN_3;
+	else if (intel_dp->link_rate == 540000)
+		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+
+	/* channel equalization */
+	if (!intel_dp_set_link_train(intel_dp, &DP,
+				     training_pattern |
+				     DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to start channel equalization\n");
+		return;
+	}
+
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+	for (;;) {
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			break;
+		}
+
+		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
+			break;
+		}
+
+		/* Make sure clock is still ok */
+		if (!drm_dp_clock_recovery_ok(link_status,
+					      intel_dp->lane_count)) {
+			intel_dp_link_training_clock_recovery(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						training_pattern |
+						DP_LINK_SCRAMBLING_DISABLE);
+			cr_tries++;
+			continue;
+		}
+
+		if (drm_dp_channel_eq_ok(link_status,
+					 intel_dp->lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_training_clock_recovery(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						training_pattern |
+						DP_LINK_SCRAMBLING_DISABLE);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
+		++tries;
+	}
+
+	intel_dp_set_idle_link_train(intel_dp);
+
+	intel_dp->DP = DP;
+
+	if (channel_eq)
+		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
+				DP_TRAINING_PATTERN_DISABLE);
+}
+
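+/*
+ * Full link training: clock recovery with training pattern 1, followed by
+ * channel equalization with training pattern 2 (or 3 when supported).
+ */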
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_link_training_clock_recovery(intel_dp);
+	intel_dp_link_training_channel_equalization(intel_dp);
+}
+
+static void
+intel_dp_link_down(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	enum port port = intel_dig_port->port;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t DP = intel_dp->DP;
+
+	if (WARN_ON(HAS_DDI(dev)))
+		return;
+
+	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	if ((IS_GEN7(dev) && port == PORT_A) ||
+	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
+		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+	} else {
+		if (IS_CHERRYVIEW(dev))
+			DP &= ~DP_LINK_TRAIN_MASK_CHV;
+		else
+			DP &= ~DP_LINK_TRAIN_MASK;
+		DP |= DP_LINK_TRAIN_PAT_IDLE;
+	}
+	I915_WRITE(intel_dp->output_reg, DP);
+	POSTING_READ(intel_dp->output_reg);
+
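+	/* The link is idle now; actually disable the port and audio output. */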
+	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	I915_WRITE(intel_dp->output_reg, DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	/*
+	 * HW workaround for IBX, we need to move the port
+	 * to transcoder A after disabling it to allow the
+	 * matching HDMI port to be enabled on transcoder A.
+	 */
+	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+		/* always enable with pattern 1 (as per spec) */
+		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
+		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+		I915_WRITE(intel_dp->output_reg, DP);
+		POSTING_READ(intel_dp->output_reg);
+
+		DP &= ~DP_PORT_EN;
+		I915_WRITE(intel_dp->output_reg, DP);
+		POSTING_READ(intel_dp->output_reg);
+	}
+
+	msleep(intel_dp->panel_power_down_delay);
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint8_t rev;
+
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
+				    sizeof(intel_dp->dpcd)) < 0)
+		return false; /* aux transfer failed */
+
+	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
+
+	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+		return false; /* DPCD not present */
+
+	/* Check if the panel supports PSR */
+	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+	if (is_edp(intel_dp)) {
+		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
+					intel_dp->psr_dpcd,
+					sizeof(intel_dp->psr_dpcd));
+		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+			dev_priv->psr.sink_support = true;
+			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+		}
+
+		if (INTEL_INFO(dev)->gen >= 9 &&
+			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+			uint8_t frame_sync_cap;
+
+			dev_priv->psr.sink_support = true;
+			intel_dp_dpcd_read_wake(&intel_dp->aux,
+					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+					&frame_sync_cap, 1);
+			dev_priv->psr.aux_frame_sync = frame_sync_cap != 0;
+			/* PSR2 needs frame sync as well */
+			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+			DRM_DEBUG_KMS("PSR2 %s on sink\n",
+				dev_priv->psr.psr2_support ? "supported" : "not supported");
+		}
+	}
+
+	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
+		      yesno(intel_dp_source_supports_hbr2(dev)),
+		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+
+	/* Intermediate frequency support */
+	if (is_edp(intel_dp) &&
+	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+	    (rev >= 0x03)) { /* eDP v1.4 or higher */
+		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+		int i;
+
+		intel_dp_dpcd_read_wake(&intel_dp->aux,
+				DP_SUPPORTED_LINK_RATES,
+				sink_rates,
+				sizeof(sink_rates));
+
+		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+			int val = le16_to_cpu(sink_rates[i]);
+
+			if (val == 0)
+				break;
+
+			/*
+			 * Value read is in units of 200 kHz, while the
+			 * drm clock is saved in deca-kHz.
+			 */
+			intel_dp->sink_rates[i] = (val * 200) / 10;
+		}
+		intel_dp->num_sink_rates = i;
+	}
+
+	intel_dp_print_rates(intel_dp);
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return true; /* native DP sink */
+
+	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+		return true; /* no per-port downstream info */
+
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+				    intel_dp->downstream_ports,
+				    DP_MAX_DOWNSTREAM_PORTS) < 0)
+		return false; /* downstream port status fetch failed */
+
+	return true;
+}
+
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+	u8 buf[3];
+
+	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
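+/*
+ * Probe the sink for MST support (DPCD 1.2+ with the DP_MST_CAP bit
+ * set) and enable or disable the MST topology manager accordingly.
+ */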
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+	u8 buf[1];
+
+	if (!intel_dp->can_mst)
+		return false;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+		return false;
+
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+		if (buf[0] & DP_MST_CAP) {
+			DRM_DEBUG_KMS("Sink is MST capable\n");
+			intel_dp->is_mst = true;
+		} else {
+			DRM_DEBUG_KMS("Sink is not MST capable\n");
+			intel_dp->is_mst = false;
+		}
+	}
+
+	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+	return intel_dp->is_mst;
+}
+
+static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int ret = 0;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+			       buf & ~DP_TEST_SINK_START) < 0) {
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	intel_dp->sink_crc.started = false;
+ out:
+	hsw_enable_ips(intel_crtc);
+	return ret;
+}
+
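+/*
+ * Start the sink CRC test: DP_TEST_SINK_MISC advertises CRC support,
+ * and setting DP_TEST_SINK_START makes the sink compute a CRC per
+ * frame. IPS is disabled for the duration of the test, since it can
+ * make the measured CRCs unstable.
+ */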
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int ret;
+
+	if (intel_dp->sink_crc.started) {
+		ret = intel_dp_sink_crc_stop(intel_dp);
+		if (ret)
+			return ret;
+	}
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+		return -EIO;
+
+	if (!(buf & DP_TEST_CRC_SUPPORTED))
+		return -ENOTTY;
+
+	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+		return -EIO;
+
+	hsw_disable_ips(intel_crtc);
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+			       buf | DP_TEST_SINK_START) < 0) {
+		hsw_enable_ips(intel_crtc);
+		return -EIO;
+	}
+
+	intel_dp->sink_crc.started = true;
+	return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int count, ret;
+	int attempts = 6;
+	bool old_equal_new;
+
+	ret = intel_dp_sink_crc_start(intel_dp);
+	if (ret)
+		return ret;
+
+	do {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+		if (drm_dp_dpcd_readb(&intel_dp->aux,
+				      DP_TEST_SINK_MISC, &buf) < 0) {
+			ret = -EIO;
+			goto stop;
+		}
+		count = buf & DP_TEST_COUNT_MASK;
+
+		/*
+		 * Count might be reset during the loop. In this case
+		 * last known count needs to be reset as well.
+		 */
+		if (count == 0)
+			intel_dp->sink_crc.last_count = 0;
+
+		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+			ret = -EIO;
+			goto stop;
+		}
+
+		old_equal_new = (count == intel_dp->sink_crc.last_count &&
+				 !memcmp(intel_dp->sink_crc.last_crc, crc,
+					 6 * sizeof(u8)));
+
+	} while (--attempts && (count == 0 || old_equal_new));
+
+	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
+
+	if (attempts == 0) {
+		if (old_equal_new) {
+			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
+		} else {
+			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
+			ret = -ETIMEDOUT;
+			goto stop;
+		}
+	}
+
+stop:
+	intel_dp_sink_crc_stop(intel_dp);
+	return ret;
+}
+
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+	return intel_dp_dpcd_read_wake(&intel_dp->aux,
+				       DP_DEVICE_SERVICE_IRQ_VECTOR,
+				       sink_irq_vector, 1) == 1;
+}
+
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+	int ret;
+
+	ret = intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT_ESI,
+				      sink_irq_vector, 14);
+
+	return ret == 14;
+}
+
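+/*
+ * DP compliance autotest handlers (DP CTS). Only the EDID read test is
+ * actually implemented; link training requests are simply ACKed, and
+ * the video pattern and PHY pattern tests are NAKed as placeholders.
+ */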
+static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+{
+	uint8_t test_result = DP_TEST_ACK;
+	return test_result;
+}
+
+static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+{
+	uint8_t test_result = DP_TEST_NAK;
+	return test_result;
+}
+
+static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
+{
+	uint8_t test_result = DP_TEST_NAK;
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct drm_connector *connector = &intel_connector->base;
+
+	if (intel_connector->detect_edid == NULL ||
+	    connector->edid_corrupt ||
+	    intel_dp->aux.i2c_defer_count > 6) {
+		/* Check EDID read for NACKs, DEFERs and corruption
+		 * (DP CTS 1.2 Core r1.1)
+		 *    4.2.2.4 : Failed EDID read, I2C_NAK
+		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
+		 *    4.2.2.6 : EDID corruption detected
+		 * Use failsafe mode for all cases
+		 */
+		if (intel_dp->aux.i2c_nack_count > 0 ||
+			intel_dp->aux.i2c_defer_count > 0)
+			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
+				      intel_dp->aux.i2c_nack_count,
+				      intel_dp->aux.i2c_defer_count);
+		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+	} else {
+		struct edid *block = intel_connector->detect_edid;
+
+		/* We have to write the checksum
+		 * of the last block read
+		 */
+		block += intel_connector->detect_edid->extensions;
+
+		if (!drm_dp_dpcd_write(&intel_dp->aux,
+					DP_TEST_EDID_CHECKSUM,
+					&block->checksum,
+					1))
+			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+
+		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
+		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+	}
+
+	/* Set test active flag here so userspace doesn't interrupt things */
+	intel_dp->compliance_test_active = 1;
+
+	return test_result;
+}
+
+static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+{
+	uint8_t test_result = DP_TEST_NAK;
+	return test_result;
+}
+
+static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+	uint8_t response = DP_TEST_NAK;
+	uint8_t rxdata = 0;
+	int status = 0;
+
+	intel_dp->compliance_test_active = 0;
+	intel_dp->compliance_test_type = 0;
+	intel_dp->compliance_test_data = 0;
+
+	intel_dp->aux.i2c_nack_count = 0;
+	intel_dp->aux.i2c_defer_count = 0;
+
+	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
+	if (status <= 0) {
+		DRM_DEBUG_KMS("Could not read test request from sink\n");
+		goto update_status;
+	}
+
+	switch (rxdata) {
+	case DP_TEST_LINK_TRAINING:
+		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+		response = intel_dp_autotest_link_training(intel_dp);
+		break;
+	case DP_TEST_LINK_VIDEO_PATTERN:
+		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+		response = intel_dp_autotest_video_pattern(intel_dp);
+		break;
+	case DP_TEST_LINK_EDID_READ:
+		DRM_DEBUG_KMS("EDID test requested\n");
+		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+		response = intel_dp_autotest_edid(intel_dp);
+		break;
+	case DP_TEST_LINK_PHY_TEST_PATTERN:
+		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+		response = intel_dp_autotest_phy_pattern(intel_dp);
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
+		break;
+	}
+
+update_status:
+	status = drm_dp_dpcd_write(&intel_dp->aux,
+				   DP_TEST_RESPONSE,
+				   &response, 1);
+	if (status <= 0)
+		DRM_DEBUG_KMS("Could not write test response to sink\n");
+}
+
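+/*
+ * Service MST sink events: read the Event Status Indicator (ESI)
+ * block, retrain the link if channel EQ is reported bad, hand the IRQ
+ * to the topology manager, ack the handled events, and loop while more
+ * events are pending. If the ESI read fails, drop back to SST and send
+ * a hotplug event.
+ */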
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+	bool bret;
+
+	if (intel_dp->is_mst) {
+		u8 esi[16] = { 0 };
+		int ret = 0;
+		int retry;
+		bool handled;
+		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+		if (bret) {
+			/* check link status - esi[10] = 0x200c */
+			if (intel_dp->active_mst_links &&
+			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+				intel_dp_start_link_train(intel_dp);
+				intel_dp_stop_link_train(intel_dp);
+			}
+
+			DRM_DEBUG_KMS("got esi %3ph\n", esi);
+			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+			if (handled) {
+				for (retry = 0; retry < 3; retry++) {
+					int wret;
+					wret = drm_dp_dpcd_write(&intel_dp->aux,
+								 DP_SINK_COUNT_ESI+1,
+								 &esi[1], 3);
+					if (wret == 3)
+						break;
+				}
+
+				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+				if (bret) {
+					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
+					goto go_again;
+				}
+			} else
+				ret = 0;
+
+			return ret;
+		} else {
+			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+			intel_dp->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+			/* send a hotplug event */
+			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ *  1. Read DPCD
+ *  2. Configure link according to Receiver Capabilities
+ *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ *  4. Check link status on receipt of hot-plug interrupt
+ */
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	u8 sink_irq_vector;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	if (!intel_encoder->base.crtc)
+		return;
+
+	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+		return;
+
+	/* Try to read receiver status if the link appears to be up */
+	if (!intel_dp_get_link_status(intel_dp, link_status))
+		return;
+
+	/* Now read the DPCD to see if it's actually running */
+	if (!intel_dp_get_dpcd(intel_dp))
+		return;
+
+	/* Try to read the source of the interrupt */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+		/* Clear interrupt source */
+		drm_dp_dpcd_writeb(&intel_dp->aux,
+				   DP_DEVICE_SERVICE_IRQ_VECTOR,
+				   sink_irq_vector);
+
+		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
+		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+	}
+
+	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+			      intel_encoder->base.name);
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
+	}
+}
+
+/* XXX this is probably wrong for multiple downstream ports */
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+	uint8_t *dpcd = intel_dp->dpcd;
+	uint8_t type;
+
+	if (!intel_dp_get_dpcd(intel_dp))
+		return connector_status_disconnected;
+
+	/* if there's no downstream port, we're done */
+	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+		return connector_status_connected;
+
+	/* If we're HPD-aware, SINK_COUNT changes dynamically */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+		uint8_t reg;
+
+		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
+					    &reg, 1) < 0)
+			return connector_status_unknown;
+
+		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+					      : connector_status_disconnected;
+	}
+
+	/* If no HPD, poke DDC gently */
+	if (drm_probe_ddc(&intel_dp->aux.ddc))
+		return connector_status_connected;
+
+	/* Well we tried, say unknown for unreliable port types */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+		if (type == DP_DS_PORT_TYPE_VGA ||
+		    type == DP_DS_PORT_TYPE_NON_EDID)
+			return connector_status_unknown;
+	} else {
+		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			DP_DWN_STRM_PORT_TYPE_MASK;
+		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
+			return connector_status_unknown;
+	}
+
+	/* Anything else is out of spec, warn and ignore */
+	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+edp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum drm_connector_status status;
+
+	status = intel_panel_detect(dev);
+	if (status == connector_status_unknown)
+		status = connector_status_connected;
+
+	return status;
+}
+
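+/*
+ * Live hotplug status helpers: each platform variant reads the
+ * appropriate status bit for the port (SDEISR on PCH platforms,
+ * PORT_HOTPLUG_STAT on g4x/gm45, GEN8_DE_PORT_ISR on Broxton).
+ * Port A has no live status bit on the PCH platforms and is reported
+ * as always connected.
+ */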
+static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+				       struct intel_digital_port *port)
+{
+	u32 bit;
+
+	switch (port->port) {
+	case PORT_A:
+		return true;
+	case PORT_B:
+		bit = SDE_PORTB_HOTPLUG;
+		break;
+	case PORT_C:
+		bit = SDE_PORTC_HOTPLUG;
+		break;
+	case PORT_D:
+		bit = SDE_PORTD_HOTPLUG;
+		break;
+	default:
+		MISSING_CASE(port->port);
+		return false;
+	}
+
+	return I915_READ(SDEISR) & bit;
+}
+
+static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
+				       struct intel_digital_port *port)
+{
+	u32 bit;
+
+	switch (port->port) {
+	case PORT_A:
+		return true;
+	case PORT_B:
+		bit = SDE_PORTB_HOTPLUG_CPT;
+		break;
+	case PORT_C:
+		bit = SDE_PORTC_HOTPLUG_CPT;
+		break;
+	case PORT_D:
+		bit = SDE_PORTD_HOTPLUG_CPT;
+		break;
+	case PORT_E:
+		bit = SDE_PORTE_HOTPLUG_SPT;
+		break;
+	default:
+		MISSING_CASE(port->port);
+		return false;
+	}
+
+	return I915_READ(SDEISR) & bit;
+}
+
+static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
+				       struct intel_digital_port *port)
+{
+	u32 bit;
+
+	switch (port->port) {
+	case PORT_B:
+		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+		break;
+	case PORT_C:
+		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+		break;
+	case PORT_D:
+		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+		break;
+	default:
+		MISSING_CASE(port->port);
+		return false;
+	}
+
+	return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+					struct intel_digital_port *port)
+{
+	u32 bit;
+
+	switch (port->port) {
+	case PORT_B:
+		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
+		break;
+	case PORT_C:
+		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
+		break;
+	case PORT_D:
+		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
+		break;
+	default:
+		MISSING_CASE(port->port);
+		return false;
+	}
+
+	return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
+				       struct intel_digital_port *intel_dig_port)
+{
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	enum port port;
+	u32 bit;
+
+	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
+	switch (port) {
+	case PORT_A:
+		bit = BXT_DE_PORT_HP_DDIA;
+		break;
+	case PORT_B:
+		bit = BXT_DE_PORT_HP_DDIB;
+		break;
+	case PORT_C:
+		bit = BXT_DE_PORT_HP_DDIC;
+		break;
+	default:
+		MISSING_CASE(port);
+		return false;
+	}
+
+	return I915_READ(GEN8_DE_PORT_ISR) & bit;
+}
+
+/**
+ * intel_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Return %true if @port is connected, %false otherwise.
+ */
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+					 struct intel_digital_port *port)
+{
+	if (HAS_PCH_IBX(dev_priv))
+		return ibx_digital_port_connected(dev_priv, port);
+	else if (HAS_PCH_SPLIT(dev_priv))
+		return cpt_digital_port_connected(dev_priv, port);
+	else if (IS_BROXTON(dev_priv))
+		return bxt_digital_port_connected(dev_priv, port);
+	else if (IS_GM45(dev_priv))
+		return gm45_digital_port_connected(dev_priv, port);
+	else
+		return g4x_digital_port_connected(dev_priv, port);
+}
+
+static enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+		return connector_status_disconnected;
+
+	return intel_dp_detect_dpcd(intel_dp);
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp)) {
+		enum drm_connector_status status;
+
+		status = intel_panel_detect(dev);
+		if (status == connector_status_unknown)
+			status = connector_status_connected;
+		return status;
+	}
+
+	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
+		return connector_status_disconnected;
+
+	return intel_dp_detect_dpcd(intel_dp);
+}
+
+static struct edid *
+intel_dp_get_edid(struct intel_dp *intel_dp)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
+			return NULL;
+
+		return drm_edid_duplicate(intel_connector->edid);
+	}
+
+	return drm_get_edid(&intel_connector->base, &intel_dp->aux.ddc);
+}
+
+static void
+intel_dp_set_edid(struct intel_dp *intel_dp)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct edid *edid;
+
+	edid = intel_dp_get_edid(intel_dp);
+	intel_connector->detect_edid = edid;
+
+	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
+		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
+	else
+		intel_dp->has_audio = drm_detect_monitor_audio(edid);
+}
+
+static void
+intel_dp_unset_edid(struct intel_dp *intel_dp)
+{
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+	kfree(intel_connector->detect_edid);
+	intel_connector->detect_edid = NULL;
+
+	intel_dp->has_audio = false;
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = connector->dev;
+	enum drm_connector_status status;
+	enum intel_display_power_domain power_domain;
+	bool ret;
+	u8 sink_irq_vector;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+	intel_dp_unset_edid(intel_dp);
+
+	if (intel_dp->is_mst) {
+		/* MST devices are disconnected from a monitor POV */
+		if (intel_encoder->type != INTEL_OUTPUT_EDP)
+			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		return connector_status_disconnected;
+	}
+
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(to_i915(dev), power_domain);
+
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp))
+		status = edp_detect(intel_dp);
+	else if (HAS_PCH_SPLIT(dev))
+		status = ironlake_dp_detect(intel_dp);
+	else
+		status = g4x_dp_detect(intel_dp);
+	if (status != connector_status_connected)
+		goto out;
+
+	intel_dp_probe_oui(intel_dp);
+
+	ret = intel_dp_probe_mst(intel_dp);
+	if (ret) {
+		/*
+		 * If we are in MST mode then this connector won't appear
+		 * connected or have anything with EDID on it.
+		 */
+		if (intel_encoder->type != INTEL_OUTPUT_EDP)
+			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		status = connector_status_disconnected;
+		goto out;
+	}
+
+	intel_dp_set_edid(intel_dp);
+
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	status = connector_status_connected;
+
+	/* Try to read the source of the interrupt */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+		/* Clear interrupt source */
+		drm_dp_dpcd_writeb(&intel_dp->aux,
+				   DP_DEVICE_SERVICE_IRQ_VECTOR,
+				   sink_irq_vector);
+
+		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+			intel_dp_handle_test_request(intel_dp);
+		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+	}
+
+out:
+	intel_display_power_put(to_i915(dev), power_domain);
+	return status;
+}
+
+static void
+intel_dp_force(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	enum intel_display_power_domain power_domain;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+	intel_dp_unset_edid(intel_dp);
+
+	if (connector->status != connector_status_connected)
+		return;
+
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	intel_dp_set_edid(intel_dp);
+
+	intel_display_power_put(dev_priv, power_domain);
+
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct edid *edid;
+
+	edid = intel_connector->detect_edid;
+	if (edid) {
+		int ret = intel_connector_update_modes(connector, edid);
+		if (ret)
+			return ret;
+	}
+
+	/* if eDP has no EDID, fall back to fixed mode */
+	if (is_edp(intel_attached_dp(connector)) &&
+	    intel_connector->panel.fixed_mode) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(connector->dev,
+					  intel_connector->panel.fixed_mode);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	bool has_audio = false;
+	struct edid *edid;
+
+	edid = to_intel_connector(connector)->detect_edid;
+	if (edid)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
+static int
+intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_dp->color_range_auto;
+		bool old_range = intel_dp->limited_color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_dp->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_dp->color_range_auto = false;
+			intel_dp->limited_color_range = false;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_dp->color_range_auto = false;
+			intel_dp->limited_color_range = true;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_dp->color_range_auto &&
+		    old_range == intel_dp->limited_color_range)
+			return 0;
+
+		goto done;
+	}
+
+	if (is_edp(intel_dp) &&
+	    property == connector->dev->mode_config.scaling_mode_property) {
+		if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("DRM_MODE_SCALE_NONE is not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == val) {
+			/* the eDP scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = val;
+
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (intel_encoder->base.crtc)
+		intel_crtc_restore_mode(intel_encoder->base.crtc);
+
+	return 0;
+}
+
+static void
+intel_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	kfree(intel_connector->detect_edid);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	/* Can't call is_edp() since the encoder may have been destroyed
+	 * already. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		intel_panel_fini(&intel_connector->panel);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	drm_dp_aux_unregister(&intel_dp->aux);
+	intel_dp_mst_encoder_cleanup(intel_dig_port);
+	if (is_edp(intel_dp)) {
+		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+		/*
+		 * vdd might still be enabled due to the delayed vdd off.
+		 * Make sure vdd is actually turned off here.
+		 */
+		pps_lock(intel_dp);
+		edp_panel_vdd_off_sync(intel_dp);
+		pps_unlock(intel_dp);
+
+		if (intel_dp->edp_notifier.notifier_call) {
+			unregister_reboot_notifier(&intel_dp->edp_notifier);
+			intel_dp->edp_notifier.notifier_call = NULL;
+		}
+	}
+	drm_encoder_cleanup(encoder);
+	kfree(intel_dig_port);
+}
+
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	/*
+	 * vdd might still be enabled due to the delayed vdd off.
+	 * Make sure vdd is actually turned off here.
+	 */
+	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+	pps_lock(intel_dp);
+	edp_panel_vdd_off_sync(intel_dp);
+	pps_unlock(intel_dp);
+}
+
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (!edp_have_panel_vdd(intel_dp))
+		return;
+
+	/*
+	 * The VDD bit needs a power domain reference, so if the bit is
+	 * already enabled when we boot or resume, grab this reference and
+	 * schedule a vdd off, so we don't hold on to the reference
+	 * indefinitely.
+	 */
+	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
+	intel_display_power_get(dev_priv, power_domain);
+
+	edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	if (!HAS_DDI(dev_priv))
+		intel_dp->DP = I915_READ(intel_dp->output_reg);
+
+	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+		return;
+
+	pps_lock(intel_dp);
+
+	/*
+	 * Read out the current power sequencer assignment,
+	 * in case the BIOS did something with it.
+	 */
+	if (IS_VALLEYVIEW(encoder->dev))
+		vlv_initial_power_sequencer_setup(intel_dp);
+
+	intel_edp_panel_vdd_sanitize(intel_dp);
+
+	pps_unlock(intel_dp);
+}
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_dp_detect,
+	.force = intel_dp_force,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_dp_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.destroy = intel_dp_connector_destroy,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+	.get_modes = intel_dp_get_modes,
+	.mode_valid = intel_dp_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+	.reset = intel_dp_encoder_reset,
+	.destroy = intel_dp_encoder_destroy,
+};
+
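+/*
+ * Hotplug IRQ handler. A long pulse means connect/disconnect, so redo
+ * the full detection (DPCD, OUI, MST probe); a short pulse means the
+ * sink raised an IRQ, so service the MST ESI or recheck the link
+ * status. Long pulses on eDP are ignored to avoid the vdd on/off
+ * feedback loop described below.
+ */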
+enum irqreturn
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	enum irqreturn ret = IRQ_NONE;
+
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
+		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
+		/*
+		 * vdd off can generate a long pulse on eDP which
+		 * would require vdd on to handle it, and thus we
+		 * would end up in an endless cycle of
+		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
+		 */
+		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
+			      port_name(intel_dig_port->port));
+		return IRQ_HANDLED;
+	}
+
+	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
+		      port_name(intel_dig_port->port),
+		      long_hpd ? "long" : "short");
+
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	if (long_hpd) {
+		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+			goto mst_fail;
+
+		if (!intel_dp_get_dpcd(intel_dp))
+			goto mst_fail;
+
+		intel_dp_probe_oui(intel_dp);
+
+		if (!intel_dp_probe_mst(intel_dp)) {
+			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+			intel_dp_check_link_status(intel_dp);
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+			goto mst_fail;
+		}
+	} else {
+		if (intel_dp->is_mst) {
+			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
+				goto mst_fail;
+		}
+
+		if (!intel_dp->is_mst) {
+			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+			intel_dp_check_link_status(intel_dp);
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+		}
+	}
+
+	ret = IRQ_HANDLED;
+
+	goto put_power;
+mst_fail:
+	/*
+	 * If we were in MST mode and the device is no longer there,
+	 * get out of MST mode.
+	 */
+	if (intel_dp->is_mst) {
+		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+			      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+		intel_dp->is_mst = false;
+		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+	}
+put_power:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
+}
+
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *intel_encoder;
+	struct intel_dp *intel_dp;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
+			return intel_dp->output_reg;
+	}
+
+	return -1;
+}
+
+/* check the VBT to see whether the eDP is on another port */
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	union child_device_config *p_child;
+	int i;
+	static const short port_mapping[] = {
+		[PORT_B] = DVO_PORT_DPB,
+		[PORT_C] = DVO_PORT_DPC,
+		[PORT_D] = DVO_PORT_DPD,
+		[PORT_E] = DVO_PORT_DPE,
+	};
+
+	/*
+	 * eDP is not supported on g4x, so bail out early just
+	 * for a bit of extra safety in case the VBT is bonkers.
+	 */
+	if (INTEL_INFO(dev)->gen < 5)
+		return false;
+
+	if (port == PORT_A)
+		return true;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
+
+		if (p_child->common.dvo_port == port_mapping[port] &&
+		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+			return true;
+	}
+	return false;
+}
+
+void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	intel_attach_force_audio_property(connector);
+	intel_attach_broadcast_rgb_property(connector);
+	intel_dp->color_range_auto = true;
+
+	if (is_edp(intel_dp)) {
+		drm_mode_create_scaling_mode_property(connector->dev);
+		drm_object_attach_property(
+			&connector->base,
+			connector->dev->mode_config.scaling_mode_property,
+			DRM_MODE_SCALE_ASPECT);
+		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	}
+}
+
+static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+{
+	intel_dp->last_power_cycle = jiffies;
+	intel_dp->last_power_on = jiffies;
+	intel_dp->last_backlight_off = jiffies;
+}
+
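+/*
+ * Resolve the panel power sequencer delays: read the current values
+ * from the PPS registers, compare them with the VBT, and store the max
+ * of the two for each field in intel_dp->pps_delays, falling back to
+ * the eDP spec limits when both are unset.
+ */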
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct edp_power_seq cur, vbt, spec,
+		*final = &intel_dp->pps_delays;
+	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* already initialized? */
+	if (final->t11_t12 != 0)
+		return;
+
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register set for Broxton needs to be
+		 * identified using the VBT; hardcoding the first set for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		pp_ctrl_reg = PCH_PP_CONTROL;
+		pp_on_reg = PCH_PP_ON_DELAYS;
+		pp_off_reg = PCH_PP_OFF_DELAYS;
+		pp_div_reg = PCH_PP_DIVISOR;
+	} else {
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+	}
+
+	/* Workaround: Need to write PP_CONTROL with the unlock key as
+	 * the very first thing. */
+	pp_ctl = ironlake_get_pp_control(intel_dp);
+
+	pp_on = I915_READ(pp_on_reg);
+	pp_off = I915_READ(pp_off_reg);
+	if (!IS_BROXTON(dev)) {
+		I915_WRITE(pp_ctrl_reg, pp_ctl);
+		pp_div = I915_READ(pp_div_reg);
+	}
+
+	/* Pull timing values out of registers */
+	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+		PANEL_POWER_UP_DELAY_SHIFT;
+
+	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+		PANEL_LIGHT_ON_DELAY_SHIFT;
+
+	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+		PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+		PANEL_POWER_DOWN_DELAY_SHIFT;
+
+	if (IS_BROXTON(dev)) {
+		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+			BXT_POWER_CYCLE_DELAY_SHIFT;
+		if (tmp > 0)
+			cur.t11_t12 = (tmp - 1) * 1000;
+		else
+			cur.t11_t12 = 0;
+	} else {
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+	}
+
+	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+	vbt = dev_priv->vbt.edp_pps;
+
+	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+	 * our hw here, which are all in 100usec. */
+	spec.t1_t3 = 210 * 10;
+	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+	spec.t10 = 500 * 10;
+	/* This one is special and actually in units of 100ms, but zero
+	 * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it with 1000 to make it in units of 100usec,
+	 * too. */
+	spec.t11_t12 = (510 + 100) * 10;
+
+	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+	/* Use the max of the register settings and vbt. If both are
+	 * unset, fall back to the spec limits. */
+#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
+				       spec.field : \
+				       max(cur.field, vbt.field))
+	assign_final(t1_t3);
+	assign_final(t8);
+	assign_final(t9);
+	assign_final(t10);
+	assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
+	intel_dp->panel_power_up_delay = get_delay(t1_t3);
+	intel_dp->backlight_on_delay = get_delay(t8);
+	intel_dp->backlight_off_delay = get_delay(t9);
+	intel_dp->panel_power_down_delay = get_delay(t10);
+	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+		      intel_dp->panel_power_cycle_delay);
+
+	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_on, pp_off, pp_div, port_sel = 0;
+	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	const struct edp_power_seq *seq = &intel_dp->pps_delays;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register set for Broxton needs to be
+		 * identified using the VBT; hardcoding the first set for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+	} else if (HAS_PCH_SPLIT(dev)) {
+		pp_on_reg = PCH_PP_ON_DELAYS;
+		pp_off_reg = PCH_PP_OFF_DELAYS;
+		pp_div_reg = PCH_PP_DIVISOR;
+	} else {
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+	}
+
+	/*
+	 * And finally store the new values in the power sequencer. The
+	 * backlight delays are set to 1 because we do manual waits on them. For
+	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
+	 * we'll end up waiting for the backlight off delay twice: once when we
+	 * do the manual sleep, and once when we disable the panel and wait for
+	 * the PP_STATUS bit to become zero.
+	 */
+	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock, simply match the Bspec
+	 * formula. */
+	if (IS_BROXTON(dev)) {
+		pp_div = I915_READ(pp_ctrl_reg);
+		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+				<< BXT_POWER_CYCLE_DELAY_SHIFT);
+	} else {
+		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	}
+
+	/* Haswell doesn't have any port selection bits for the panel
+	 * power sequencer any more. */
+	if (IS_VALLEYVIEW(dev)) {
+		port_sel = PANEL_PORT_SELECT_VLV(port);
+	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (port == PORT_A)
+			port_sel = PANEL_PORT_SELECT_DPA;
+		else
+			port_sel = PANEL_PORT_SELECT_DPD;
+	}
+
+	pp_on |= port_sel;
+
+	I915_WRITE(pp_on_reg, pp_on);
+	I915_WRITE(pp_off_reg, pp_off);
+	if (IS_BROXTON(dev))
+		I915_WRITE(pp_ctrl_reg, pp_div);
+	else
+		I915_WRITE(pp_div_reg, pp_div);
+
+	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		      I915_READ(pp_on_reg),
+		      I915_READ(pp_off_reg),
+		      IS_BROXTON(dev) ?
+		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
+		      I915_READ(pp_div_reg));
+}
+
+/**
+ * intel_dp_set_drrs_state - program registers for RR switch to take effect
+ * @dev: DRM device
+ * @refresh_rate: RR to be programmed
+ *
+ * This function gets called when refresh rate (RR) has to be changed from
+ * one frequency to another. Switches can be between high and low RR
+ * supported by the panel or to any other RR based on media playback (in
+ * this case, RR value needs to be passed from user space).
+ *
+ * The caller of this function needs to take a lock on dev_priv->drrs.
+ */
+static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	struct intel_digital_port *dig_port = NULL;
+	struct intel_dp *intel_dp = dev_priv->drrs.dp;
+	struct intel_crtc_state *config = NULL;
+	struct intel_crtc *intel_crtc = NULL;
+	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
+
+	if (refresh_rate <= 0) {
+		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+		return;
+	}
+
+	if (intel_dp == NULL) {
+		DRM_DEBUG_KMS("DRRS not supported.\n");
+		return;
+	}
+
+	/*
+	 * FIXME: This needs proper synchronization with psr state for some
+	 * platforms that cannot have PSR and DRRS enabled at the same time.
+	 */
+
+	dig_port = dp_to_dig_port(intel_dp);
+	encoder = &dig_port->base;
+	intel_crtc = to_intel_crtc(encoder->base.crtc);
+
+	if (!intel_crtc) {
+		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+		return;
+	}
+
+	config = intel_crtc->config;
+
+	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
+		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+		return;
+	}
+
+	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+			refresh_rate)
+		index = DRRS_LOW_RR;
+
+	if (index == dev_priv->drrs.refresh_rate_type) {
+		DRM_DEBUG_KMS(
+			"DRRS requested for previously set RR...ignoring\n");
+		return;
+	}
+
+	if (!intel_crtc->active) {
+		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+		return;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+		switch (index) {
+		case DRRS_HIGH_RR:
+			intel_dp_set_m_n(intel_crtc, M1_N1);
+			break;
+		case DRRS_LOW_RR:
+			intel_dp_set_m_n(intel_crtc, M2_N2);
+			break;
+		case DRRS_MAX_RR:
+		default:
+			DRM_ERROR("Unsupported refresh rate type\n");
+		}
+	} else if (INTEL_INFO(dev)->gen > 6) {
+		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+		u32 val;
+
+		val = I915_READ(reg);
+		if (index > DRRS_HIGH_RR) {
+			if (IS_VALLEYVIEW(dev))
+				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+			else
+				val |= PIPECONF_EDP_RR_MODE_SWITCH;
+		} else {
+			if (IS_VALLEYVIEW(dev))
+				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+			else
+				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+		}
+		I915_WRITE(reg, val);
+	}
+
+	dev_priv->drrs.refresh_rate_type = index;
+
+	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
+/**
+ * intel_edp_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
+void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (!intel_crtc->config->has_drrs) {
+		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->drrs.mutex);
+	if (WARN_ON(dev_priv->drrs.dp)) {
+		DRM_ERROR("DRRS already enabled\n");
+		goto unlock;
+	}
+
+	dev_priv->drrs.busy_frontbuffer_bits = 0;
+
+	dev_priv->drrs.dp = intel_dp;
+
+unlock:
+	mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ *
+ */
+void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (!intel_crtc->config->has_drrs)
+		return;
+
+	mutex_lock(&dev_priv->drrs.mutex);
+	if (!dev_priv->drrs.dp) {
+		mutex_unlock(&dev_priv->drrs.mutex);
+		return;
+	}
+
+	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+			intel_dp->attached_connector->panel.
+			fixed_mode->vrefresh);
+
+	dev_priv->drrs.dp = NULL;
+	mutex_unlock(&dev_priv->drrs.mutex);
+
+	cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+static void intel_edp_drrs_downclock_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), drrs.work.work);
+	struct intel_dp *intel_dp;
+
+	mutex_lock(&dev_priv->drrs.mutex);
+
+	intel_dp = dev_priv->drrs.dp;
+
+	if (!intel_dp)
+		goto unlock;
+
+	/*
+	 * The delayed work can race with an invalidate hence we need to
+	 * recheck.
+	 */
+
+	if (dev_priv->drrs.busy_frontbuffer_bits)
+		goto unlock;
+
+	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+			intel_dp->attached_connector->panel.
+			downclock_mode->vrefresh);
+
+unlock:
+	mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+		unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+		return;
+
+	cancel_delayed_work(&dev_priv->drrs.work);
+
+	mutex_lock(&dev_priv->drrs.mutex);
+	if (!dev_priv->drrs.dp) {
+		mutex_unlock(&dev_priv->drrs.mutex);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	/* invalidate means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+				dev_priv->drrs.dp->attached_connector->panel.
+				fixed_mode->vrefresh);
+
+	mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_edp_drrs_flush - Restart Idleness DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed or flip on a crtc is completed. So DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
+ * if no other planes are dirty.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_edp_drrs_flush(struct drm_device *dev,
+		unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+		return;
+
+	cancel_delayed_work(&dev_priv->drrs.work);
+
+	mutex_lock(&dev_priv->drrs.mutex);
+	if (!dev_priv->drrs.dp) {
+		mutex_unlock(&dev_priv->drrs.mutex);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+	/* flush means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+				dev_priv->drrs.dp->attached_connector->panel.
+				fixed_mode->vrefresh);
+
+	/*
+	 * flush also means no more activity hence schedule downclock, if all
+	 * other fbs are quiescent too
+	 */
+	if (!dev_priv->drrs.busy_frontbuffer_bits)
+		schedule_delayed_work(&dev_priv->drrs.work,
+				msecs_to_jiffies(1000));
+	mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of 2 types - static and seamless.
+ * Static DRRS involves changing refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenario.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * eDP DRRS:
+ *        The implementation is based on frontbuffer tracking.
+ * When there is a disturbance on the screen triggered by user activity or a
+ * periodic system activity, DRRS is disabled (RR is changed to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a switch
+ * to low RR is made.
+ *        For integration with frontbuffer tracking code,
+ * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
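+/*
+ * Typical idleness DRRS flow, as driven by frontbuffer tracking:
+ *
+ *   intel_edp_drrs_invalidate() - rendering starts: upclock to the
+ *                                 high RR and mark the planes busy
+ *   intel_edp_drrs_flush()      - rendering done: upclock and, once
+ *                                 all planes are idle, re-arm the
+ *                                 1 second downclock timer
+ *   intel_edp_drrs_downclock_work() - timer fired with no activity:
+ *                                 switch to the low RR
+ */
+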
+/**
+ * intel_dp_drrs_init - Init basic DRRS work and mutex.
+ * @intel_connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS state.
+ *
+ * Returns:
+ * Downclock mode if panel supports it, else return NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
+static struct drm_display_mode *
+intel_dp_drrs_init(struct intel_connector *intel_connector,
+		struct drm_display_mode *fixed_mode)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *downclock_mode = NULL;
+
+	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
+	mutex_init(&dev_priv->drrs.mutex);
+
+	if (INTEL_INFO(dev)->gen <= 6) {
+		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
+		return NULL;
+	}
+
+	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
+		return NULL;
+	}
+
+	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
+						    connector);
+
+	if (!downclock_mode) {
+		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
+		return NULL;
+	}
+
+	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
+
+	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
+	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
+	return downclock_mode;
+}
+
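+/*
+ * One-time eDP connector setup: sanitize any VDD state left enabled by
+ * the BIOS, cache the DPCD and EDID, pick the fixed panel mode (EDID
+ * preferred mode first, VBT mode as fallback), initialize the power
+ * sequencer registers, and set up DRRS and the backlight.
+ */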
+static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+				     struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct drm_display_mode *downclock_mode = NULL;
+	bool has_dpcd;
+	struct drm_display_mode *scan;
+	struct edid *edid;
+	enum pipe pipe = INVALID_PIPE;
+
+	if (!is_edp(intel_dp))
+		return true;
+
+	pps_lock(intel_dp);
+	intel_edp_panel_vdd_sanitize(intel_dp);
+	pps_unlock(intel_dp);
+
+	/* Cache DPCD and EDID for edp. */
+	has_dpcd = intel_dp_get_dpcd(intel_dp);
+
+	if (has_dpcd) {
+		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+			dev_priv->no_aux_handshake =
+				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+	} else {
+		/* if this fails, presume the device is a ghost */
+		DRM_INFO("failed to retrieve link info, disabling eDP\n");
+		return false;
+	}
+
+	/* We now know it's not a ghost, init power sequence regs. */
+	pps_lock(intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	pps_unlock(intel_dp);
+
+	mutex_lock(&dev->mode_config.mutex);
+	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+			drm_edid_to_eld(connector, edid);
+		} else {
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
+		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
+	}
+	intel_connector->edid = edid;
+
+	/* prefer fixed mode from EDID if available */
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			downclock_mode = intel_dp_drrs_init(
+						intel_connector, fixed_mode);
+			break;
+		}
+	}
+
+	/* fallback to VBT if available for eDP */
+	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
+		fixed_mode = drm_mode_duplicate(dev,
+					dev_priv->vbt.lfp_lvds_vbt_mode);
+		if (fixed_mode)
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (IS_VALLEYVIEW(dev)) {
+		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+		register_reboot_notifier(&intel_dp->edp_notifier);
+
+		/*
+		 * Figure out the current pipe for the initial backlight setup.
+		 * If the current pipe isn't valid, try the PPS pipe, and if that
+		 * fails just assume pipe A.
+		 */
+		if (IS_CHERRYVIEW(dev))
+			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+		else
+			pipe = PORT_TO_PIPE(intel_dp->DP);
+
+		if (pipe != PIPE_A && pipe != PIPE_B)
+			pipe = intel_dp->pps_pipe;
+
+		if (pipe != PIPE_A && pipe != PIPE_B)
+			pipe = PIPE_A;
+
+		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
+			      pipe_name(pipe));
+	}
+
+	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+	intel_connector->panel.backlight.power = intel_edp_backlight_power;
+	intel_panel_setup_backlight(connector, pipe);
+
+	return true;
+}
+
+bool
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int type;
+
+	intel_dp->pps_pipe = INVALID_PIPE;
+
+	/* intel_dp vfuncs */
+	if (INTEL_INFO(dev)->gen >= 9)
+		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+	else if (IS_VALLEYVIEW(dev))
+		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+	else if (HAS_PCH_SPLIT(dev))
+		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+	else
+		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+	else
+		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+
+	/* Preserve the current hw state. */
+	intel_dp->DP = I915_READ(intel_dp->output_reg);
+	intel_dp->attached_connector = intel_connector;
+
+	if (intel_dp_is_edp(dev, port))
+		type = DRM_MODE_CONNECTOR_eDP;
+	else
+		type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	/*
+	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+	 * for DP the encoder type can be set by the caller to
+	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+	 */
+	if (type == DRM_MODE_CONNECTOR_eDP)
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	/* eDP only on port B and/or C on vlv/chv */
+	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
+		    port != PORT_B && port != PORT_C))
+		return false;
+
+	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
+			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+			port_name(port));
+
+	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+	connector->interlace_allowed = true;
+	connector->doublescan_allowed = 0;
+
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  edp_panel_vdd_work);
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	drm_connector_register(connector);
+
+	if (HAS_DDI(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_dp_connector_unregister;
+
+	/* Set up the hotplug pin. */
+	switch (port) {
+	case PORT_A:
+		intel_encoder->hpd_pin = HPD_PORT_A;
+		break;
+	case PORT_B:
+		intel_encoder->hpd_pin = HPD_PORT_B;
+		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+			intel_encoder->hpd_pin = HPD_PORT_A;
+		break;
+	case PORT_C:
+		intel_encoder->hpd_pin = HPD_PORT_C;
+		break;
+	case PORT_D:
+		intel_encoder->hpd_pin = HPD_PORT_D;
+		break;
+	case PORT_E:
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
+	default:
+		BUG();
+	}
+
+	if (is_edp(intel_dp)) {
+		pps_lock(intel_dp);
+		intel_dp_init_panel_power_timestamps(intel_dp);
+		if (IS_VALLEYVIEW(dev))
+			vlv_initial_power_sequencer_setup(intel_dp);
+		else
+			intel_dp_init_panel_power_sequencer(dev, intel_dp);
+		pps_unlock(intel_dp);
+	}
+
+	intel_dp_aux_init(intel_dp, intel_connector);
+
+	/* init MST on ports that can support it */
+	if (HAS_DP_MST(dev) &&
+	    (port == PORT_B || port == PORT_C || port == PORT_D))
+		intel_dp_mst_encoder_init(intel_dig_port,
+					  intel_connector->base.base.id);
+
+	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+		drm_dp_aux_unregister(&intel_dp->aux);
+		if (is_edp(intel_dp)) {
+			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+			/*
+			 * vdd might still be enabled due to the delayed vdd off.
+			 * Make sure vdd is actually turned off here.
+			 */
+			pps_lock(intel_dp);
+			edp_panel_vdd_off_sync(intel_dp);
+			pps_unlock(intel_dp);
+		}
+		drm_connector_unregister(connector);
+		drm_connector_cleanup(connector);
+		return false;
+	}
+
+	intel_dp_add_properties(intel_dp, connector);
+
+	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
+	 * written as 0xd.  Failure to do so will result in spurious interrupts
+	 * being generated on the port when a cable is not attached.
+	 */
+	if (IS_G4X(dev) && !IS_GM45(dev)) {
+		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+	}
+
+	i915_debugfs_connector_add(connector);
+
+	return true;
+}
+
+bool intel_dp_init(struct drm_device *dev,
+		   int output_reg,
+		   enum port port)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return false;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector)
+		goto err_connector_alloc;
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	intel_encoder->compute_config = intel_dp_compute_config;
+	intel_encoder->disable = intel_disable_dp;
+	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+	intel_encoder->get_config = intel_dp_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
+	if (IS_CHERRYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
+		intel_encoder->pre_enable = chv_pre_enable_dp;
+		intel_encoder->enable = vlv_enable_dp;
+		intel_encoder->post_disable = chv_post_disable_dp;
+		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
+	} else if (IS_VALLEYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
+		intel_encoder->pre_enable = vlv_pre_enable_dp;
+		intel_encoder->enable = vlv_enable_dp;
+		intel_encoder->post_disable = vlv_post_disable_dp;
+	} else {
+		intel_encoder->pre_enable = g4x_pre_enable_dp;
+		intel_encoder->enable = g4x_enable_dp;
+		if (INTEL_INFO(dev)->gen >= 5)
+			intel_encoder->post_disable = ilk_post_disable_dp;
+	}
+
+	intel_dig_port->port = port;
+	intel_dig_port->dp.output_reg = output_reg;
+
+	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	if (IS_CHERRYVIEW(dev)) {
+		if (port == PORT_D)
+			intel_encoder->crtc_mask = 1 << 2;
+		else
+			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	} else {
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	}
+	intel_encoder->cloneable = 0;
+
+	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+	dev_priv->hotplug.irq_port[port] = intel_dig_port;
+
+	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+		goto err_init_connector;
+
+	return true;
+
+err_init_connector:
+	drm_encoder_cleanup(encoder);
+	kfree(intel_connector);
+err_connector_alloc:
+	kfree(intel_dig_port);
+	return false;
+}
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* disable MST */
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port)
+			continue;
+
+		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+			if (!intel_dig_port->dp.can_mst)
+				continue;
+			if (intel_dig_port->dp.is_mst)
+				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+		}
+	}
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port)
+			continue;
+		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+			int ret;
+
+			if (!intel_dig_port->dp.can_mst)
+				continue;
+
+			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+			if (ret != 0)
+				intel_dp_check_mst_status(&intel_dig_port->dp);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 0000000..06bd925
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *             2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+					struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_atomic_state *state;
+	int bpp, i;
+	int lane_count, slots;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_connector *drm_connector;
+	struct intel_connector *connector, *found = NULL;
+	struct drm_connector_state *connector_state;
+	int mst_pbn;
+
+	pipe_config->dp_encoder_is_mst = true;
+	pipe_config->has_pch_encoder = false;
+	pipe_config->has_dp_encoder = true;
+	bpp = 24;
+	/*
+	 * For MST we always configure the max link bw - the spec doesn't
+	 * seem to suggest we should do otherwise.
+	 */
+	lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+	pipe_config->lane_count = lane_count;
+
+	pipe_config->pipe_bpp = 24;
+	pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+
+	state = pipe_config->base.state;
+
+	for_each_connector_in_state(state, drm_connector, connector_state, i) {
+		connector = to_intel_connector(drm_connector);
+
+		if (connector_state->best_encoder == &encoder->base) {
+			found = connector;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("can't find connector\n");
+		return false;
+	}
+
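+	/*
+	 * Illustrative numbers (a sketch, not driver code):
+	 * drm_dp_calc_pbn_mode() converts the mode's bandwidth into PBN units
+	 * of 54/64 MBps including the DP spec's 0.6% margin, so a 1920x1080@60
+	 * mode (crtc_clock ~148500 kHz) at bpp=24 comes to roughly
+	 * 148500 * 24/8 * 64/54 * 1.006 / 1000 ~= 531 PBN. The
+	 * drm_dp_find_vcpi_slots() call below divides that by the per-timeslot
+	 * PBN of the trained link to get the tu slot count.
+	 */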
+	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
+
+	pipe_config->pbn = mst_pbn;
+	slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+	intel_link_compute_m_n(bpp, lane_count,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
+			       &pipe_config->dp_m_n);
+
+	pipe_config->dp_m_n.tu = slots;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config);
+
+	return true;
+}
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	int ret;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+	if (ret)
+		DRM_ERROR("failed to update payload %d\n", ret);
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	/* this can fail */
+	drm_dp_check_act_status(&intel_dp->mst_mgr);
+	/* and this can also fail */
+	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+	intel_dp->active_mst_links--;
+	intel_mst->port = NULL;
+	if (intel_dp->active_mst_links == 0) {
+		intel_dig_port->base.post_disable(&intel_dig_port->base);
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	}
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int ret;
+	uint32_t temp;
+	struct intel_connector *found = NULL, *connector;
+	int slots;
+	struct drm_crtc *crtc = encoder->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.state->best_encoder == &encoder->base) {
+			found = connector;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("can't find connector\n");
+		return;
+	}
+
+	/* MST encoders are bound to a crtc, not to a connector,
+	 * force the mapping here for get_hw_state.
+	 */
+	found->encoder = encoder;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+	intel_mst->port = found->port;
+
+	if (intel_dp->active_mst_links == 0) {
+		enum port port = intel_ddi_get_encoder_port(encoder);
+
+		intel_dp_set_link_params(intel_dp, intel_crtc->config);
+
+		/* FIXME: add support for SKL */
+		if (INTEL_INFO(dev)->gen < 9)
+			I915_WRITE(PORT_CLK_SEL(port),
+				   intel_crtc->config->ddi_pll_sel);
+
+		intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
+	}
+
+	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+				       intel_mst->port,
+				       intel_crtc->config->pbn, &slots);
+	if (!ret) {
+		DRM_ERROR("failed to allocate vcpi\n");
+		return;
+	}
+
+	intel_dp->active_mst_links++;
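+	/*
+	 * Write DP_TP_STATUS back to itself to clear its sticky
+	 * (write-1-to-clear) bits, e.g. a stale ACT-sent indication.
+	 */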
+	temp = I915_READ(DP_TP_STATUS(port));
+	I915_WRITE(DP_TP_STATUS(port), temp);
+
+	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int ret;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+		     1))
+		DRM_ERROR("Timed out waiting for ACT sent\n");
+
+	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+				      enum pipe *pipe)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	*pipe = intel_mst->pipe;
+	if (intel_mst->port)
+		return true;
+	return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+					struct intel_crtc_state *pipe_config)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	u32 temp, flags = 0;
+
+	pipe_config->has_dp_encoder = true;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (temp & TRANS_DDI_PHSYNC)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (temp & TRANS_DDI_PVSYNC)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	switch (temp & TRANS_DDI_BPC_MASK) {
+	case TRANS_DDI_BPC_6:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case TRANS_DDI_BPC_8:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case TRANS_DDI_BPC_10:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case TRANS_DDI_BPC_12:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	pipe_config->lane_count =
+		((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	struct edid *edid;
+	int ret;
+
+	edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+
+	return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+			  struct drm_property *property,
+			  uint64_t val)
+{
+	return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_dp_mst_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_dp_mst_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.destroy = intel_dp_mst_connector_destroy,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+	return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+			struct drm_display_mode *mode)
+{
+	/* TODO - validate mode against available PBN for link */
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+							 struct drm_connector_state *state)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+	.get_modes = intel_dp_mst_get_modes,
+	.mode_valid = intel_dp_mst_mode_valid,
+	.atomic_best_encoder = intel_mst_atomic_best_encoder,
+	.best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+	.destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+	if (connector->encoder && connector->base.state->crtc) {
+		enum pipe pipe;
+		if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+			return false;
+		return true;
+	}
+	return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *
+intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port, const char *pathprop)
+{
+	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	int i;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector)
+		return NULL;
+
+	connector = &intel_connector->base;
+	drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+	intel_connector->unregister = intel_connector_unregister;
+	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+	intel_connector->mst_port = intel_dp;
+	intel_connector->port = port;
+
+	for (i = PIPE_A; i <= PIPE_C; i++) {
+		drm_mode_connector_attach_encoder(&intel_connector->base,
+						  &intel_dp->mst_encoders[i]->base.base);
+	}
+	intel_dp_add_properties(intel_dp, connector);
+
+	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
+	drm_mode_connector_set_path_property(connector, pathprop);
+	return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+	drm_modeset_lock_all(dev);
+	intel_connector_add_to_fbdev(intel_connector);
+	drm_modeset_unlock_all(dev);
+	drm_connector_register(&intel_connector->base);
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+
+	intel_connector->unregister(intel_connector);
+
+	/* need to nuke the connector */
+	drm_modeset_lock_all(dev);
+	if (connector->state->crtc) {
+		struct drm_mode_set set;
+		int ret;
+
+		memset(&set, 0, sizeof(set));
+		set.crtc = connector->state->crtc;
+
+		ret = drm_atomic_helper_set_config(&set);
+
+		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+	}
+
+	intel_connector_remove_from_fbdev(intel_connector);
+	drm_connector_cleanup(connector);
+	drm_modeset_unlock_all(dev);
+
+	kfree(intel_connector);
+	DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+	.add_connector = intel_dp_add_mst_connector,
+	.register_connector = intel_dp_register_mst_connector,
+	.destroy_connector = intel_dp_destroy_mst_connector,
+	.hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+	struct intel_dp_mst_encoder *intel_mst;
+	struct intel_encoder *intel_encoder;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+
+	intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+	if (!intel_mst)
+		return NULL;
+
+	intel_mst->pipe = pipe;
+	intel_encoder = &intel_mst->base;
+	intel_mst->primary = intel_dig_port;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+			 DRM_MODE_ENCODER_DPMST);
+
+	intel_encoder->type = INTEL_OUTPUT_DP_MST;
+	intel_encoder->crtc_mask = 0x7;
+	intel_encoder->cloneable = 0;
+
+	intel_encoder->compute_config = intel_dp_mst_compute_config;
+	intel_encoder->disable = intel_mst_disable_dp;
+	intel_encoder->post_disable = intel_mst_post_disable_dp;
+	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+	intel_encoder->enable = intel_mst_enable_dp;
+	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+	intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+	return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+	int i;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	for (i = PIPE_A; i <= PIPE_C; i++)
+		intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+	return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	int ret;
+
+	intel_dp->can_mst = true;
+	intel_dp->mst_mgr.cbs = &mst_cbs;
+
+	/* create encoders */
+	intel_dp_create_fake_mst_encoders(intel_dig_port);
+	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev,
+					   &intel_dp->aux, 16, 3, conn_base_id);
+	if (ret) {
+		intel_dp->can_mst = false;
+		return ret;
+	}
+	return 0;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	if (!intel_dp->can_mst)
+		return;
+
+	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+	/* encoders will get killed by normal cleanup */
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
new file mode 100644
index 0000000..722aa15
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -0,0 +1,1478 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/async.h>
+#include <linux/i2c.h>
+#include <linux/hdmi.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
+
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			if (!(COND))					\
+				ret__ = -ETIMEDOUT;			\
+			break;						\
+		}							\
+		if ((W) && drm_can_sleep()) {				\
+			usleep_range((W)*1000, (W)*2000);		\
+		} else {						\
+			cpu_relax();					\
+		}							\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+					       DIV_ROUND_UP((US), 1000), 0)
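+
+/*
+ * Usage sketch (REG and DONE_BIT are illustrative placeholders, not real
+ * register definitions):
+ *
+ *	if (wait_for(I915_READ(REG) & DONE_BIT, 10))
+ *		DRM_ERROR("timed out waiting for DONE\n");
+ *
+ * wait_for() may sleep ~1-2ms between polls; use the atomic variants when
+ * polling with interrupts off or from atomic context.
+ */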
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtc in the mode set */
+
+/* Maximum cursor sizes */
+#define GEN2_CURSOR_WIDTH 64
+#define GEN2_CURSOR_HEIGHT 64
+#define MAX_CURSOR_WIDTH 256
+#define MAX_CURSOR_HEIGHT 256
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* these are outputs from the chip - integrated only;
+   external chips are via DVO or SDVO output */
+enum intel_output_type {
+	INTEL_OUTPUT_UNUSED = 0,
+	INTEL_OUTPUT_ANALOG = 1,
+	INTEL_OUTPUT_DVO = 2,
+	INTEL_OUTPUT_SDVO = 3,
+	INTEL_OUTPUT_LVDS = 4,
+	INTEL_OUTPUT_TVOUT = 5,
+	INTEL_OUTPUT_HDMI = 6,
+	INTEL_OUTPUT_DISPLAYPORT = 7,
+	INTEL_OUTPUT_EDP = 8,
+	INTEL_OUTPUT_DSI = 9,
+	INTEL_OUTPUT_UNKNOWN = 10,
+	INTEL_OUTPUT_DP_MST = 11,
+};
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+#define INTEL_DSI_VIDEO_MODE	0
+#define INTEL_DSI_COMMAND_MODE	1
+
+struct intel_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_i915_gem_object *obj;
+};
+
+struct intel_fbdev {
+	struct drm_fb_helper helper;
+	struct intel_framebuffer *fb;
+	struct list_head fbdev_list;
+	struct drm_display_mode *our_mode;
+	int preferred_bpp;
+};
+
+struct intel_encoder {
+	struct drm_encoder base;
+
+	enum intel_output_type type;
+	unsigned int cloneable;
+	void (*hot_plug)(struct intel_encoder *);
+	bool (*compute_config)(struct intel_encoder *,
+			       struct intel_crtc_state *);
+	void (*pre_pll_enable)(struct intel_encoder *);
+	void (*pre_enable)(struct intel_encoder *);
+	void (*enable)(struct intel_encoder *);
+	void (*mode_set)(struct intel_encoder *intel_encoder);
+	void (*disable)(struct intel_encoder *);
+	void (*post_disable)(struct intel_encoder *);
+	void (*post_pll_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this connector, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the pipe
+	 * it is connected to in the pipe parameter. */
+	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+	/* Reconstructs the equivalent mode flags for the current hardware
+	 * state. This must be called _after_ display->get_pipe_config has
+	 * pre-filled the pipe config. Note that intel_encoder->base.crtc must
+	 * be set correctly before calling this function. */
+	void (*get_config)(struct intel_encoder *,
+			   struct intel_crtc_state *pipe_config);
+	/*
+	 * Called during system suspend after all pending requests for the
+	 * encoder are flushed (for example for DP AUX transactions) and
+	 * device interrupts are disabled.
+	 */
+	void (*suspend)(struct intel_encoder *);
+	int crtc_mask;
+	enum hpd_pin hpd_pin;
+};
+
+struct intel_panel {
+	struct drm_display_mode *fixed_mode;
+	struct drm_display_mode *downclock_mode;
+	int fitting_mode;
+
+	/* backlight */
+	struct {
+		bool present;
+		u32 level;
+		u32 min;
+		u32 max;
+		bool enabled;
+		bool combination_mode;	/* gen 2/4 only */
+		bool active_low_pwm;
+
+		/* PWM chip */
+		bool util_pin_active_low;	/* bxt+ */
+		u8 controller;		/* bxt+ only */
+		struct pwm_device *pwm;
+
+		struct backlight_device *device;
+
+		/* Connector and platform specific backlight functions */
+		int (*setup)(struct intel_connector *connector, enum pipe pipe);
+		uint32_t (*get)(struct intel_connector *connector);
+		void (*set)(struct intel_connector *connector, uint32_t level);
+		void (*disable)(struct intel_connector *connector);
+		void (*enable)(struct intel_connector *connector);
+		uint32_t (*hz_to_pwm)(struct intel_connector *connector,
+				      uint32_t hz);
+		void (*power)(struct intel_connector *, bool enable);
+	} backlight;
+};
+
+struct intel_connector {
+	struct drm_connector base;
+	/*
+	 * The fixed encoder this connector is connected to.
+	 */
+	struct intel_encoder *encoder;
+
+	/* Reads out the current hw, returning true if the connector is enabled
+	 * and active (i.e. dpms ON state). */
+	bool (*get_hw_state)(struct intel_connector *);
+
+	/*
+	 * Removes all interfaces through which the connector is accessible
+	 * - like sysfs, debugfs entries - so that no new operations can be
+	 * started on the connector. Also makes sure all currently pending
+	 * operations finish before returning.
+	 */
+	void (*unregister)(struct intel_connector *);
+
+	/* Panel info for eDP and LVDS */
+	struct intel_panel panel;
+
+	/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+	struct edid *edid;
+	struct edid *detect_edid;
+
+	/* since POLL and HPD connectors may use the same HPD line, keep the native
+	   state of connector->polled in case hotplug storm detection changes it */
+	u8 polled;
+
+	void *port; /* store this opaque as it's illegal to dereference it */
+
+	struct intel_dp *mst_port;
+};
+
+typedef struct dpll {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int	dot;
+	int	vco;
+	int	m;
+	int	p;
+} intel_clock_t;
+
+struct intel_atomic_state {
+	struct drm_atomic_state base;
+
+	unsigned int cdclk;
+	bool dpll_set;
+	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
+struct intel_plane_state {
+	struct drm_plane_state base;
+	struct drm_rect src;
+	struct drm_rect dst;
+	struct drm_rect clip;
+	bool visible;
+
+	/*
+	 * scaler_id
+	 *    = -1 : not using a scaler
+	 *    >=  0 : using a scaler
+	 *
+	 * plane requiring a scaler:
+	 *   - During check_plane, its bit is set in
+	 *     crtc_state->scaler_state.scaler_users by calling helper function
+	 *     update_scaler_plane.
+	 *   - scaler_id indicates the scaler it got assigned.
+	 *
+	 * plane doesn't require a scaler:
+	 *   - this can happen when scaling is no longer required or the plane
+	 *     simply got disabled.
+	 *   - During check_plane, corresponding bit is reset in
+	 *     crtc_state->scaler_state.scaler_users by calling helper function
+	 *     update_scaler_plane.
+	 */
+	int scaler_id;
+
+	struct drm_intel_sprite_colorkey ckey;
+};
+
+struct intel_initial_plane_config {
+	struct intel_framebuffer *fb;
+	unsigned int tiling;
+	int size;
+	u32 base;
+};
+
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+
+struct intel_scaler {
+	int in_use;
+	uint32_t mode;
+};
+
+struct intel_crtc_scaler_state {
+#define SKL_NUM_SCALERS 2
+	struct intel_scaler scalers[SKL_NUM_SCALERS];
+
+	/*
+	 * scaler_users: keeps track of users requesting scalers on this crtc.
+	 *
+	 *     If a bit is set, a user is using a scaler.
+	 *     Here user can be a plane or crtc as defined below:
+	 *       bits 0-30 - plane (bit position is index from drm_plane_index)
+	 *       bit 31    - crtc
+	 *
+	 * Instead of creating a new index to cover planes and crtc, we use
+	 * the existing drm_plane_index for planes, which is well below 31
+	 * planes, and bit 31 for the crtc. This should be fine to cover all
+	 * our platforms.
+	 *
+	 * intel_atomic_setup_scalers will set up available scalers for users
+	 * requesting scalers. It will gracefully fail if the request exceeds
+	 * availability.
+	 */
+#define SKL_CRTC_INDEX 31
+	unsigned scaler_users;
+
+	/* scaler used by crtc for panel fitting purpose */
+	int scaler_id;
+};
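+
+/*
+ * Illustrative example (a sketch, not driver code): a crtc using its panel
+ * fitter while the plane with drm_plane_index 2 is also scaling would have
+ * scaler_users == BIT(SKL_CRTC_INDEX) | BIT(2), i.e. two scalers in use on
+ * that crtc.
+ */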
+
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
+struct intel_crtc_state {
+	struct drm_crtc_state base;
+
+	/**
+	 * quirks - bitfield with hw state readout quirks
+	 *
+	 * For various reasons the hw state readout code might not be able to
+	 * completely faithfully read out the current state. These cases are
+	 * tracked with quirk flags so that fastboot and state checker can act
+	 * accordingly.
+	 */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
+	unsigned long quirks;
+
+	bool update_pipe;
+
+	/* Pipe source size (ie. panel fitter input size)
+	 * All planes will be positioned inside this space,
+	 * and get clipped at the edges. */
+	int pipe_src_w, pipe_src_h;
+
+	/* Whether to set up the PCH/FDI. Note that we never allow sharing
+	 * between pch encoders and cpu encoders. */
+	bool has_pch_encoder;
+
+	/* Are we sending infoframes on the attached port */
+	bool has_infoframe;
+
+	/* CPU Transcoder for the pipe. Currently this can only differ from the
+	 * pipe on Haswell (where we have a special eDP transcoder). */
+	enum transcoder cpu_transcoder;
+
+	/*
+	 * Use reduced/limited/broadcast rgb range, compressing from the full
+	 * range fed into the crtcs.
+	 */
+	bool limited_color_range;
+
+	/* DP has a bunch of special cases unfortunately, so mark the pipe
+	 * accordingly. */
+	bool has_dp_encoder;
+
+	/* Whether we should send NULL infoframes. Required for audio. */
+	bool has_hdmi_sink;
+
+	/* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+	 * has_dp_encoder is set. */
+	bool has_audio;
+
+	/*
+	 * Enable dithering, used when the selected pipe bpp doesn't match the
+	 * plane bpp.
+	 */
+	bool dither;
+
+	/* Controls for the clock computation, to override various stages. */
+	bool clock_set;
+
+	/* SDVO TV has a bunch of special cases. To make multifunction encoders
+	 * work correctly, we need to track this at runtime. */
+	bool sdvo_tv_clock;
+
+	/*
+	 * crtc bandwidth limit, don't increase pipe bpp or clock if not really
+	 * required. This is set in the 2nd loop of calling encoder's
+	 * ->compute_config if the first pick doesn't work out.
+	 */
+	bool bw_constrained;
+
+	/* Settings for the intel dpll used on pretty much everything but
+	 * haswell. */
+	struct dpll dpll;
+
+	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
+	enum intel_dpll_id shared_dpll;
+
+	/*
+	 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
+	 * - enum skl_dpll on SKL
+	 */
+	uint32_t ddi_pll_sel;
+
+	/* Actual register state of the dpll, for shared dpll cross-checking. */
+	struct intel_dpll_hw_state dpll_hw_state;
+
+	int pipe_bpp;
+	struct intel_link_m_n dp_m_n;
+
+	/* m2_n2 for eDP downclock */
+	struct intel_link_m_n dp_m2_n2;
+	bool has_drrs;
+
+	/*
+	 * Frequency the dpll for the port should run at. Differs from the
+	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * already multiplied by pixel_multiplier.
+	 */
+	int port_clock;
+
+	/* Used by SDVO (and if we ever fix it, HDMI). */
+	unsigned pixel_multiplier;
+
+	uint8_t lane_count;
+
+	/* Panel fitter controls for gen2-gen4 + VLV */
+	struct {
+		u32 control;
+		u32 pgm_ratios;
+		u32 lvds_border_bits;
+	} gmch_pfit;
+
+	/* Panel fitter placement and size for Ironlake+ */
+	struct {
+		u32 pos;
+		u32 size;
+		bool enabled;
+		bool force_thru;
+	} pch_pfit;
+
+	/* FDI configuration, only valid if has_pch_encoder is set. */
+	int fdi_lanes;
+	struct intel_link_m_n fdi_m_n;
+
+	bool ips_enabled;
+
+	bool double_wide;
+
+	bool dp_encoder_is_mst;
+	int pbn;
+
+	struct intel_crtc_scaler_state scaler_state;
+
+	/* w/a for waiting 2 vblanks during crtc enable */
+	enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+	struct vlv_pipe_wm wm[3];
+	struct vlv_sr_wm sr[3];
+	uint8_t num_active_planes;
+	uint8_t num_levels;
+	uint8_t level;
+	bool cxsr;
+};
+
+struct intel_pipe_wm {
+	struct intel_wm_level wm[5];
+	uint32_t linetime;
+	bool fbc_wm_enabled;
+	bool pipe_enabled;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
+struct intel_mmio_flip {
+	struct work_struct work;
+	struct drm_i915_private *i915;
+	struct drm_i915_gem_request *req;
+	struct intel_crtc *crtc;
+};
+
+struct skl_pipe_wm {
+	struct skl_wm_level wm[8];
+	struct skl_wm_level trans_wm;
+	uint32_t linetime;
+};
+
+/*
+ * Tracking of operations that need to be performed at the beginning/end of an
+ * atomic commit, outside the atomic section where interrupts are disabled.
+ * These are generally operations that grab mutexes or might otherwise sleep
+ * and thus can't be run with interrupts disabled.
+ */
+struct intel_crtc_atomic_commit {
+	/* Sleepable operations to perform before commit */
+	bool wait_for_flips;
+	bool disable_fbc;
+	bool disable_ips;
+	bool disable_cxsr;
+	bool pre_disable_primary;
+	bool update_wm_pre, update_wm_post;
+	unsigned disabled_planes;
+
+	/* Sleepable operations to perform after commit */
+	unsigned fb_bits;
+	bool wait_vblank;
+	bool update_fbc;
+	bool post_enable_primary;
+	unsigned update_sprite_watermarks;
+};
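+
+/*
+ * Illustrative example (a sketch; the exact flags chosen depend on the
+ * commit): disabling a primary plane might set pre_disable_primary and
+ * update_wm_pre for the pre-commit work, and wait_vblank plus update_fbc
+ * for the post-commit work.
+ */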
+
+struct intel_crtc {
+	struct drm_crtc base;
+	enum pipe pipe;
+	enum plane plane;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	/*
+	 * Whether the crtc and the connected output pipeline is active. Implies
+	 * that crtc->enabled is set, i.e. the current mode configuration has
+	 * some outputs connected to this crtc.
+	 */
+	bool active;
+	unsigned long enabled_power_domains;
+	bool lowfreq_avail;
+	struct intel_overlay *overlay;
+	struct intel_unpin_work *unpin_work;
+
+	atomic_t unpin_work_count;
+
+	/* Display surface base address adjustment for pageflips. Note that on
+	 * gen4+ this only adjusts up to a tile, offsets within a tile are
+	 * handled in the hw itself (with the TILEOFF register). */
+	unsigned long dspaddr_offset;
+	int adjusted_x;
+	int adjusted_y;
+
+	uint32_t cursor_addr;
+	uint32_t cursor_cntl;
+	uint32_t cursor_size;
+	uint32_t cursor_base;
+
+	struct intel_crtc_state *config;
+
+	/* reset counter value when the last flip was submitted */
+	unsigned int reset_counter;
+
+	/* Access to these should be protected by dev_priv->irq_lock. */
+	bool cpu_fifo_underrun_disabled;
+	bool pch_fifo_underrun_disabled;
+
+	/* per-pipe watermark state */
+	struct {
+		/* watermarks currently being used  */
+		struct intel_pipe_wm active;
+		/* SKL wm values currently in use */
+		struct skl_pipe_wm skl_active;
+		/* allow CxSR on this pipe */
+		bool cxsr_allowed;
+	} wm;
+
+	int scanline_offset;
+
+	struct {
+		unsigned start_vbl_count;
+		ktime_t start_vbl_time;
+		int min_vbl, max_vbl;
+		int scanline_start;
+	} debug;
+
+	struct intel_crtc_atomic_commit atomic;
+
+	/* scalers available on this crtc */
+	int num_scalers;
+
+	struct vlv_wm_state wm_state;
+};
+
+struct intel_plane_wm_parameters {
+	uint32_t horiz_pixels;
+	uint32_t vert_pixels;
+	/*
+	 *   For packed pixel formats:
+	 *     bytes_per_pixel - holds bytes per pixel
+	 *   For planar pixel formats:
+	 *     bytes_per_pixel - holds bytes per pixel for uv-plane
+	 *     y_bytes_per_pixel - holds bytes per pixel for y-plane
+	 */
+	uint8_t bytes_per_pixel;
+	uint8_t y_bytes_per_pixel;
+	bool enabled;
+	bool scaled;
+	u64 tiling;
+	unsigned int rotation;
+	uint16_t fifo_size;
+};
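+
+/*
+ * Example (illustrative): for a packed format such as XRGB8888 only
+ * bytes_per_pixel (4) is used; for a planar format such as NV12,
+ * y_bytes_per_pixel holds the Y plane's 1 byte per pixel while
+ * bytes_per_pixel holds the UV plane's 2 bytes per chroma sample.
+ */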
+
+struct intel_plane {
+	struct drm_plane base;
+	int plane;
+	enum pipe pipe;
+	bool can_scale;
+	int max_downscale;
+	uint32_t frontbuffer_bit;
+
+	/* Since we need to change the watermarks before/after
+	 * enabling/disabling the planes, we need to store the parameters here
+	 * as the other pieces of the struct may not reflect the values we want
+	 * for the watermark calculations. Currently only Haswell uses this.
+	 */
+	struct intel_plane_wm_parameters wm;
+
+	/*
+	 * NOTE: Do not place new plane state fields here (e.g., when adding
+	 * new plane properties).  New runtime state should now be placed in
+	 * the intel_plane_state structure and accessed via drm_plane->state.
+	 */
+
+	void (*update_plane)(struct drm_plane *plane,
+			     struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     int crtc_x, int crtc_y,
+			     unsigned int crtc_w, unsigned int crtc_h,
+			     uint32_t x, uint32_t y,
+			     uint32_t src_w, uint32_t src_h);
+	void (*disable_plane)(struct drm_plane *plane,
+			      struct drm_crtc *crtc);
+	int (*check_plane)(struct drm_plane *plane,
+			   struct intel_crtc_state *crtc_state,
+			   struct intel_plane_state *state);
+	void (*commit_plane)(struct drm_plane *plane,
+			     struct intel_plane_state *state);
+};
+
+struct intel_watermark_params {
+	unsigned long fifo_size;
+	unsigned long max_wm;
+	unsigned long default_wm;
+	unsigned long guard_size;
+	unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+	int is_desktop;
+	int is_ddr3;
+	unsigned long fsb_freq;
+	unsigned long mem_freq;
+	unsigned long display_sr;
+	unsigned long display_hpll_disable;
+	unsigned long cursor_sr;
+	unsigned long cursor_hpll_disable;
+};
+
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define intel_fb_obj(x) ((x) ? to_intel_framebuffer(x)->obj : NULL)
+
+struct intel_hdmi {
+	u32 hdmi_reg;
+	int ddc_bus;
+	bool limited_color_range;
+	bool color_range_auto;
+	bool has_hdmi_sink;
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	bool rgb_quant_range_selectable;
+	enum hdmi_picture_aspect aspect_ratio;
+	struct intel_connector *attached_connector;
+	void (*write_infoframe)(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len);
+	void (*set_infoframes)(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode);
+	bool (*infoframe_enabled)(struct drm_encoder *encoder);
+};
+
+struct intel_dp_mst_encoder;
+#define DP_MAX_DOWNSTREAM_PORTS		0x10
+
+/*
+ * enum link_m_n_set:
+ *	When the platform provides two sets of M_N registers for dp, we can
+ *	program them and switch between them in case of DRRS.
+ *	But when only one such register is provided, we have to program the
+ *	required divider value on that register itself based on the DRRS state.
+ *
+ * M1_N1	: Program dp_m_n on M1_N1 registers
+ *			  dp_m2_n2 on M2_N2 registers (If supported)
+ *
+ * M2_N2	: Program dp_m2_n2 on M1_N1 registers
+ *			  M2_N2 registers are not supported
+ */
+
+enum link_m_n_set {
+	/* Sets the m1_n1 and m2_n2 */
+	M1_N1 = 0,
+	M2_N2
+};
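+
+/*
+ * Illustrative sketch, assuming the intel_dp_set_m_n() helper declared
+ * elsewhere in this header: switching DRRS to the low refresh rate on a
+ * platform with both register sets amounts to
+ *
+ *	intel_dp_set_m_n(intel_crtc, M2_N2);
+ *
+ * while single-register platforms reprogram M1_N1 with the dp_m2_n2
+ * divider values on the same call.
+ */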
+
+struct sink_crc {
+	bool started;
+	u8 last_crc[6];
+	int last_count;
+};
+
+struct intel_dp {
+	uint32_t output_reg;
+	uint32_t aux_ch_ctl_reg;
+	uint32_t DP;
+	int link_rate;
+	uint8_t lane_count;
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	bool limited_color_range;
+	bool color_range_auto;
+	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+	/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+	uint8_t num_sink_rates;
+	int sink_rates[DP_MAX_SUPPORTED_RATES];
+	struct sink_crc sink_crc;
+	struct drm_dp_aux aux;
+	uint8_t train_set[4];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	unsigned long last_power_cycle;
+	unsigned long last_power_on;
+	unsigned long last_backlight_off;
+
+	struct notifier_block edp_notifier;
+
+	/*
+	 * Pipe whose power sequencer is currently locked into
+	 * this port. Only relevant on VLV/CHV.
+	 */
+	enum pipe pps_pipe;
+	struct edp_power_seq pps_delays;
+
+	bool can_mst; /* this port supports mst */
+	bool is_mst;
+	int active_mst_links;
+	/* connector directly attached - won't be used for modesets in the mst world */
+	struct intel_connector *attached_connector;
+
+	/* mst connector list */
+	struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+	struct drm_dp_mst_topology_mgr mst_mgr;
+
+	uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+	/*
+	 * This function returns the value we have to program the AUX_CTL
+	 * register with to kick off an AUX transaction.
+	 */
+	uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
+				     bool has_aux_irq,
+				     int send_bytes,
+				     uint32_t aux_clock_divider);
+
+	/* Displayport compliance testing */
+	unsigned long compliance_test_type;
+	unsigned long compliance_test_data;
+	bool compliance_test_active;
+};
+
+struct intel_digital_port {
+	struct intel_encoder base;
+	enum port port;
+	u32 saved_port_bits;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
+	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+	bool release_cl2_override;
+};
+
+struct intel_dp_mst_encoder {
+	struct intel_encoder base;
+	enum pipe pipe;
+	struct intel_digital_port *primary;
+	void *port; /* store this opaque as it's illegal to dereference it */
+};
+
+static inline enum dpio_channel
+vlv_dport_to_channel(struct intel_digital_port *dport)
+{
+	switch (dport->port) {
+	case PORT_B:
+	case PORT_D:
+		return DPIO_CH0;
+	case PORT_C:
+		return DPIO_CH1;
+	default:
+		BUG();
+	}
+}
+
+static inline enum dpio_phy
+vlv_dport_to_phy(struct intel_digital_port *dport)
+{
+	switch (dport->port) {
+	case PORT_B:
+	case PORT_C:
+		return DPIO_PHY0;
+	case PORT_D:
+		return DPIO_PHY1;
+	default:
+		BUG();
+	}
+}
+
+static inline enum dpio_channel
+vlv_pipe_to_channel(enum pipe pipe)
+{
+	switch (pipe) {
+	case PIPE_A:
+	case PIPE_C:
+		return DPIO_CH0;
+	case PIPE_B:
+		return DPIO_CH1;
+	default:
+		BUG();
+	}
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+	struct work_struct work;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *old_fb;
+	struct drm_i915_gem_object *pending_flip_obj;
+	struct drm_pending_vblank_event *event;
+	atomic_t pending;
+#define INTEL_FLIP_INACTIVE	0
+#define INTEL_FLIP_PENDING	1
+#define INTEL_FLIP_COMPLETE	2
+	u32 flip_count;
+	u32 gtt_offset;
+	struct drm_i915_gem_request *flip_queued_req;
+	u32 flip_queued_vblank;
+	u32 flip_ready_vblank;
+	bool enable_stall_check;
+};
+
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
+
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
+{
+	return to_intel_connector(connector)->encoder;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->dp;
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
+/*
+ * Returns the number of planes for this pipe, i.e. the number of sprites + 1
+ * (for the primary plane). The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+	return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
+
+/* intel_fifo_underrun.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+					   enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+					   enum transcoder pch_transcoder,
+					   bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+					 enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+					 enum transcoder pch_transcoder);
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+
+/* i915_irq.c */
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_device *dev);
+void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * We only use drm_irq_uninstall() at unload and VT switch, so
+	 * this is the only thing we need to check.
+	 */
+	return dev_priv->pm.irqs_enabled;
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask);
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc,
+			  struct intel_crtc_state *crtc_state);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config);
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
+
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+			 struct intel_crtc_state *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
+
+/* intel_frontbuffer.c */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     enum fb_op_origin origin);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+				   unsigned int height,
+				   uint32_t pixel_format,
+				   uint64_t fb_format_modifier);
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format);
+
+/* intel_audio.c */
+void intel_init_audio(struct drm_device *dev);
+void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder);
+void i915_audio_component_init(struct drm_i915_private *dev_priv);
+void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
+
+/* intel_display.c */
+extern const struct drm_plane_funcs intel_plane_funcs;
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+int intel_hrawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+int intel_connector_init(struct intel_connector *);
+struct intel_connector *intel_connector_alloc(void);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe);
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+	drm_wait_one_vblank(dev, pipe);
+}
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+			 struct intel_digital_port *dport,
+			 unsigned int expected_mask);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old,
+				struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old,
+				    struct drm_modeset_acquire_ctx *ctx);
+int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+			       struct drm_framebuffer *fb,
+			       const struct drm_plane_state *plane_state,
+			       struct intel_engine_cs *pipelined,
+			       struct drm_i915_gem_request **pipelined_request);
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+void intel_check_page_flip(struct drm_device *dev, int pipe);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+			   const struct drm_plane_state *new_state);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+			    const struct drm_plane_state *old_state);
+int intel_plane_atomic_get_property(struct drm_plane *plane,
+				    const struct drm_plane_state *state,
+				    struct drm_property *property,
+				    uint64_t *val);
+int intel_plane_atomic_set_property(struct drm_plane *plane,
+				    struct drm_plane_state *state,
+				    struct drm_property *property,
+				    uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state);
+
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+		  uint64_t fb_format_modifier, unsigned int plane);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+	return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+void intel_create_rotation_property(struct drm_device *dev,
+					struct intel_plane *plane);
+
+/* shared dpll functions */
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+						struct intel_crtc_state *state);
+
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+		      const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+
+/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+			   enum pipe pipe);
+void assert_pll(struct drm_i915_private *dev_priv,
+		enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
+					     unsigned int tiling_mode,
+					     unsigned int bpp,
+					     unsigned int pitch);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void broxton_init_cdclk(struct drm_device *dev);
+void broxton_uninit_cdclk(struct drm_device *dev);
+void broxton_ddi_phy_init(struct drm_device *dev);
+void broxton_ddi_phy_uninit(struct drm_device *dev);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void skl_init_cdclk(struct drm_i915_private *dev_priv);
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_state *pipe_config);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
+				int dotclock);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+			intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
+bool intel_crtc_active(struct drm_crtc *crtc);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+				 struct intel_crtc_state *pipe_config);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
+void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
+
+u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
+			   struct drm_i915_gem_object *obj,
+			   unsigned int plane);
+
+u32 skl_plane_ctl_format(uint32_t pixel_format);
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
+u32 skl_plane_ctl_rotation(unsigned int rotation);
+
+/* intel_csr.c */
+void intel_csr_ucode_init(struct drm_device *dev);
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+					enum csr_state state);
+void intel_csr_load_program(struct drm_device *dev);
+void intel_csr_ucode_fini(struct drm_device *dev);
+void assert_csr_loaded(struct drm_i915_private *dev_priv);
+
+/* intel_dp.c */
+bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			     struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+			      const struct intel_crtc_state *pipe_config);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+				  bool long_hpd);
+void intel_edp_backlight_on(struct intel_dp *intel_dp);
+void intel_edp_backlight_off(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_plane_destroy(struct drm_plane *plane);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+		unsigned frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
+
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+/* intel_dsi.c */
+void intel_dsi_init(struct drm_device *dev);
+
+
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
+
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits,
+			  enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+			       struct intel_crtc_state *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+				 struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode,
+		     struct drm_display_mode *downclock_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+			    struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config,
+			     int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+			      struct intel_crtc_state *pipe_config,
+			      int fitting_mode);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+				    u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
+void intel_panel_enable_backlight(struct intel_connector *connector);
+void intel_panel_disable_backlight(struct intel_connector *connector);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+extern struct drm_display_mode *intel_find_panel_downclock(
+				struct drm_device *dev,
+				struct drm_display_mode *fixed_mode,
+				struct drm_connector *connector);
+void intel_backlight_register(struct drm_device *dev);
+void intel_backlight_unregister(struct drm_device *dev);
+
+
+/* intel_psr.c */
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+			  unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+		     unsigned frontbuffer_bits,
+		     enum fb_op_origin origin);
+void intel_psr_init(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits);
+
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+				    enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+				      enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+			     enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+			     enum intel_display_power_domain domain);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+			     bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+			  enum dpio_channel ch, bool override);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+				    struct drm_crtc *crtc,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
+				    bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+void intel_init_gt_powersave(struct drm_device *dev);
+void intel_cleanup_gt_powersave(struct drm_device *dev);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
+void intel_reset_gt_powersave(struct drm_device *dev);
+void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+		    struct intel_rps_client *rps,
+		    unsigned long submitted);
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+				       struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
+void ilk_wm_get_hw_state(struct drm_device *dev);
+void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+			  struct skl_ddb_allocation *ddb /* out */);
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+void intel_pipe_update_start(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc);
+
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
+
+/* intel_atomic.c */
+int intel_connector_atomic_get_property(struct drm_connector *connector,
+					const struct drm_connector_state *state,
+					struct drm_property *property,
+					uint64_t *val);
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+			       struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
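+/*
+ * Get the intel_crtc_state for @crtc in atomic @state;
+ * drm_atomic_get_crtc_state() duplicates the current state on first use
+ * and may return an ERR_PTR.
+ */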
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+			    struct intel_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+
+	crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+	if (IS_ERR(crtc_state))
+		return ERR_CAST(crtc_state);
+
+	return to_intel_crtc_state(crtc_state);
+}
+
+int intel_atomic_setup_scalers(struct drm_device *dev,
+	struct intel_crtc *intel_crtc,
+	struct intel_crtc_state *crtc_state);
+
+/* intel_atomic_plane.c */
+struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+			       struct drm_plane_state *state);
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 0000000..170ae6f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,1259 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+static const struct {
+	u16 panel_id;
+	struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
+} intel_dsi_drivers[] = {
+	{
+		.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
+		.init = vbt_panel_init,
+	},
+};
+
+static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask;
+
+	mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+		DRM_ERROR("DPI FIFOs are not empty\n");
+}
+
+static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+		       const u8 *data, u32 len)
+{
+	u32 i, j;
+
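+	/* pack payload bytes into 32-bit FIFO writes, LSB first; the inner
+	 * loop handles a trailing chunk shorter than 4 bytes */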
+	for (i = 0; i < len; i += 4) {
+		u32 val = 0;
+
+		for (j = 0; j < min_t(u32, len - i, 4); j++)
+			val |= *data++ << 8 * j;
+
+		I915_WRITE(reg, val);
+	}
+}
+
+static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+		      u8 *data, u32 len)
+{
+	u32 i, j;
+
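+	/* unpack each 32-bit FIFO read into payload bytes, LSB first */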
+	for (i = 0; i < len; i += 4) {
+		u32 val = I915_READ(reg);
+
+		for (j = 0; j < min_t(u32, len - i, 4); j++)
+			*data++ = val >> 8 * j;
+	}
+}
+
+static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
+				       const struct mipi_dsi_msg *msg)
+{
+	struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+	struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dsi_host->port;
+	struct mipi_dsi_packet packet;
+	ssize_t ret;
+	const u8 *header, *data;
+	u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret < 0)
+		return ret;
+
+	header = packet.header;
+	data = packet.payload;
+
+	if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
+		data_reg = MIPI_LP_GEN_DATA(port);
+		data_mask = LP_DATA_FIFO_FULL;
+		ctrl_reg = MIPI_LP_GEN_CTRL(port);
+		ctrl_mask = LP_CTRL_FIFO_FULL;
+	} else {
+		data_reg = MIPI_HS_GEN_DATA(port);
+		data_mask = HS_DATA_FIFO_FULL;
+		ctrl_reg = MIPI_HS_GEN_CTRL(port);
+		ctrl_mask = HS_CTRL_FIFO_FULL;
+	}
+
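+	/*
+	 * Any payload goes into the GEN_DATA FIFO first; then the header
+	 * (only its first three bytes, the ECC is presumably appended by
+	 * the hardware) is written to the GEN_CTRL register, which kicks
+	 * off the transfer.
+	 */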
+	/* note: this is never true for reads */
+	if (packet.payload_length) {
+		if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+			DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+		write_data(dev_priv, data_reg, packet.payload,
+			   packet.payload_length);
+	}
+
+	if (msg->rx_len)
+		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50))
+		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+
+	I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+
+	/* ->rx_len is set only for reads */
+	if (msg->rx_len) {
+		data_mask = GEN_READ_DATA_AVAIL;
+		if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+			DRM_ERROR("Timeout waiting for read data.\n");
+
+		read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+	}
+
+	/* XXX: fix for reads and writes */
+	return 4 + packet.payload_length;
+}
+
+static int intel_dsi_host_attach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static int intel_dsi_host_detach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
+	.attach = intel_dsi_host_attach,
+	.detach = intel_dsi_host_detach,
+	.transfer = intel_dsi_host_transfer,
+};
+
+static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+						  enum port port)
+{
+	struct intel_dsi_host *host;
+	struct mipi_dsi_device *device;
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return NULL;
+
+	host->base.ops = &intel_dsi_host_ops;
+	host->intel_dsi = intel_dsi;
+	host->port = port;
+
+	/*
+	 * We should call mipi_dsi_host_register(&host->base) here, but we don't
+	 * have a host->dev, and we don't have OF stuff either. So just use the
+	 * dsi framework as a library and hope for the best. Create the dsi
+	 * devices by ourselves here too. Need to be careful though, because we
+	 * don't initialize any of the driver model devices here.
+	 */
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		kfree(host);
+		return NULL;
+	}
+
+	device->host = &host->base;
+	host->device = device;
+
+	return host;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
+			enum port port)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask;
+
+	/* XXX: pipe, hs */
+	if (hs)
+		cmd &= ~DPI_LP_MODE;
+	else
+		cmd |= DPI_LP_MODE;
+
+	/* clear bit */
+	I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+
+	/* XXX: old code skips write if control unchanged */
+	if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
+		DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+
+	mask = SPL_PKT_SENT_INTERRUPT;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+	return 0;
+}
+
+static void band_gap_reset(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->sb_lock);
+
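+	/* toggle the band gap reset over the FLISDSI sideband; the magic
+	 * offsets and values come from the platform enabling sequence */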
+	vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+	udelay(150);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+	vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *config)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (fixed_mode)
+		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	/* DSI uses short packets for sync events, so clear mode flags for DSI */
+	adjusted_mode->flags = 0;
+
+	return true;
+}
+
+static void bxt_dsi_device_ready(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* Exit low power state in 4 steps */
+	for_each_dsi_port(port, intel_dsi->ports) {
+
+		/* 1. Enable MIPI PHY transparent latch */
+		val = I915_READ(BXT_MIPI_PORT_CTRL(port));
+		I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+		usleep_range(2000, 2500);
+
+		/* 2. Enter ULPS */
+		val = I915_READ(MIPI_DEVICE_READY(port));
+		val &= ~ULPS_STATE_MASK;
+		val |= (ULPS_STATE_ENTER | DEVICE_READY);
+		I915_WRITE(MIPI_DEVICE_READY(port), val);
+		usleep_range(2, 3);
+
+		/* 3. Exit ULPS */
+		val = I915_READ(MIPI_DEVICE_READY(port));
+		val &= ~ULPS_STATE_MASK;
+		val |= (ULPS_STATE_EXIT | DEVICE_READY);
+		I915_WRITE(MIPI_DEVICE_READY(port), val);
+		usleep_range(1000, 1500);
+
+		/* 4. Clear ULPS and set device ready */
+		val = I915_READ(MIPI_DEVICE_READY(port));
+		val &= ~ULPS_STATE_MASK;
+		val |= DEVICE_READY;
+		I915_WRITE(MIPI_DEVICE_READY(port), val);
+	}
+}
+
+static void vlv_dsi_device_ready(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->sb_lock);
+	/*
+	 * Program rcomp for compliance: reduce from 50 ohms to 45 ohms,
+	 * needed every time after power gate.
+	 */
+	vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/* band gap reset is needed every time we power gate */
+	band_gap_reset(dev_priv);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+
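+		/* enter and exit ULPS to bring the PHY to a known state
+		 * before asserting device ready */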
+		I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+		usleep_range(2500, 3000);
+
+		/*
+		 * Enable MIPI PHY transparent latch. The bit in the PORT_A
+		 * register is common to both MIPI Port A & MIPI Port C;
+		 * there is no similar bit in the MIPI Port C register.
+		 */
+		val = I915_READ(MIPI_PORT_CTRL(PORT_A));
+		I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+		usleep_range(1000, 1500);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+		usleep_range(2500, 3000);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+		usleep_range(2500, 3000);
+	}
+}
+
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_dsi_device_ready(encoder);
+	else if (IS_BROXTON(dev))
+		bxt_dsi_device_ready(encoder);
+}
+
+static void intel_dsi_port_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 temp;
+	u32 port_ctrl;
+
+	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
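+		/* program the pixel overlap count for front-back mode */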
+		temp = I915_READ(VLV_CHICKEN_3);
+		temp &= ~PIXEL_OVERLAP_CNT_MASK;
+		temp |= intel_dsi->pixel_overlap <<
+					PIXEL_OVERLAP_CNT_SHIFT;
+		I915_WRITE(VLV_CHICKEN_3, temp);
+	}
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
+						MIPI_PORT_CTRL(port);
+
+		temp = I915_READ(port_ctrl);
+
+		temp &= ~LANE_CONFIGURATION_MASK;
+		temp &= ~DUAL_LINK_MODE_MASK;
+
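+		/* with both ports in use, program the dual link mode and
+		 * pick the lane configuration based on the driving pipe */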
+		if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+			temp |= (intel_dsi->dual_link - 1)
+						<< DUAL_LINK_MODE_SHIFT;
+			temp |= intel_crtc->pipe ?
+					LANE_CONFIGURATION_DUAL_LINK_B :
+					LANE_CONFIGURATION_DUAL_LINK_A;
+		}
+		/* assert ip_tg_enable signal */
+		I915_WRITE(port_ctrl, temp | DPI_ENABLE);
+		POSTING_READ(port_ctrl);
+	}
+}
+
+static void intel_dsi_port_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 temp;
+	u32 port_ctrl;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		/* de-assert ip_tg_enable signal */
+		port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
+						MIPI_PORT_CTRL(port);
+		temp = I915_READ(port_ctrl);
+		I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
+		POSTING_READ(port_ctrl);
+	}
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (is_cmd_mode(intel_dsi)) {
+		for_each_dsi_port(port, intel_dsi->ports)
+			I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+	} else {
+		msleep(20); /* XXX */
+		for_each_dsi_port(port, intel_dsi->ports)
+			dpi_send_cmd(intel_dsi, TURN_ON, false, port);
+		msleep(100);
+
+		drm_panel_enable(intel_dsi->panel);
+
+		for_each_dsi_port(port, intel_dsi->ports)
+			wait_for_dsi_fifo_empty(intel_dsi, port);
+
+		intel_dsi_port_enable(encoder);
+	}
+
+	intel_panel_enable_backlight(intel_dsi->attached_connector);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	enum port port;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* Panel Enable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+	msleep(intel_dsi->panel_on_delay);
+
+	if (IS_VALLEYVIEW(dev)) {
+		/*
+		 * Disable DPOunit clock gating, as it can stall the pipe,
+		 * and keep DPLL REFA always enabled.
+		 */
+		tmp = I915_READ(DPLL(pipe));
+		tmp |= DPLL_REF_CLK_ENABLE_VLV;
+		I915_WRITE(DPLL(pipe), tmp);
+
+		/* update the hw state for DPLL */
+		intel_crtc->config->dpll_hw_state.dpll =
+				DPLL_INTEGRATED_REF_CLK_VLV |
+					DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
+		tmp = I915_READ(DSPCLK_GATE_D);
+		tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
+		I915_WRITE(DSPCLK_GATE_D, tmp);
+	}
+
+	/* put device in ready state */
+	intel_dsi_device_ready(encoder);
+
+	drm_panel_prepare(intel_dsi->panel);
+
+	for_each_dsi_port(port, intel_dsi->ports)
+		wait_for_dsi_fifo_empty(intel_dsi, port);
+
+	/* Enable port in the pre-enable phase itself because, as per hw team
+	 * recommendation, the port should be enabled before plane & pipe */
+	intel_dsi_enable(encoder);
+}
+
+static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	/* For DSI, port enable has to be done before pipe and plane
+	 * enable, so it is done in the pre_enable phase itself, unlike
+	 * other encoders.
+	 */
+}
+
+static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_panel_disable_backlight(intel_dsi->attached_connector);
+
+	if (is_vid_mode(intel_dsi)) {
+		/* Send Shutdown command to the panel in LP mode */
+		for_each_dsi_port(port, intel_dsi->ports)
+			dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
+		msleep(10);
+	}
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (is_vid_mode(intel_dsi)) {
+		for_each_dsi_port(port, intel_dsi->ports)
+			wait_for_dsi_fifo_empty(intel_dsi, port);
+
+		intel_dsi_port_disable(encoder);
+		msleep(2);
+	}
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		/* Panel commands can be sent when clock is in LP11 */
+		I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+
+		intel_dsi_reset_clocks(encoder, port);
+		I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+
+		temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+		temp &= ~VID_MODE_FORMAT_MASK;
+		I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+	}
+	/* If disable packets are sent before the shutdown packet, then in
+	 * some subsequent enable sequence a "send turn on packet" error is
+	 * observed */
+	drm_panel_disable(intel_dsi->panel);
+
+	for_each_dsi_port(port, intel_dsi->ports)
+		wait_for_dsi_fifo_empty(intel_dsi, port);
+}
+
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 val;
+	u32 port_ctrl = 0;
+
+	DRM_DEBUG_KMS("\n");
+	for_each_dsi_port(port, intel_dsi->ports) {
+
+		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+							ULPS_STATE_ENTER);
+		usleep_range(2000, 2500);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+							ULPS_STATE_EXIT);
+		usleep_range(2000, 2500);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+							ULPS_STATE_ENTER);
+		usleep_range(2000, 2500);
+
+		if (IS_BROXTON(dev))
+			port_ctrl = BXT_MIPI_PORT_CTRL(port);
+		else if (IS_VALLEYVIEW(dev))
+			/* Common bit for both MIPI Port A & MIPI Port C */
+			port_ctrl = MIPI_PORT_CTRL(PORT_A);
+
+		/* Wait till Clock lanes are in LP-00 state for MIPI Port A
+		 * only. MIPI Port C has no similar bit for checking
+		 */
+		if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
+						== 0x00000), 30))
+			DRM_ERROR("DSI LP not going Low\n");
+
+		/* Disable MIPI PHY transparent latch */
+		val = I915_READ(port_ctrl);
+		I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+		usleep_range(1000, 1500);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+		usleep_range(2000, 2500);
+	}
+
+	intel_disable_dsi_pll(encoder);
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi_disable(encoder);
+
+	intel_dsi_clear_device_ready(encoder);
+
+	val = I915_READ(DSPCLK_GATE_D);
+	val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
+
+	drm_panel_unprepare(intel_dsi->panel);
+
+	msleep(intel_dsi->panel_off_delay);
+	msleep(intel_dsi->panel_pwr_cycle_delay);
+
+	/* Panel Disable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	enum intel_display_power_domain power_domain;
+	u32 dpi_enabled, func, ctrl_reg;
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	/* XXX: this only works for one DSI output */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		func = I915_READ(MIPI_DSI_FUNC_PRG(port));
+		ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
+						MIPI_PORT_CTRL(port);
+		dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
+
+		/* Due to some hardware limitations on BYT, MIPI Port C DPI
+		 * Enable bit does not get set. To check whether DSI Port C
+		 * was enabled in BIOS, check the Pipe B enable bit
+		 */
+		if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+		    (port == PORT_C))
+			dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
+							PIPECONF_ENABLE;
+
+		if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+			if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
+				*pipe = port == PORT_A ? PIPE_A : PIPE_B;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	u32 pclk = 0;
+	DRM_DEBUG_KMS("\n");
+
+	/*
+	 * DPLL_MD is not used in case of DSI, so reading it would only get
+	 * some default value; set dpll_md = 0 instead.
+	 */
+	pipe_config->dpll_hw_state.dpll_md = 0;
+
+	if (IS_BROXTON(encoder->base.dev))
+		pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
+	else if (IS_VALLEYVIEW(encoder->base.dev))
+		pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
+
+	if (!pclk)
+		return;
+
+	pipe_config->base.adjusted_mode.crtc_clock = pclk;
+	pipe_config->port_clock = pclk;
+}
+
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+		return MODE_NO_DBLESCAN;
+	}
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+		if (fixed_mode->clock > max_dotclk)
+			return MODE_CLOCK_HIGH;
+	}
+
+	return MODE_OK;
+}
+
+/*
+ * Return txclkesc cycles for a duration in us; the escape clock runs at
+ * 20 MHz divided by the given divider, so e.g. divider 1 yields 20 cycles
+ * per us.
+ */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+	switch (divider) {
+	case ESCAPE_CLOCK_DIVIDER_1:
+	default:
+		return 20 * us;
+	case ESCAPE_CLOCK_DIVIDER_2:
+		return 10 * us;
+	case ESCAPE_CLOCK_DIVIDER_4:
+		return 5 * us;
+	}
+}
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+		       u16 burst_mode_ratio)
+{
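+	/*
+	 * Example: 1366 pixels at 24 bpp over 4 lanes with a 100% burst
+	 * mode ratio gives DIV_ROUND_UP(DIV_ROUND_UP(1366 * 24 * 100,
+	 * 800), 4) = DIV_ROUND_UP(4098, 4) = 1025 byte clocks.
+	 */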
+	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+					 8 * 100), lane_count);
+}
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+			    const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	enum port port;
+	unsigned int bpp = intel_crtc->config->pipe_bpp;
+	unsigned int lane_count = intel_dsi->lane_count;
+
+	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+	hactive = adjusted_mode->crtc_hdisplay;
+	hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
+	hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+	hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end;
+
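+	/* in dual link mode each port drives half of every scanline, with
+	 * front-back mode carrying pixel_overlap extra pixels per half */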
+	if (intel_dsi->dual_link) {
+		hactive /= 2;
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+			hactive += intel_dsi->pixel_overlap;
+		hfp /= 2;
+		hsync /= 2;
+		hbp /= 2;
+	}
+
+	vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
+	vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+	vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end;
+
+	/* horizontal values are in terms of high speed byte clock */
+	hactive = txbyteclkhs(hactive, bpp, lane_count,
+			      intel_dsi->burst_mode_ratio);
+	hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
+	hsync = txbyteclkhs(hsync, bpp, lane_count,
+			    intel_dsi->burst_mode_ratio);
+	hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		if (IS_BROXTON(dev)) {
+			/*
+			 * Program hdisplay and vdisplay on MIPI transcoder.
+			 * This is different from calculated hactive and
+			 * vactive, as they are calculated per channel basis,
+			 * whereas these values should be based on resolution.
+			 */
+			I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
+				   adjusted_mode->crtc_hdisplay);
+			I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
+				   adjusted_mode->crtc_vdisplay);
+			I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
+				   adjusted_mode->crtc_vtotal);
+		}
+
+		I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
+		I915_WRITE(MIPI_HFP_COUNT(port), hfp);
+
+		/* meaningful for video mode, non-burst sync pulse mode only;
+		 * can be zero for non-burst sync events and burst modes */
+		I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
+		I915_WRITE(MIPI_HBP_COUNT(port), hbp);
+
+		/* vertical values are in terms of lines */
+		I915_WRITE(MIPI_VFP_COUNT(port), vfp);
+		I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
+		I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+	}
+}
+
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	enum port port;
+	unsigned int bpp = intel_crtc->config->pipe_bpp;
+	u32 val, tmp;
+	u16 mode_hdisplay;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
+
+	mode_hdisplay = adjusted_mode->crtc_hdisplay;
+
+	if (intel_dsi->dual_link) {
+		mode_hdisplay /= 2;
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+			mode_hdisplay += intel_dsi->pixel_overlap;
+	}
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		if (IS_VALLEYVIEW(dev)) {
+			/*
+			 * escape clock divider, 20MHz, shared for A and C.
+			 * device ready must be off when doing this! txclkesc?
+			 */
+			tmp = I915_READ(MIPI_CTRL(PORT_A));
+			tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+			I915_WRITE(MIPI_CTRL(PORT_A), tmp |
+					ESCAPE_CLOCK_DIVIDER_1);
+
+			/* read request priority is per pipe */
+			tmp = I915_READ(MIPI_CTRL(port));
+			tmp &= ~READ_REQUEST_PRIORITY_MASK;
+			I915_WRITE(MIPI_CTRL(port), tmp |
+					READ_REQUEST_PRIORITY_HIGH);
+		} else if (IS_BROXTON(dev)) {
+			/*
+			 * FIXME:
+			 * BXT can connect any PIPE to any MIPI port.
+			 * Select the pipe based on the MIPI port read from
+			 * VBT for now. Pick PIPE A for MIPI port A and C
+			 * for port C.
+			 */
+			tmp = I915_READ(MIPI_CTRL(port));
+			tmp &= ~BXT_PIPE_SELECT_MASK;
+
+			if (port == PORT_A)
+				tmp |= BXT_PIPE_SELECT_A;
+			else if (port == PORT_C)
+				tmp |= BXT_PIPE_SELECT_C;
+
+			I915_WRITE(MIPI_CTRL(port), tmp);
+		}
+
+		/* XXX: why here, why like this? handling in irq handler?! */
+		I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
+		I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+
+		I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+
+		I915_WRITE(MIPI_DPI_RESOLUTION(port),
+			adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
+			mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+	}
+
+	set_dsi_timings(encoder, adjusted_mode);
+
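+	/* assemble DSI_FUNC_PRG: lane count plus channel number and either
+	 * the command mode data width or the video mode pixel format */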
+	val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+	if (is_cmd_mode(intel_dsi)) {
+		val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+		val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+	} else {
+		val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+		/* XXX: cross-check bpp vs. pixel format? */
+		val |= intel_dsi->pixel_format;
+	}
+
+	tmp = 0;
+	if (intel_dsi->eotp_pkt == 0)
+		tmp |= EOT_DISABLE;
+	if (intel_dsi->clock_stop)
+		tmp |= CLOCKSTOP;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+		/* timeouts for recovery: one frame, IIUC. if the counter
+		 * expires, EOT and stop state. */
+
+		/*
+		 * The HS TX timeout must be greater than one DPI line time
+		 * in byte clocks (txbyteclkhs) in burst mode, greater than
+		 * one DPI frame time in non-burst mode, and greater than one
+		 * DBI frame time in DBI-only mode; programming 1 + that
+		 * value is recommended in each case.
+		 */
+
+		if (is_vid_mode(intel_dsi) &&
+			intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+			I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+				txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
+					    intel_dsi->lane_count,
+					    intel_dsi->burst_mode_ratio) + 1);
+		} else {
+			I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+				txbyteclkhs(adjusted_mode->crtc_vtotal *
+					    adjusted_mode->crtc_htotal,
+					    bpp, intel_dsi->lane_count,
+					    intel_dsi->burst_mode_ratio) + 1);
+		}
+		I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
+		I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
+						intel_dsi->turn_arnd_val);
+		I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
+						intel_dsi->rst_timer_val);
+
+		/* dphy stuff */
+
+		/* in terms of low power clock */
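+		/* e.g. with ESCAPE_CLOCK_DIVIDER_1 this programs
+		 * txclkesc(div, 100) = 20 * 100 = 2000 escape clock cycles,
+		 * i.e. 100 us */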
+		I915_WRITE(MIPI_INIT_COUNT(port),
+				txclkesc(intel_dsi->escape_clk_div, 100));
+
+		if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+			/*
+			 * BXT spec says write MIPI_INIT_COUNT for
+			 * both the ports, even if only one is
+			 * getting used. So write the other port
+			 * if not in dual link mode.
+			 */
+			I915_WRITE(MIPI_INIT_COUNT(port ==
+						PORT_A ? PORT_C : PORT_A),
+					intel_dsi->init_count);
+		}
+
+		/* recovery disables */
+		I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+
+		/* in terms of low power clock */
+		I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+
+		/* in terms of txbyteclkhs. actual high to low switch +
+		 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+		 *
+		 * XXX: write MIPI_STOP_STATE_STALL?
+		 */
+		I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
+						intel_dsi->hs_to_lp_count);
+
+		/* XXX: low power clock equivalence in terms of byte clock.
+		 * the number of byte clocks occupied in one low power clock.
+		 * based on txbyteclkhs and txclkesc.
+		 * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
+		 * ) / 105.???
+		 */
+		I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+
+		/* The bandwidth needed for transmitting 16 long packets
+		 * containing 252 bytes meant for the DCS write memory command
+		 * is programmed in this register in terms of byte clocks.
+		 * Based on the DSI transfer rate and the number of lanes
+		 * configured, the time taken to transmit 16 long packets in
+		 * a DSI stream varies. */
+		I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+
+		I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+		intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+		intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+		if (is_vid_mode(intel_dsi))
+			/* Some panels might have a resolution which is not a
+			 * multiple of 64, like 1366 x 768. Enable RANDOM
+			 * resolution support for such panels by default. */
+			I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
+				intel_dsi->video_frmt_cfg_bits |
+				intel_dsi->video_mode_format |
+				IP_TG_CONFIG |
+				RANDOM_DPI_DISPLAY_RESOLUTION);
+	}
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi_prepare(encoder);
+	intel_enable_dsi_pll(encoder);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static void intel_dsi_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	DRM_DEBUG_KMS("\n");
+	intel_panel_fini(&intel_connector->panel);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+	if (intel_dsi->panel) {
+		drm_panel_detach(intel_dsi->panel);
+		/* XXX: Logically this call belongs in the panel driver. */
+		drm_panel_remove(intel_dsi->panel);
+	}
+
+	/* dispose of the gpios */
+	if (intel_dsi->gpio_panel)
+		gpiod_put(intel_dsi->gpio_panel);
+
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+	.destroy = intel_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_dsi_detect,
+	.destroy = intel_dsi_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+void intel_dsi_init(struct drm_device *dev)
+{
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *scan, *fixed_mode = NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port;
+	unsigned int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* There is no detection method for MIPI so rely on VBT */
+	if (!dev_priv->vbt.has_mipi)
+		return;
+
+	if (IS_VALLEYVIEW(dev)) {
+		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+	} else {
+		DRM_ERROR("Unsupported MIPI device, cannot map register base\n");
+		return;
+	}
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return;
+	}
+
+	intel_encoder = &intel_dsi->base;
+	encoder = &intel_encoder->base;
+	intel_dsi->attached_connector = intel_connector;
+
+	connector = &intel_connector->base;
+
+	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+	/* XXX: very likely not all of these are needed */
+	intel_encoder->compute_config = intel_dsi_compute_config;
+	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+	intel_encoder->pre_enable = intel_dsi_pre_enable;
+	intel_encoder->enable = intel_dsi_enable_nop;
+	intel_encoder->disable = intel_dsi_pre_disable;
+	intel_encoder->post_disable = intel_dsi_post_disable;
+	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+	intel_encoder->get_config = intel_dsi_get_config;
+
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
+	if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+		intel_encoder->crtc_mask = (1 << PIPE_A);
+		intel_dsi->ports = (1 << PORT_A);
+	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
+		intel_encoder->crtc_mask = (1 << PIPE_B);
+		intel_dsi->ports = (1 << PORT_C);
+	}
+
+	if (dev_priv->vbt.dsi.config->dual_link)
+		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+
+	/* Create a DSI host (and a device) for each port. */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		struct intel_dsi_host *host;
+
+		host = intel_dsi_host_init(intel_dsi, port);
+		if (!host)
+			goto err;
+
+		intel_dsi->dsi_hosts[port] = host;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
+		intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
+							     intel_dsi_drivers[i].panel_id);
+		if (intel_dsi->panel)
+			break;
+	}
+
+	if (!intel_dsi->panel) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	/*
+	 * In case of BYT with CRC PMIC, we need to use GPIO for
+	 * Panel control.
+	 */
+	if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+		intel_dsi->gpio_panel =
+			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to own gpio for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
+	intel_encoder->type = INTEL_OUTPUT_DSI;
+	intel_encoder->cloneable = 0;
+	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+
+	drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+	drm_connector_register(connector);
+
+	drm_panel_attach(intel_dsi->panel, connector);
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_panel_get_modes(intel_dsi->panel);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (!fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		goto err;
+	}
+
+	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+	return;
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 0000000..e6cb252
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include "intel_drv.h"
+
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE		0
+#define DSI_DUAL_LINK_FRONT_BACK	1
+#define DSI_DUAL_LINK_PIXEL_ALT		2
+
+struct intel_dsi_host;
+
+struct intel_dsi {
+	struct intel_encoder base;
+
+	struct drm_panel *panel;
+	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+
+	/* GPIO Desc for CRC based Panel control */
+	struct gpio_desc *gpio_panel;
+
+	struct intel_connector *attached_connector;
+
+	/* bit mask of ports being driven */
+	u16 ports;
+
+	/* if true, use HS mode, otherwise LP */
+	bool hs;
+
+	/* virtual channel */
+	int channel;
+
+	/* Video mode or command mode */
+	u16 operation_mode;
+
+	/* number of DSI lanes */
+	unsigned int lane_count;
+
+	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+	u32 pixel_format;
+
+	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+	u32 video_mode_format;
+
+	/* eot for MIPI_EOT_DISABLE register */
+	u8 eotp_pkt;
+	u8 clock_stop;
+
+	u8 escape_clk_div;
+	u8 dual_link;
+	u8 pixel_overlap;
+	u32 port_bits;
+	u32 bw_timer;
+	u32 dphy_reg;
+	u32 video_frmt_cfg_bits;
+	u16 lp_byte_clk;
+
+	/* timeouts in byte clocks */
+	u16 lp_rx_timeout;
+	u16 turn_arnd_val;
+	u16 rst_timer_val;
+	u16 hs_to_lp_count;
+	u16 clk_lp_to_hs_count;
+	u16 clk_hs_to_lp_count;
+
+	u16 init_count;
+	u32 pclk;
+	u16 burst_mode_ratio;
+
+	/* all delays in ms */
+	u16 backlight_off_delay;
+	u16 backlight_on_delay;
+	u16 panel_on_delay;
+	u16 panel_off_delay;
+	u16 panel_pwr_cycle_delay;
+};
+
+struct intel_dsi_host {
+	struct mipi_dsi_host base;
+	struct intel_dsi *intel_dsi;
+	enum port port;
+
+	/* our little hack */
+	struct mipi_dsi_device *device;
+};
+
+static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
+{
+	return container_of(h, struct intel_dsi_host, base);
+}
+
+#define for_each_dsi_port(__port, __ports_mask) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
+		if ((__ports_mask) & (1 << (__port)))
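+/*
+ * E.g., with __ports_mask == (1 << PORT_A) | (1 << PORT_C) the loop
+ * body runs for PORT_A and PORT_C only.
+ */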
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
+extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
+extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
+							enum port port);
+
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
new file mode 100644
index 0000000..a8912ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -0,0 +1,687 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
+#include <linux/slab.h>
+#include <video/mipi_display.h>
+#include <asm/intel-mid.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+struct vbt_panel {
+	struct drm_panel panel;
+	struct intel_dsi *intel_dsi;
+};
+
+static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct vbt_panel, panel);
+}
+
+#define MIPI_TRANSFER_MODE_SHIFT	0
+#define MIPI_VIRTUAL_CHANNEL_SHIFT	1
+#define MIPI_PORT_SHIFT			3
+
+#define PREPARE_CNT_MAX		0x3F
+#define EXIT_ZERO_CNT_MAX	0x3F
+#define CLK_ZERO_CNT_MAX	0xFF
+#define TRAIL_CNT_MAX		0x1F
+
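+/* 10^6: period (ns) = NS_KHZ_RATIO / frequency (kHz) */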
+#define NS_KHZ_RATIO 1000000
+
+#define GPI0_NC_0_HV_DDI0_HPD           0x4130
+#define GPIO_NC_0_HV_DDI0_PAD           0x4138
+#define GPIO_NC_1_HV_DDI0_DDC_SDA       0x4120
+#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD   0x4128
+#define GPIO_NC_2_HV_DDI0_DDC_SCL       0x4110
+#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD   0x4118
+#define GPIO_NC_3_PANEL0_VDDEN          0x4140
+#define GPIO_NC_3_PANEL0_VDDEN_PAD      0x4148
+#define GPIO_NC_4_PANEL0_BLKEN          0x4150
+#define GPIO_NC_4_PANEL0_BLKEN_PAD      0x4158
+#define GPIO_NC_5_PANEL0_BLKCTL         0x4160
+#define GPIO_NC_5_PANEL0_BLKCTL_PAD     0x4168
+#define GPIO_NC_6_PCONF0                0x4180
+#define GPIO_NC_6_PAD                   0x4188
+#define GPIO_NC_7_PCONF0                0x4190
+#define GPIO_NC_7_PAD                   0x4198
+#define GPIO_NC_8_PCONF0                0x4170
+#define GPIO_NC_8_PAD                   0x4178
+#define GPIO_NC_9_PCONF0                0x4100
+#define GPIO_NC_9_PAD                   0x4108
+#define GPIO_NC_10_PCONF0               0x40E0
+#define GPIO_NC_10_PAD                  0x40E8
+#define GPIO_NC_11_PCONF0               0x40F0
+#define GPIO_NC_11_PAD                  0x40F8
+
+struct gpio_table {
+	u16 function_reg;
+	u16 pad_reg;
+	u8 init;
+};
+
+static struct gpio_table gtable[] = {
+	{ GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
+	{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
+	{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
+	{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
+	{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
+	{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
+	{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
+	{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
+	{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
+	{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
+	{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
+	{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+};
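+
+/* mipi_exec_gpio() below indexes gtable[] with the GPIO number parsed
+ * from the VBT sequence payload. */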
+
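+/* The VBT sequence payload encodes the port as 0 for MIPI port A and
+ * non-zero for MIPI port C (see intel_dsi_seq_port_to_port() below). */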
+static inline enum port intel_dsi_seq_port_to_port(u8 port)
+{
+	return port ? PORT_C : PORT_A;
+}
+
+static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
+				       const u8 *data)
+{
+	struct mipi_dsi_device *dsi_device;
+	u8 type, flags, seq_port;
+	u16 len;
+	enum port port;
+
+	flags = *data++;
+	type = *data++;
+
+	len = *((u16 *) data);
+	data += 2;
+
+	seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
+
+	/* For DSI single link on Ports A & C, the seq_port value parsed
+	 * from Sequence Block #53 of the VBT has been set to 0. Reads and
+	 * writes of packets for DSI single link on Port A and Port C will
+	 * instead be based on the DVO port from VBT block 2.
+	 */
+	if (intel_dsi->ports == (1 << PORT_C))
+		port = PORT_C;
+	else
+		port = intel_dsi_seq_port_to_port(seq_port);
+
+	dsi_device = intel_dsi->dsi_hosts[port]->device;
+	if (!dsi_device) {
+		DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+		goto out;
+	}
+
+	if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+		dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+	else
+		dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+
+	switch (type) {
+	case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+		mipi_dsi_generic_write(dsi_device, NULL, 0);
+		break;
+	case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+		mipi_dsi_generic_write(dsi_device, data, 1);
+		break;
+	case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+		mipi_dsi_generic_write(dsi_device, data, 2);
+		break;
+	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+		DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+		break;
+	case MIPI_DSI_GENERIC_LONG_WRITE:
+		mipi_dsi_generic_write(dsi_device, data, len);
+		break;
+	case MIPI_DSI_DCS_SHORT_WRITE:
+		mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
+		break;
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+		mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
+		break;
+	case MIPI_DSI_DCS_READ:
+		DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+		break;
+	case MIPI_DSI_DCS_LONG_WRITE:
+		mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+		break;
+	}
+
+out:
+	data += len;
+
+	return data;
+}
+
+static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	u32 delay = *((const u32 *) data);
+
+	usleep_range(delay, delay + 10);
+	data += 4;
+
+	return data;
+}
+
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	u8 gpio, action;
+	u16 function, pad;
+	u32 val;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gpio = *data++;
+
+	/* pull up/down */
+	action = *data++ & 1;
+
+	if (gpio >= ARRAY_SIZE(gtable)) {
+		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+		goto out;
+	}
+
+	function = gtable[gpio].function_reg;
+	pad = gtable[gpio].pad_reg;
+
+	mutex_lock(&dev_priv->sb_lock);
+	if (!gtable[gpio].init) {
+		/* program the function */
+		/* FIXME: remove constant below */
+		vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+		gtable[gpio].init = 1;
+	}
+
+	val = 0x4 | action;
+
+	/* pull up/down */
+	vlv_gpio_nc_write(dev_priv, pad, val);
+	mutex_unlock(&dev_priv->sb_lock);
+
+out:
+	return data;
+}
+
+typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
+					const u8 *data);
+static const fn_mipi_elem_exec exec_elem[] = {
+	NULL, /* reserved */
+	mipi_exec_send_packet,
+	mipi_exec_delay,
+	mipi_exec_gpio,
+	NULL, /* status read; later */
+};
+
+/*
+ * MIPI Sequence from VBT Block #53 parsing logic.
+ * We have already separated each sequence during BIOS parsing.
+ * The following is a generic execution function for any sequence.
+ */
+
+static const char * const seq_name[] = {
+	"UNDEFINED",
+	"MIPI_SEQ_ASSERT_RESET",
+	"MIPI_SEQ_INIT_OTP",
+	"MIPI_SEQ_DISPLAY_ON",
+	"MIPI_SEQ_DISPLAY_OFF",
+	"MIPI_SEQ_DEASSERT_RESET"
+};
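+
+/*
+ * As parsed below, a sequence buffer is laid out as:
+ *   byte 0:         sequence id (index into seq_name[])
+ *   then repeated:  an element id byte followed by that element's payload
+ *   terminator:     a 0x00 byte
+ */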
+
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	fn_mipi_elem_exec mipi_elem_exec;
+	int index;
+
+	if (!data)
+		return;
+
+	DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+
+	/* go to the first element of the sequence */
+	data++;
+
+	/* parse each element until we reach the end-of-sequence byte, 0x00 */
+	while (1) {
+		index = *data;
+		mipi_elem_exec = index < ARRAY_SIZE(exec_elem) ?
+						exec_elem[index] : NULL;
+		if (!mipi_elem_exec) {
+			DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
+			return;
+		}
+
+		/* advance to the element payload */
+		data++;
+
+		/* execute the element-specific routine */
+		data = mipi_elem_exec(intel_dsi, data);
+
+		/*
+		 * After processing the element, data should point to the
+		 * next element or the end of the sequence. Check whether
+		 * we have reached the end of the sequence.
+		 */
+		if (*data == 0x00)
+			break;
+	}
+}
+
+static int vbt_panel_prepare(struct drm_panel *panel)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const u8 *sequence;
+
+	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+	generic_exec_sequence(intel_dsi, sequence);
+
+	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	generic_exec_sequence(intel_dsi, sequence);
+
+	return 0;
+}
+
+static int vbt_panel_unprepare(struct drm_panel *panel)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const u8 *sequence;
+
+	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+	generic_exec_sequence(intel_dsi, sequence);
+
+	return 0;
+}
+
+static int vbt_panel_enable(struct drm_panel *panel)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const u8 *sequence;
+
+	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+	generic_exec_sequence(intel_dsi, sequence);
+
+	return 0;
+}
+
+static int vbt_panel_disable(struct drm_panel *panel)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const u8 *sequence;
+
+	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+	generic_exec_sequence(intel_dsi, sequence);
+
+	return 0;
+}
+
+static int vbt_panel_get_modes(struct drm_panel *panel)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *mode;
+
+	if (!panel->connector)
+		return 0;
+
+	mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+	if (!mode)
+		return 0;
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs vbt_panel_funcs = {
+	.disable = vbt_panel_disable,
+	.unprepare = vbt_panel_unprepare,
+	.prepare = vbt_panel_prepare,
+	.enable = vbt_panel_enable,
+	.get_modes = vbt_panel_get_modes,
+};
+
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+	struct vbt_panel *vbt_panel;
+	u32 bits_per_pixel = 24;
+	u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
+	u32 ui_num, ui_den;
+	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+	u32 ths_prepare_ns, tclk_trail_ns;
+	u32 tclk_prepare_clkzero, ths_prepare_hszero;
+	u32 lp_to_hs_switch, hs_to_lp_switch;
+	u32 pclk, computed_ddr;
+	u16 burst_mode_ratio;
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+	intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+	intel_dsi->dual_link = mipi_config->dual_link;
+	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+
+	if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
+		bits_per_pixel = 18;
+	else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
+		bits_per_pixel = 16;
+
+	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+	intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+	intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+	intel_dsi->init_count = mipi_config->master_init_timer;
+	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+	intel_dsi->video_frmt_cfg_bits =
+		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+
+	pclk = mode->clock;
+
+	/* In dual link mode each port needs half of pixel clock */
+	if (intel_dsi->dual_link) {
+		pclk = pclk / 2;
+
+		/* The panel may need pixel_overlap enabled. In that case
+		 * the pixel clock must be increased to account for the
+		 * extra pixels.
+		 */
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+			pclk += DIV_ROUND_UP(mode->vtotal *
+						intel_dsi->pixel_overlap *
+						60, 1000);
+		}
+	}
+
+	/* Burst Mode Ratio:
+	 * target DDR frequency from VBT / non-burst DDR frequency,
+	 * multiplied by 100 to preserve the remainder.
+	 */
+	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		if (mipi_config->target_burst_mode_freq) {
+			computed_ddr =
+				(pclk * bits_per_pixel) / intel_dsi->lane_count;
+
+			if (mipi_config->target_burst_mode_freq <
+								computed_ddr) {
+				DRM_ERROR("Burst mode freq is less than computed\n");
+				return NULL;
+			}
+
+			burst_mode_ratio = DIV_ROUND_UP(
+				mipi_config->target_burst_mode_freq * 100,
+				computed_ddr);
+
+			pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
+		} else {
+			DRM_ERROR("Burst mode target is not set\n");
+			return NULL;
+		}
+	} else
+		burst_mode_ratio = 100;
+
+	intel_dsi->burst_mode_ratio = burst_mode_ratio;
+	intel_dsi->pclk = pclk;
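+
+	/*
+	 * Illustrative numbers (assumed, not from any real VBT): pclk =
+	 * 74250 kHz at 24 bpp on 4 lanes gives computed_ddr = 445500; a
+	 * target burst mode freq of 513000 then yields burst_mode_ratio =
+	 * DIV_ROUND_UP(513000 * 100, 445500) = 116 and an adjusted pclk of
+	 * DIV_ROUND_UP(74250 * 116, 100) = 86130 kHz.
+	 */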
+
+	bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
+
+	switch (intel_dsi->escape_clk_div) {
+	case 0:
+		tlpx_ns = 50;
+		break;
+	case 1:
+		tlpx_ns = 100;
+		break;
+
+	case 2:
+		tlpx_ns = 200;
+		break;
+	default:
+		tlpx_ns = 50;
+		break;
+	}
+
+	switch (intel_dsi->lane_count) {
+	case 1:
+	case 2:
+		extra_byte_count = 2;
+		break;
+	case 3:
+		extra_byte_count = 4;
+		break;
+	case 4:
+	default:
+		extra_byte_count = 3;
+		break;
+	}
+
+	/*
+	 * ui(s) = 1/f [f in Hz]
+	 * ui(ns) = 10^9 / (f * 10^6) [f in MHz] -> 10^3 / f(MHz)
+	 */
+
+	/* in Kbps */
+	ui_num = NS_KHZ_RATIO;
+	ui_den = bitrate;
+
+	tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+	ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+	/*
+	 * B060
+	 * LP byte clock = TLPX / (8 UI)
+	 */
+	intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
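+	/* E.g. (illustrative, not from any real VBT): tlpx_ns = 50 at a
+	 * 513000 kbps bitrate gives
+	 * DIV_ROUND_UP(50 * 513000, 8 * 1000000) = 4 byte clocks. */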
+
+	/* count values in UI = (ns value) * (bitrate / (2 * 10^6))
+	 *
+	 * Since txddrclkhs_i is 2xUI, all the count values programmed in
+	 * DPHY param register are divided by 2
+	 *
+	 * prepare count
+	 */
+	ths_prepare_ns = max(mipi_config->ths_prepare,
+			     mipi_config->tclk_prepare);
+	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
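+	/* E.g. (illustrative): ths_prepare_ns = 65 at 513000 kbps gives
+	 * prepare_cnt = DIV_ROUND_UP(65 * 513000, 2 * 1000000) = 17. */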
+
+	/* exit zero count */
+	exit_zero_cnt = DIV_ROUND_UP(
+				(ths_prepare_hszero - ths_prepare_ns) * ui_den,
+				ui_num * 2
+				);
+
+	/*
+	 * Exit zero is the unified value of ths_zero and ths_exit.
+	 * The minimum value for ths_exit is 110 ns, so
+	 * min(exit_zero_cnt * 2) = 110 / UI, i.e.
+	 * exit_zero_cnt = 55 / UI.
+	 */
+	if (exit_zero_cnt < (55 * ui_den / ui_num))
+		if ((55 * ui_den) % ui_num)
+			exit_zero_cnt += 1;
+
+	/* clk zero count */
+	clk_zero_cnt = DIV_ROUND_UP(
+			(tclk_prepare_clkzero -	ths_prepare_ns)
+			* ui_den, 2 * ui_num);
+
+	/* trail count */
+	tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+	trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
+
+	if (prepare_cnt > PREPARE_CNT_MAX ||
+		exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
+		clk_zero_cnt > CLK_ZERO_CNT_MAX ||
+		trail_cnt > TRAIL_CNT_MAX)
+		DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
+
+	if (prepare_cnt > PREPARE_CNT_MAX)
+		prepare_cnt = PREPARE_CNT_MAX;
+
+	if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
+		exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+
+	if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
+		clk_zero_cnt = CLK_ZERO_CNT_MAX;
+
+	if (trail_cnt > TRAIL_CNT_MAX)
+		trail_cnt = TRAIL_CNT_MAX;
+
+	/* B080 */
+	intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+						clk_zero_cnt << 8 | prepare_cnt;
+
+	/*
+	 * LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
+	 *					+ 10UI + Extra Byte Count
+	 *
+	 * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+	 * Extra Byte Count is calculated according to number of lanes.
+	 * High Low Switch Count is the Max of LP to HS and
+	 * HS to LP switch count
+	 *
+	 */
+	tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
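+	/* E.g. (illustrative): tlpx_ns = 50 at 513000 kbps gives tlpx_ui =
+	 * DIV_ROUND_UP(50 * 513000, 1000000) = 26. */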
+
+	/* B044 */
+	/* FIXME: the comment above does not match the code */
+	lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
+						exit_zero_cnt * 2 + 10, 8);
+
+	hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+	intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+	intel_dsi->hs_to_lp_count += extra_byte_count;
+
+	/* B088 */
+	/* LP -> HS for clock lanes:
+	 * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+	 *						extra byte count
+	 * = 2 TLPX + 1 TLPX + 1 TLPX (in ns) + prepare_cnt * 2 +
+	 *			clk_zero_cnt * 2 (in UI) + extra byte count
+	 * In byteclks = (4 TLPX + prepare_cnt * 2 + clk_zero_cnt * 2
+	 *			(in UI)) / 8 + extra byte count
+	 */
+	intel_dsi->clk_lp_to_hs_count =
+		DIV_ROUND_UP(
+			4 * tlpx_ui + prepare_cnt * 2 +
+			clk_zero_cnt * 2,
+			8);
+
+	intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+	/* HS -> LP for clock lanes:
+	 * Low power clock synchronisations + 1 Tx byteclk + tclk_trail +
+	 *						extra byte count
+	 * = 2 TLPX + 8 UI + (trail_cnt * 2) (in UI) + extra byte count
+	 * In byteclks = (2 TLPX (in UI) + trail_cnt * 2 + 8) / 8 +
+	 *						extra byte count
+	 */
+	intel_dsi->clk_hs_to_lp_count =
+		DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
+			8);
+	intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+	DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
+	DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
+						"disabled" : "enabled");
+	DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+		DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+	else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+		DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+	else
+		DRM_DEBUG_KMS("Dual link: NONE\n");
+	DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
+	DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
+	DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+	DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+	DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
+	DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+	DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+	DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+	DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+	DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+	DRM_DEBUG_KMS("BTA %s\n",
+			intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
+			"disabled" : "enabled");
+
+	/* Delays in the VBT are in units of 100 us, so convert to ms here:
+	 * delay (100 us) * 100 / 1000 = delay / 10 (ms). */
+	intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
+	intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
+	intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
+	intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
+	intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
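+	/* E.g., a VBT delay field of 200 (200 * 100 us = 20 ms) is stored
+	 * as 20. */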
+
+	/* This is cheating a bit with the cleanup. */
+	vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
+	if (!vbt_panel)
+		return NULL;
+
+	vbt_panel->intel_dsi = intel_dsi;
+	drm_panel_init(&vbt_panel->panel);
+	vbt_panel->panel.funcs = &vbt_panel_funcs;
+	drm_panel_add(&vbt_panel->panel);
+
+	/* a regular driver would get the device in probe */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
+	}
+
+	return &vbt_panel->panel;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 0000000..cb3cf39
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Shobhit Kumar <shobhit.kumar@intel.com>
+ *	Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE		4
+#define DSI_HSE_PACKET_SIZE		4
+#define DSI_HSA_PACKET_EXTRA_SIZE	6
+#define DSI_HBP_PACKET_EXTRA_SIZE	6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE	6
+#define DSI_HFP_PACKET_EXTRA_SIZE	6
+#define DSI_EOTP_PACKET_SIZE		4
+
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+	int bpp;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	return bpp;
+}
+
+struct dsi_mnp {
+	u32 dsi_pll_ctrl;
+	u32 dsi_pll_div;
+};
+
+static const u32 lfsr_converts[] = {
+	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
+	106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506		/* 91 - 100 */
+};
+
+#ifdef DSI_CLK_FROM_RR
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+			  int pixel_format, int video_mode_format,
+			  int lane_count, bool eotp)
+{
+	u32 bpp;
+	u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+	u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+	u32 bytes_per_line, bytes_per_frame;
+	u32 num_frames;
+	u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+	u32 dsi_bit_clock_hz;
+	u32 dsi_clk;
+
+	bpp = dsi_pixel_format_bpp(pixel_format);
+
+	hactive = mode->hdisplay;
+	vactive = mode->vdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+	hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+	hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+	hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+	bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+		DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+		hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+		hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+		hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+	/*
+	 * XXX: Need to accurately calculate LP to HS transition timeout and add
+	 * it to bytes_per_line/bytes_per_frame.
+	 */
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+	bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+		vactive * bytes_per_line + vfp * bytes_per_line;
+
+	if (eotp &&
+	    (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+	     video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+		bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+	num_frames = drm_mode_vrefresh(mode);
+	bytes_per_x_frames = num_frames * bytes_per_frame;
+
+	bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+	dsi_clk = dsi_bit_clock_hz / 1000;
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		dsi_clk *= 2;
+
+	return dsi_clk;
+}
+
+#else
+
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
+{
+	u32 dsi_clk_khz;
+	u32 bpp = dsi_pixel_format_bpp(pixel_format);
+
+	/* DSI data rate = pixel clock * bits per pixel / lane count;
+	   pclk is in kHz, so the result (dsi_clk_khz) is in kbps */
+	dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
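+
+	/* E.g. (illustrative): pclk = 86130 kHz at 24 bpp over 4 lanes
+	 * gives DIV_ROUND_CLOSEST(86130 * 24, 4) = 516780 kbps. */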
+
+	return dsi_clk_khz;
+}
+
+#endif
+
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
+{
+	unsigned int calc_m = 0, calc_p = 0;
+	unsigned int m_min, m_max, p_min = 2, p_max = 6;
+	unsigned int m, n, p;
+	int ref_clk;
+	int delta = target_dsi_clk;
+	u32 m_seed;
+
+	/* target_dsi_clk is expected in kHz */
+	if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
+		DRM_ERROR("DSI CLK Out of Range\n");
+		return -ECHRNG;
+	}
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		ref_clk = 100000;
+		n = 4;
+		m_min = 70;
+		m_max = 96;
+	} else {
+		ref_clk = 25000;
+		n = 1;
+		m_min = 62;
+		m_max = 92;
+	}
+
+	for (m = m_min; m <= m_max && delta; m++) {
+		for (p = p_min; p <= p_max && delta; p++) {
+			/*
+			 * Find the optimal m and p divisors with the
+			 * minimal delta from the required clock.
+			 */
+			int calc_dsi_clk = (m * ref_clk) / (p * n);
+			int d = abs(target_dsi_clk - calc_dsi_clk);
+			if (d < delta) {
+				delta = d;
+				calc_m = m;
+				calc_p = p;
+			}
+		}
+	}
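+
+	/*
+	 * Worked example (illustrative): on VLV (ref_clk = 25000, n = 1) a
+	 * target_dsi_clk of 513000 kHz settles on calc_m = 82, calc_p = 4:
+	 * 82 * 25000 / 4 = 512500 kHz, a delta of only 500 kHz.
+	 */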
+
+	/* register has log2(N1), this works fine for powers of two */
+	n = ffs(n) - 1;
+	m_seed = lfsr_converts[calc_m - 62];
+	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+	dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
+		m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+	return 0;
+}
+
+/*
+ * XXX: The muxing and gating is hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int ret;
+	struct dsi_mnp dsi_mnp;
+	u32 dsi_clk;
+
+	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+				    intel_dsi->lane_count);
+
+	ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
+	if (ret) {
+		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+		return;
+	}
+
+	if (intel_dsi->ports & (1 << PORT_A))
+		dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+	if (intel_dsi->ports & (1 << PORT_C))
+		dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+		      dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	vlv_configure_dsi_pll(encoder);
+
+	/* wait at least 0.5 us after ungating before enabling VCO */
+	usleep_range(1, 10);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp |= DSI_PLL_VCO_EN;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+						DSI_PLL_LOCK, 20)) {
+
+		mutex_unlock(&dev_priv->sb_lock);
+		DRM_ERROR("DSI PLL lock failed\n");
+		return;
+	}
+	mutex_unlock(&dev_priv->sb_lock);
+
+	DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp &= ~DSI_PLL_VCO_EN;
+	tmp |= DSI_PLL_LDO_GATE;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	val &= ~BXT_DSI_PLL_DO_ENABLE;
+	I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+
+	/*
+	 * PLL lock should deassert within 200us.
+	 * Wait up to 1ms before timing out.
+	 */
+	if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
+					& BXT_DSI_PLL_LOCKED) == 0, 1))
+		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+}
+
+static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+{
+	int bpp = dsi_pixel_format_bpp(pixel_format);
+
+	WARN(bpp != pipe_bpp,
+	     "bpp match assertion failure (expected %d, current %d)\n",
+	     bpp, pipe_bpp);
+}
+
+u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 dsi_clock, pclk;
+	u32 pll_ctl, pll_div;
+	u32 m = 0, p = 0, n;
+	int refclk = 25000;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->sb_lock);
+	pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/* mask out other bits and extract the P1 divisor */
+	pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+	pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+	/* N1 divisor */
+	n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+	n = 1 << n; /* register has log2(N1) */
+
+	/* mask out the other bits and extract the M1 divisor */
+	pll_div &= DSI_PLL_M1_DIV_MASK;
+	pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+	while (pll_ctl) {
+		pll_ctl = pll_ctl >> 1;
+		p++;
+	}
+	p--;
+
+	if (!p) {
+		DRM_ERROR("wrong P1 divisor\n");
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+		if (lfsr_converts[i] == pll_div)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(lfsr_converts)) {
+		DRM_ERROR("wrong m_seed programmed\n");
+		return 0;
+	}
+
+	m = i + 62;
+
+	dsi_clock = (m * refclk) / (p * n);
+
+	/* pixel_format and pipe_bpp should agree */
+	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
+
+	pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
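+
+	/* E.g. (illustrative): m = 82, p = 4, n = 1 gives dsi_clock =
+	 * 512500 kHz; with 4 lanes at pipe_bpp = 24 this yields pclk =
+	 * DIV_ROUND_CLOSEST(512500 * 4, 24) = 85417 kHz. */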
+
+	return pclk;
+}
+
+u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+	u32 pclk;
+	u32 dsi_clk;
+	u32 dsi_ratio;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+
+	/* Guard against division by zero */
+	if (!pipe_bpp) {
+		DRM_ERROR("Invalid BPP(0)\n");
+		return 0;
+	}
+
+	dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
+				BXT_DSI_PLL_RATIO_MASK;
+
+	/* Invalid DSI ratio? */
+	if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
+			dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+		DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
+		return 0;
+	}
+
+	dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
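+	/* E.g. (illustrative, assuming a 19200 kHz reference): dsi_ratio =
+	 * 54 gives dsi_clk = 54 * 19200 / 2 = 518400 kHz. */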
+
+	/* pixel_format and pipe_bpp should agree */
+	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
+
+	pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
+
+	DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
+	return pclk;
+}
+
+static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+{
+	u32 temp;
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	temp = I915_READ(MIPI_CTRL(port));
+	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+	I915_WRITE(MIPI_CTRL(port), temp |
+			intel_dsi->escape_clk_div <<
+			ESCAPE_CLOCK_DIVIDER_SHIFT);
+}
+
+/* Program BXT MIPI clocks and dividers */
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
+{
+	u32 tmp;
+	u32 divider;
+	u32 dsi_rate;
+	u32 pll_ratio;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Clear old configurations */
+	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
+	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+
+	/* Get the current (actual) DSI rate */
+	pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
+				BXT_DSI_PLL_RATIO_MASK;
+	dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
+
+	/* The max permitted variable output clock is 39.5 MHz; the
+	 * register is programmed with the divider value minus one. */
+	divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
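+	/* E.g. (illustrative): dsi_rate = 518400 kHz with the 39500 kHz cap
+	 * gives 518400 / 39500 = 13, so divider = 12 is programmed. */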
+	tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
+
+	/*
+	 * The Tx escape clock must be as close to 20 MHz as possible, but
+	 * must not exceed it. Hence select divide by 2.
+	 */
+	tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
+
+	tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
+
+	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+}
+
+static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u8 dsi_ratio;
+	u32 dsi_clk;
+	u32 val;
+
+	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+			intel_dsi->lane_count);
+
+	/*
+	 * From the clock diagram: to get the PLL ratio divider, divide
+	 * double the DSI link rate (i.e., the 2 * 8x = 16x frequency value)
+	 * by the reference clock, and round the result up.
+	 */
+	dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
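+	/* E.g. (illustrative, 19200 kHz reference): dsi_clk = 516780 kHz
+	 * gives dsi_ratio = DIV_ROUND_UP(1033560, 19200) = 54. */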
+	if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
+			dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+		DRM_ERROR("Can't get a suitable ratio from DSI PLL ratios\n");
+		return false;
+	}
+
+	/*
+	 * Program the DSI ratio and select the MIPIC and MIPIA PLL outputs
+	 * as 8x. The spec says both have to be programmed, even if one is
+	 * not being used. The MIPI_CLOCK_CTL dividers are configured during
+	 * modeset.
+	 */
+	val = I915_READ(BXT_DSI_PLL_CTL);
+	val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
+	val &= ~BXT_DSI_FREQ_SEL_MASK;
+	val &= ~BXT_DSI_PLL_RATIO_MASK;
+	val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+
+	/* Per the hardware team's recommendation, program
+	 * PVD ratio = 1 if the DSI ratio is <= 50.
+	 */
+	if (dsi_ratio <= 50) {
+		val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
+		val |= BXT_DSI_PLL_PVD_RATIO_1;
+	}
+
+	I915_WRITE(BXT_DSI_PLL_CTL, val);
+	POSTING_READ(BXT_DSI_PLL_CTL);
+
+	return true;
+}
+
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	val = I915_READ(BXT_DSI_PLL_ENABLE);
+
+	if (val & BXT_DSI_PLL_DO_ENABLE) {
+		WARN(1, "DSI PLL already enabled. Disabling it.\n");
+		val &= ~BXT_DSI_PLL_DO_ENABLE;
+		I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+	}
+
+	/* Configure PLL values */
+	if (!bxt_configure_dsi_pll(encoder)) {
+		DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
+		return;
+	}
+
+	/* Program TX, RX, Dphy clocks */
+	for_each_dsi_port(port, intel_dsi->ports)
+		bxt_dsi_program_clocks(encoder->base.dev, port);
+
+	/* Enable DSI PLL */
+	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	val |= BXT_DSI_PLL_DO_ENABLE;
+	I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+
+	/* Timeout and fail if PLL not locked */
+	if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_enable_dsi_pll(encoder);
+	else if (IS_BROXTON(dev))
+		bxt_enable_dsi_pll(encoder);
+}
+
+void intel_disable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_disable_dsi_pll(encoder);
+	else if (IS_BROXTON(dev))
+		bxt_disable_dsi_pll(encoder);
+}
+
+static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+{
+	u32 tmp;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Clear old configurations */
+	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
+	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+}
+
+void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_BROXTON(dev))
+		bxt_dsi_reset_clocks(encoder, port);
+	else if (IS_VALLEYVIEW(dev))
+		vlv_dsi_reset_clocks(encoder, port);
+}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
new file mode 100644
index 0000000..8492053
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "dvo.h"
+
+#define SIL164_ADDR	0x38
+#define CH7xxx_ADDR	0x76
+#define TFP410_ADDR	0x38
+#define NS2501_ADDR     0x38
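+
+/* Note: sil164, tfp410 and ns2501 share I2C slave address 0x38; the
+ * entries in the table below are tried in turn to find the chip that is
+ * actually present. */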
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "sil164",
+		.dvo_reg = DVOC,
+		.slave_addr = SIL164_ADDR,
+		.dev_ops = &sil164_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ch7xxx",
+		.dvo_reg = DVOC,
+		.slave_addr = CH7xxx_ADDR,
+		.dev_ops = &ch7xxx_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ch7xxx",
+		.dvo_reg = DVOC,
+		.slave_addr = 0x75, /* For some ch7010 */
+		.dev_ops = &ch7xxx_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_LVDS,
+		.name = "ivch",
+		.dvo_reg = DVOA,
+		.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+		.dev_ops = &ivch_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "tfp410",
+		.dvo_reg = DVOC,
+		.slave_addr = TFP410_ADDR,
+		.dev_ops = &tfp410_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_LVDS,
+		.name = "ch7017",
+		.dvo_reg = DVOC,
+		.slave_addr = 0x75,
+		.gpio = GMBUS_PIN_DPB,
+		.dev_ops = &ch7017_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ns2501",
+		.dvo_reg = DVOB,
+		.slave_addr = NS2501_ADDR,
+		.dev_ops = &ns2501_ops,
+	}
+};
+
+struct intel_dvo {
+	struct intel_encoder base;
+
+	struct intel_dvo_device dev;
+
+	struct intel_connector *attached_connector;
+
+	bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dvo, base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+	return enc_to_dvo(intel_attached_encoder(connector));
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
+
+	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	u32 tmp;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_dvo_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+	if (tmp & DVO_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (tmp & DVO_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+	I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+	I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+					 &crtc->config->base.mode,
+					 &crtc->config->base.adjusted_mode);
+
+	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+	I915_READ(dvo_reg);
+
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	const struct drm_display_mode *fixed_mode =
+		to_intel_connector(connector)->panel.fixed_mode;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+	int target_clock = mode->clock;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* XXX: Validate clock range */
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+
+		target_clock = fixed_mode->clock;
+	}
+
+	if (target_clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
+	return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *pipe_config)
+{
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	const struct drm_display_mode *fixed_mode =
+		intel_dvo->attached_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+
+	/* If we have timings from the BIOS for the panel, put them into
+	 * the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	if (fixed_mode)
+		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	return true;
+}
+
+static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	int pipe = crtc->pipe;
+	u32 dvo_val;
+	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+
+	switch (dvo_reg) {
+	case DVOA:
+	default:
+		dvo_srcdim_reg = DVOA_SRCDIM;
+		break;
+	case DVOB:
+		dvo_srcdim_reg = DVOB_SRCDIM;
+		break;
+	case DVOC:
+		dvo_srcdim_reg = DVOC_SRCDIM;
+		break;
+	}
+
+	/* Save the data order, since I don't know what it should be set to. */
+	dvo_val = I915_READ(dvo_reg) &
+		  (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+	dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+		   DVO_BLANK_ACTIVE_HIGH;
+
+	if (pipe == 1)
+		dvo_val |= DVO_PIPE_B_SELECT;
+	dvo_val |= DVO_PIPE_STALL;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+	/*I915_WRITE(DVOB_SRCDIM,
+	  (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+	  (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+	I915_WRITE(dvo_srcdim_reg,
+		   (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+		   (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+	/*I915_WRITE(DVOB, dvo_val);*/
+	I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Unimplemented.
+ */
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	const struct drm_display_mode *fixed_mode =
+		to_intel_connector(connector)->panel.fixed_mode;
+
+	/* We should probably have an i2c driver get_modes function for those
+	 * devices which will have a fixed set of modes determined by the chip
+	 * (TV-out, for example), but for now with just TMDS and LVDS,
+	 * that's not the case.
+	 */
+	intel_ddc_get_modes(connector,
+			    intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+	if (!list_empty(&connector->probed_modes))
+		return 1;
+
+	if (fixed_mode) {
+		struct drm_display_mode *mode;
+		mode = drm_mode_duplicate(connector->dev, fixed_mode);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static void intel_dvo_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	intel_panel_fini(&to_intel_connector(connector)->panel);
+	kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_dvo_detect,
+	.destroy = intel_dvo_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+	.mode_valid = intel_dvo_mode_valid,
+	.get_modes = intel_dvo_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+	struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
+
+	if (intel_dvo->dev.dev_ops->destroy)
+		intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+	.destroy = intel_dvo_enc_destroy,
+};
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+	struct drm_display_mode *mode = NULL;
+
+	/* If the DVO port is active, that'll be the LVDS, so we can pull out
+	 * its timings to get how the BIOS set up the panel.
+	 */
+	if (dvo_val & DVO_ENABLE) {
+		struct drm_crtc *crtc;
+		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+		crtc = intel_get_crtc_for_pipe(dev, pipe);
+		if (crtc) {
+			mode = intel_crtc_mode_get(dev, crtc);
+			if (mode) {
+				mode->type |= DRM_MODE_TYPE_PREFERRED;
+				if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+					mode->flags |= DRM_MODE_FLAG_PHSYNC;
+				if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+					mode->flags |= DRM_MODE_FLAG_PVSYNC;
+			}
+		}
+	}
+
+	return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	struct intel_dvo *intel_dvo;
+	struct intel_connector *intel_connector;
+	int i;
+	int encoder_type = DRM_MODE_ENCODER_NONE;
+
+	intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
+	if (!intel_dvo)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_dvo);
+		return;
+	}
+
+	intel_dvo->attached_connector = intel_connector;
+
+	intel_encoder = &intel_dvo->base;
+	drm_encoder_init(dev, &intel_encoder->base,
+			 &intel_dvo_enc_funcs, encoder_type);
+
+	intel_encoder->disable = intel_disable_dvo;
+	intel_encoder->enable = intel_enable_dvo;
+	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+	intel_encoder->get_config = intel_dvo_get_config;
+	intel_encoder->compute_config = intel_dvo_compute_config;
+	intel_encoder->pre_enable = intel_dvo_pre_enable;
+	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	/* Now, try to find a controller */
+	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+		struct drm_connector *connector = &intel_connector->base;
+		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+		struct i2c_adapter *i2c;
+		int gpio;
+		bool dvoinit;
+		enum pipe pipe;
+		uint32_t dpll[I915_MAX_PIPES];
+
+		/* Allow the I2C driver info to specify the GPIO to be used in
+		 * special cases, but otherwise default to what's defined
+		 * in the spec.
+		 */
+		if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+			gpio = dvo->gpio;
+		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+			gpio = GMBUS_PIN_SSC;
+		else
+			gpio = GMBUS_PIN_DPB;
+
+		/* Set up the I2C bus necessary for the chip we're probing.
+		 * It appears that everything is on GPIOE except for panels
+		 * on i830 laptops, which are on GPIOB (DVOA).
+		 */
+		i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+		intel_dvo->dev = *dvo;
+
+		/* GMBUS NAK handling seems to be unstable, hence let the
+		 * transmitter detection run in bit banging mode for now.
+		 */
+		intel_gmbus_force_bit(i2c, true);
+
+		/* ns2501 requires the DVO 2x clock before it will
+		 * respond to i2c accesses, so make sure we have
+		 * the clock enabled before we attempt to
+		 * initialize the device.
+		 */
+		for_each_pipe(dev_priv, pipe) {
+			dpll[pipe] = I915_READ(DPLL(pipe));
+			I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+		}
+
+		dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+		/* restore the DVO 2x clock state to original */
+		for_each_pipe(dev_priv, pipe) {
+			I915_WRITE(DPLL(pipe), dpll[pipe]);
+		}
+
+		intel_gmbus_force_bit(i2c, false);
+
+		if (!dvoinit)
+			continue;
+
+		intel_encoder->type = INTEL_OUTPUT_DVO;
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+		switch (dvo->type) {
+		case INTEL_DVO_CHIP_TMDS:
+			intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
+				(1 << INTEL_OUTPUT_DVO);
+			drm_connector_init(dev, connector,
+					   &intel_dvo_connector_funcs,
+					   DRM_MODE_CONNECTOR_DVII);
+			encoder_type = DRM_MODE_ENCODER_TMDS;
+			break;
+		case INTEL_DVO_CHIP_LVDS:
+			intel_encoder->cloneable = 0;
+			drm_connector_init(dev, connector,
+					   &intel_dvo_connector_funcs,
+					   DRM_MODE_CONNECTOR_LVDS);
+			encoder_type = DRM_MODE_ENCODER_LVDS;
+			break;
+		}
+
+		drm_connector_helper_add(connector,
+					 &intel_dvo_connector_helper_funcs);
+		connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+
+		intel_connector_attach_encoder(intel_connector, intel_encoder);
+		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+			/* For our LVDS chipsets, we should hopefully be able
+			 * to dig the fixed panel mode out of the BIOS data.
+			 * However, it's in a different format from the BIOS
+			 * data on chipsets with integrated LVDS (stored in AIM
+			 * headers, likely), so for now, just get the current
+			 * mode being output through DVO.
+			 */
+			intel_panel_init(&intel_connector->panel,
+					 intel_dvo_get_current_mode(connector),
+					 NULL);
+			intel_dvo->panel_wants_dither = true;
+		}
+
+		drm_connector_register(connector);
+		return;
+	}
+
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dvo);
+	kfree(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
new file mode 100644
index 0000000..cf47352
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -0,0 +1,1125 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns: they come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and programming its
+ * offset in the proper registers. The hardware takes care of all the
+ * compression/decompression. However, there are many known cases where we
+ * have to forcibly disable it to allow proper screen updates.
+ */
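+
+/*
+ * Roughly, the rest of the driver is expected to drive this file as follows
+ * (a simplified sketch; the actual call sites live elsewhere in i915):
+ *
+ *	intel_fbc_init(dev_priv);		      once, at driver load
+ *	intel_fbc_update(dev_priv);		      after modesets/flips
+ *	intel_fbc_invalidate(dev_priv, bits, origin); frontbuffer dirtied
+ *	intel_fbc_flush(dev_priv, bits, origin);      frontbuffer rendering done
+ *	intel_fbc_disable(dev_priv);		      teardown
+ */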
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static inline bool fbc_supported(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->fbc.enable_fbc != NULL;
+}
+
+/*
+ * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
+ * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
+ * origin so the x and y offsets can actually fit in the registers. As a
+ * consequence, the fence doesn't really start exactly at the display plane
+ * address we program because it starts at the real start of the buffer, so we
+ * have to take this into consideration here.
+ */
+static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+{
+	return crtc->base.y - crtc->adjusted_y;
+}
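+
+/*
+ * Worked example, with hypothetical numbers: if crtc->base.y == 103 and
+ * crtc->adjusted_y == 96, the function above returns a fence Y offset of
+ * 103 - 96 == 7, i.e. the distance between the real buffer start and the
+ * plane address we actually programmed.
+ */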
+
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	u32 fbc_ctl;
+
+	dev_priv->fbc.enabled = false;
+
+	/* Disable compression */
+	fbc_ctl = I915_READ(FBC_CONTROL);
+	if ((fbc_ctl & FBC_CTL_EN) == 0)
+		return;
+
+	fbc_ctl &= ~FBC_CTL_EN;
+	I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+	/* Wait for compressing bit to clear */
+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+		DRM_DEBUG_KMS("FBC idle timed out\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	int cfb_pitch;
+	int i;
+	u32 fbc_ctl;
+
+	dev_priv->fbc.enabled = true;
+
+	/* Note: fbc.threshold == 1 for i8xx */
+	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+	if (fb->pitches[0] < cfb_pitch)
+		cfb_pitch = fb->pitches[0];
+
+	/* FBC_CTL wants 32B or 64B units */
+	if (IS_GEN2(dev_priv))
+		cfb_pitch = (cfb_pitch / 32) - 1;
+	else
+		cfb_pitch = (cfb_pitch / 64) - 1;
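+	/* e.g. a 4096 byte pitch (hypothetical) is programmed as
+	 * (4096 / 32) - 1 == 127 on gen2 and (4096 / 64) - 1 == 63 otherwise.
+	 */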
+
+	/* Clear old tags */
+	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+		I915_WRITE(FBC_TAG(i), 0);
+
+	if (IS_GEN4(dev_priv)) {
+		u32 fbc_ctl2;
+
+		/* Set it up... */
+		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
+		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+		I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
+	}
+
+	/* enable it... */
+	fbc_ctl = I915_READ(FBC_CONTROL);
+	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev_priv))
+		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+	fbc_ctl |= obj->fence_reg;
+	I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
+}
+
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+{
+	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	u32 dpfc_ctl;
+
+	dev_priv->fbc.enabled = true;
+
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+	else
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+
+	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
+
+	/* enable it... */
+	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+}
+
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	u32 dpfc_ctl;
+
+	dev_priv->fbc.enabled = false;
+
+	/* Disable compression */
+	dpfc_ctl = I915_READ(DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+		DRM_DEBUG_KMS("disabled FBC\n");
+	}
+}
+
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+{
+	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+	POSTING_READ(MSG_FBC_REND_STATE);
+}
+
+static void ilk_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
+	unsigned int y_offset;
+
+	dev_priv->fbc.enabled = true;
+
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		threshold++;
+
+	switch (threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+	dpfc_ctl |= DPFC_CTL_FENCE_EN;
+	if (IS_GEN5(dev_priv))
+		dpfc_ctl |= obj->fence_reg;
+
+	y_offset = get_crtc_fence_y_offset(crtc);
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
+	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+	/* enable it... */
+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+	if (IS_GEN6(dev_priv)) {
+		I915_WRITE(SNB_DPFC_CTL_SA,
+			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
+	}
+
+	intel_fbc_nuke(dev_priv);
+
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+}
+
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	u32 dpfc_ctl;
+
+	dev_priv->fbc.enabled = false;
+
+	/* Disable compression */
+	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+		DRM_DEBUG_KMS("disabled FBC\n");
+	}
+}
+
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+{
+	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
+
+	dev_priv->fbc.enabled = true;
+
+	dpfc_ctl = 0;
+	if (IS_IVYBRIDGE(dev_priv))
+		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		threshold++;
+
+	switch (threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
+	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+
+	if (dev_priv->fbc.false_color)
+		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
+	if (IS_IVYBRIDGE(dev_priv)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
+			   HSW_FBCQ_DIS);
+	}
+
+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+	I915_WRITE(SNB_DPFC_CTL_SA,
+		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
+
+	intel_fbc_nuke(dev_priv);
+
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+}
+
+/**
+ * intel_fbc_enabled - Is FBC enabled?
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to verify the current state of FBC.
+ * FIXME: This should be tracked in the plane config eventually
+ *        instead of queried at runtime for most callers.
+ */
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->fbc.enabled;
+}
+
+static void intel_fbc_enable(struct intel_crtc *crtc,
+			     const struct drm_framebuffer *fb)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	dev_priv->fbc.enable_fbc(crtc);
+
+	dev_priv->fbc.crtc = crtc;
+	dev_priv->fbc.fb_id = fb->base.id;
+	dev_priv->fbc.y = crtc->base.y;
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+	struct intel_fbc_work *work =
+		container_of(to_delayed_work(__work),
+			     struct intel_fbc_work, work);
+	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (work == dev_priv->fbc.fbc_work) {
+		/* Double check that we haven't switched fb without cancelling
+		 * the prior work.
+		 */
+		if (crtc_fb == work->fb)
+			intel_fbc_enable(work->crtc, work->fb);
+
+		dev_priv->fbc.fbc_work = NULL;
+	}
+	mutex_unlock(&dev_priv->fbc.lock);
+
+	kfree(work);
+}
+
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	if (dev_priv->fbc.fbc_work == NULL)
+		return;
+
+	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+	/* Synchronisation is provided by struct_mutex and checking of
+	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
+	 * entirely asynchronously.
+	 */
+	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
+		/* tasklet was killed before being run, clean up */
+		kfree(dev_priv->fbc.fbc_work);
+
+	/* Mark the work as no longer wanted so that if it does
+	 * wake up (because the work was already running and waiting
+	 * for our mutex), it will discover that it is no longer
+	 * necessary to run.
+	 */
+	dev_priv->fbc.fbc_work = NULL;
+}
+
+static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
+{
+	struct intel_fbc_work *work;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	intel_fbc_cancel_work(dev_priv);
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL) {
+		DRM_ERROR("Failed to allocate FBC work structure\n");
+		intel_fbc_enable(crtc, crtc->base.primary->fb);
+		return;
+	}
+
+	work->crtc = crtc;
+	work->fb = crtc->base.primary->fb;
+	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+	dev_priv->fbc.fbc_work = work;
+
+	/* Delay the actual enabling to let pageflipping cease and the
+	 * display settle before starting the compression. Note that
+	 * this delay also serves a second purpose: it allows for a
+	 * vblank to pass after disabling the FBC before we attempt
+	 * to modify the control registers.
+	 *
+	 * A more complicated solution would involve tracking vblanks
+	 * following the termination of the page-flipping sequence
+	 * and indeed performing the enable as a co-routine and not
+	 * waiting synchronously upon the vblank.
+	 *
+	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+	 */
+	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	intel_fbc_cancel_work(dev_priv);
+
+	dev_priv->fbc.disable_fbc(dev_priv);
+	dev_priv->fbc.crtc = NULL;
+}
+
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+	switch (reason) {
+	case FBC_OK:
+		return "FBC enabled but currently disabled in hardware";
+	case FBC_UNSUPPORTED:
+		return "unsupported by this chipset";
+	case FBC_NO_OUTPUT:
+		return "no output";
+	case FBC_STOLEN_TOO_SMALL:
+		return "not enough stolen memory";
+	case FBC_UNSUPPORTED_MODE:
+		return "mode incompatible with compression";
+	case FBC_MODE_TOO_LARGE:
+		return "mode too large for compression";
+	case FBC_BAD_PLANE:
+		return "FBC unsupported on plane";
+	case FBC_NOT_TILED:
+		return "framebuffer not tiled or fenced";
+	case FBC_MULTIPLE_PIPES:
+		return "more than one pipe active";
+	case FBC_MODULE_PARAM:
+		return "disabled per module param";
+	case FBC_CHIP_DEFAULT:
+		return "disabled per chip default";
+	case FBC_ROTATION:
+		return "rotation unsupported";
+	case FBC_IN_DBG_MASTER:
+		return "Kernel debugger is active";
+	case FBC_BAD_STRIDE:
+		return "framebuffer stride not supported";
+	case FBC_PIXEL_RATE:
+		return "pixel rate is too big";
+	case FBC_PIXEL_FORMAT:
+		return "pixel format is invalid";
+	default:
+		MISSING_CASE(reason);
+		return "unknown reason";
+	}
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
+			      enum no_fbc_reason reason)
+{
+	if (dev_priv->fbc.no_fbc_reason == reason)
+		return;
+
+	dev_priv->fbc.no_fbc_reason = reason;
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
+}
+
+static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+{
+	struct drm_crtc *crtc = NULL, *tmp_crtc;
+	enum pipe pipe;
+	bool pipe_a_only = false;
+
+	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+		pipe_a_only = true;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(tmp_crtc) &&
+		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
+			crtc = tmp_crtc;
+
+		if (pipe_a_only)
+			break;
+	}
+
+	if (!crtc || crtc->primary->fb == NULL)
+		return NULL;
+
+	return crtc;
+}
+
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+	int n_pipes = 0;
+	struct drm_crtc *crtc;
+
+	if (INTEL_INFO(dev_priv)->gen > 4)
+		return true;
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(crtc) &&
+		    to_intel_plane_state(crtc->primary->state)->visible)
+			n_pipes++;
+	}
+
+	return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
+{
+	int compression_threshold = 1;
+	int ret;
+	u64 end;
+
+	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
+	 * reserved range size, so it always assumes the maximum (8 MiB) is
+	 * used. If we enable FBC using a CFB on that memory range we'll get
+	 * FIFO underruns, even if that range is not reserved by the BIOS. */
+	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+	else
+		end = dev_priv->gtt.stolen_usable_size;
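+	/* e.g. assuming 32 MiB of stolen memory on BDW (hypothetical), the
+	 * CFB must come from the first 24 MiB even if the BIOS actually
+	 * reserved less than 8 MiB at the top.
+	 */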
+
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, therefore, in that case, we only have one fallback left.
+	 */
+
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+						   4096, 0, end);
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+						   4096, 0, end);
+	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
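+
+/*
+ * Worked example for the search above, with hypothetical numbers: for a
+ * requested size of 4 MiB at fb_cpp == 4, the allocator tries 8 MiB
+ * (over-allocation, threshold 1), then 4 MiB (threshold 1), then 2 MiB
+ * (threshold 2), then 1 MiB (threshold 4), and finally gives up, since the
+ * hardware can't limit the CFB beyond 1:4 (and fb_cpp == 2 stops one step
+ * earlier).
+ */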
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
+		goto err_llb;
+	else if (ret > 1) {
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
+	}
+
+	dev_priv->fbc.threshold = ret;
+
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	else if (IS_GM45(dev_priv)) {
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	} else {
+		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+		if (!compressed_llb)
+			goto err_fb;
+
+		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+						  4096, 4096);
+		if (ret)
+			goto err_fb;
+
+		dev_priv->fbc.compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
+	}
+
+	dev_priv->fbc.uncompressed_size = size;
+
+	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+		      dev_priv->fbc.compressed_fb.size,
+		      dev_priv->fbc.threshold);
+
+	return 0;
+
+err_fb:
+	kfree(compressed_llb);
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+	pr_info_once("drm: not enough stolen space for compressed buffer (need %d bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+	return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->fbc.uncompressed_size == 0)
+		return;
+
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+	if (dev_priv->fbc.compressed_llb) {
+		i915_gem_stolen_remove_node(dev_priv,
+					    dev_priv->fbc.compressed_llb);
+		kfree(dev_priv->fbc.compressed_llb);
+	}
+
+	dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_cleanup_cfb(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+					    int *width, int *height)
+{
+	struct intel_plane_state *plane_state =
+			to_intel_plane_state(crtc->base.primary->state);
+	int w, h;
+
+	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+		w = drm_rect_height(&plane_state->src) >> 16;
+		h = drm_rect_width(&plane_state->src) >> 16;
+	} else {
+		w = drm_rect_width(&plane_state->src) >> 16;
+		h = drm_rect_height(&plane_state->src) >> 16;
+	}
+
+	if (width)
+		*width = w;
+	if (height)
+		*height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	int lines;
+
+	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		lines = min(lines, 2048);
+
+	return lines * fb->pitches[0];
+}
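+
+/*
+ * e.g. for a 1920x1080 primary plane with a (hypothetical) 7680 byte
+ * stride, the CFB size computed above is 1080 * 7680 == 8294400 bytes;
+ * on gen7+ a 3840x2160 plane would first be clamped to 2048 lines.
+ */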
+
+static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
+	int size, cpp;
+
+	size = intel_fbc_calculate_cfb_size(crtc);
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	if (size <= dev_priv->fbc.uncompressed_size)
+		return 0;
+
+	/* Release any current block */
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	return intel_fbc_alloc_cfb(dev_priv, size, cpp);
+}
+
+static bool stride_is_valid(struct drm_i915_private *dev_priv,
+			    unsigned int stride)
+{
+	/* These should have been caught earlier. */
+	WARN_ON(stride < 512);
+	WARN_ON((stride & (64 - 1)) != 0);
+
+	/* Below are the additional FBC restrictions. */
+
+	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+		return stride == 4096 || stride == 8192;
+
+	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+		return false;
+
+	if (stride > 16384)
+		return false;
+
+	return true;
+}
+
+static bool pixel_format_is_valid(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+		return true;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_RGB565:
+		/* 16bpp not supported on gen2 */
+		if (IS_GEN2(dev))
+			return false;
+		/* WaFbcOnly1to1Ratio:ctg */
+		if (IS_G4X(dev_priv))
+			return false;
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * For some reason, the hardware tracking starts looking at whatever we
+ * programmed as the display plane base address register. It does not look at
+ * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
+ * variables instead of just looking at the pipe/plane size.
+ */
+static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	unsigned int effective_w, effective_h, max_w, max_h;
+
+	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+		max_w = 4096;
+		max_h = 4096;
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+		max_w = 4096;
+		max_h = 2048;
+	} else {
+		max_w = 2048;
+		max_h = 1536;
+	}
+
+	intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
+	effective_w += crtc->adjusted_x;
+	effective_h += crtc->adjusted_y;
+
+	return effective_w <= max_w && effective_h <= max_h;
+}
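+
+/*
+ * e.g. assuming a 1920x1080 plane with crtc->adjusted_x == 64 and
+ * crtc->adjusted_y == 0 (hypothetical values), the effective size checked
+ * above is 1984x1080, which fits even the pre-g4x 2048x1536 limit.
+ */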
+
+/**
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
+ *
+ * Set up the framebuffer compression hardware at mode set time.  We
+ * enable it if possible:
+ *   - plane A only (on pre-965)
+ *   - no pixel multiply/line duplication
+ *   - no alpha buffer discard
+ *   - no dual wide
+ *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one.  It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+	struct drm_crtc *crtc = NULL;
+	struct intel_crtc *intel_crtc;
+	struct drm_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
+	const struct drm_display_mode *adjusted_mode;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	/* disable framebuffer compression in vGPU */
+	if (intel_vgpu_active(dev_priv->dev))
+		i915.enable_fbc = 0;
+
+	if (i915.enable_fbc < 0) {
+		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
+		goto out_disable;
+	}
+
+	if (!i915.enable_fbc) {
+		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
+		goto out_disable;
+	}
+
+	/*
+	 * If FBC is already on, we just have to verify that we can
+	 * keep it that way...
+	 * Need to disable if:
+	 *   - more than one pipe is active
+	 *   - changing FBC params (stride, fence, mode)
+	 *   - new fb is too large to fit in compressed buffer
+	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
+	 */
+	crtc = intel_fbc_find_crtc(dev_priv);
+	if (!crtc) {
+		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
+		goto out_disable;
+	}
+
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+		goto out_disable;
+	}
+
+	intel_crtc = to_intel_crtc(crtc);
+	fb = crtc->primary->fb;
+	obj = intel_fb_obj(fb);
+	adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
+		goto out_disable;
+	}
+
+	if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
+		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
+		goto out_disable;
+	}
+
+	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
+	    intel_crtc->plane != PLANE_A) {
+		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
+		goto out_disable;
+	}
+
+	/* The use of a CPU fence is mandatory in order to detect writes
+	 * by the CPU to the scanout and trigger updates to the FBC.
+	 */
+	if (obj->tiling_mode != I915_TILING_X ||
+	    obj->fence_reg == I915_FENCE_REG_NONE) {
+		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
+		goto out_disable;
+	}
+	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
+		set_no_fbc_reason(dev_priv, FBC_ROTATION);
+		goto out_disable;
+	}
+
+	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
+		set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
+		goto out_disable;
+	}
+
+	if (!pixel_format_is_valid(fb)) {
+		set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
+		goto out_disable;
+	}
+
+	/* If the kernel debugger is active, always disable compression */
+	if (in_dbg_master()) {
+		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
+		goto out_disable;
+	}
+
+	/* WaFbcExceedCdClockThreshold:hsw,bdw */
+	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
+	    ilk_pipe_pixel_rate(intel_crtc->config) >=
+	    dev_priv->cdclk_freq * 95 / 100) {
+		set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
+		goto out_disable;
+	}
+
+	if (intel_fbc_setup_cfb(intel_crtc)) {
+		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
+		goto out_disable;
+	}
+
+	/* If the scanout has not changed, don't modify the FBC settings.
+	 * Note that we make the fundamental assumption that the fb->obj
+	 * cannot be unpinned (and have its GTT offset and fence revoked)
+	 * without first being decoupled from the scanout and FBC disabled.
+	 */
+	if (dev_priv->fbc.crtc == intel_crtc &&
+	    dev_priv->fbc.fb_id == fb->base.id &&
+	    dev_priv->fbc.y == crtc->y)
+		return;
+
+	if (intel_fbc_enabled(dev_priv)) {
+		/* We update FBC along two paths, after changing fb/crtc
+		 * configuration (modeswitching) and after page-flipping
+		 * finishes. For the latter, we know that not only did
+		 * we disable the FBC at the start of the page-flip
+		 * sequence, but also more than one vblank has passed.
+		 *
+		 * For the former case of modeswitching, it is possible
+		 * to switch between two FBC valid configurations
+		 * instantaneously so we do need to disable the FBC
+		 * before we can modify its control registers. We also
+		 * have to wait for the next vblank for that to take
+		 * effect. However, since we delay enabling FBC we can
+		 * assume that a vblank has passed since disabling and
+		 * that we can safely alter the registers in the deferred
+		 * callback.
+		 *
+		 * In the scenario that we go from a valid to invalid
+		 * and then back to valid FBC configuration we have
+		 * no strict enforcement that a vblank occurred since
+		 * disabling the FBC. However, along all current pipe
+		 * disabling paths we do need to wait for a vblank at
+		 * some point. And we wait before enabling FBC anyway.
+		 */
+		DRM_DEBUG_KMS("disabling active FBC for update\n");
+		__intel_fbc_disable(dev_priv);
+	}
+
+	intel_fbc_schedule_enable(intel_crtc);
+	dev_priv->fbc.no_fbc_reason = FBC_OK;
+	return;
+
+out_disable:
+	/* Multiple disables should be harmless */
+	if (intel_fbc_enabled(dev_priv)) {
+		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+		__intel_fbc_disable(dev_priv);
+	}
+	__intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_update(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits,
+			  enum fb_op_origin origin)
+{
+	unsigned int fbc_bits;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	if (origin == ORIGIN_GTT)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	if (dev_priv->fbc.enabled)
+		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
+	else if (dev_priv->fbc.fbc_work)
+		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
+					dev_priv->fbc.fbc_work->crtc->pipe);
+	else
+		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+
+	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+
+	if (dev_priv->fbc.busy_bits)
+		__intel_fbc_disable(dev_priv);
+
+	mutex_unlock(&dev_priv->fbc.lock);
+}
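+
+/*
+ * e.g. if FBC is active on pipe A and the CPU dirties pipe A's primary
+ * frontbuffer, fbc_bits & frontbuffer_bits is non-zero above, so busy_bits
+ * becomes non-zero and FBC stays disabled until a matching
+ * intel_fbc_flush() clears those bits again.
+ */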
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	if (origin == ORIGIN_GTT)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+
+	if (!dev_priv->fbc.busy_bits) {
+		__intel_fbc_disable(dev_priv);
+		__intel_fbc_update(dev_priv);
+	}
+
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during the PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+
+	mutex_init(&dev_priv->fbc.lock);
+
+	if (!HAS_FBC(dev_priv)) {
+		dev_priv->fbc.enabled = false;
+		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
+		return;
+	}
+
+	for_each_pipe(dev_priv, pipe) {
+		dev_priv->fbc.possible_framebuffer_bits |=
+				INTEL_FRONTBUFFER_PRIMARY(pipe);
+
+		if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+			break;
+	}
+
+	if (INTEL_INFO(dev_priv)->gen >= 7) {
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+	} else if (IS_GM45(dev_priv)) {
+		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
+	} else {
+		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
+
+		/* This value was pulled out of someone's hat */
+		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+	}
+
+	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
new file mode 100644
index 0000000..c0c094d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/async.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct intel_fbdev *ifbdev =
+		container_of(fb_helper, struct intel_fbdev, helper);
+	int ret;
+
+	ret = drm_fb_helper_set_par(info);
+
+	if (ret == 0) {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
+
+	return ret;
+}
+
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct intel_fbdev *ifbdev =
+		container_of(fb_helper, struct intel_fbdev, helper);
+	int ret;
+
+	ret = drm_fb_helper_blank(blank, info);
+
+	if (ret == 0) {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
+
+	return ret;
+}
+
+static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
+				   struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct intel_fbdev *ifbdev =
+		container_of(fb_helper, struct intel_fbdev, helper);
+	int ret;
+
+	ret = drm_fb_helper_pan_display(var, info);
+
+	if (ret == 0) {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
+
+	return ret;
+}
+
+static struct fb_ops intelfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = intel_fbdev_set_par,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
+	.fb_pan_display = intel_fbdev_pan_display,
+	.fb_blank = intel_fbdev_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int intelfb_alloc(struct drm_fb_helper *helper,
+			 struct drm_fb_helper_surface_size *sizes)
+{
+	struct intel_fbdev *ifbdev =
+		container_of(helper, struct intel_fbdev, helper);
+	struct drm_framebuffer *fb;
+	struct drm_device *dev = helper->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_mode_fb_cmd2 mode_cmd = {};
+	struct drm_i915_gem_object *obj = NULL;
+	int size, ret;
+
+	/* we don't do packed 24bpp */
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = PAGE_ALIGN(size);
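+	/* e.g. a 1366x768 console at 32bpp (hypothetical numbers): pitch ==
+	 * ALIGN(1366 * 4, 64) == 5504 and size == PAGE_ALIGN(5504 * 768) ==
+	 * 4227072 bytes.
+	 */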
+
+	/* If the FB is too big, just don't use it since fbdev is not very
+	 * important and we should probably use that space for FBC or other
+	 * features. */
+	if (size * 2 < dev_priv->gtt.stolen_usable_size)
+		obj = i915_gem_object_create_stolen(dev, size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
+		DRM_ERROR("failed to allocate framebuffer\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto out_unref;
+	}
+
+	/* Flush everything out, we'll be doing GTT only from now on */
+	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
+	if (ret) {
+		DRM_ERROR("failed to pin obj: %d\n", ret);
+		goto out_fb;
+	}
+
+	ifbdev->fb = to_intel_framebuffer(fb);
+
+	return 0;
+
+out_fb:
+	drm_framebuffer_remove(fb);
+out_unref:
+	drm_gem_object_unreference(&obj->base);
+out:
+	return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+			  struct drm_fb_helper_surface_size *sizes)
+{
+	struct intel_fbdev *ifbdev =
+		container_of(helper, struct intel_fbdev, helper);
+	struct intel_framebuffer *intel_fb = ifbdev->fb;
+	struct drm_device *dev = helper->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
+	int size, ret;
+	bool prealloc = false;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (intel_fb &&
+	    (sizes->fb_width > intel_fb->base.width ||
+	     sizes->fb_height > intel_fb->base.height)) {
+		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+			      " releasing it\n",
+			      intel_fb->base.width, intel_fb->base.height,
+			      sizes->fb_width, sizes->fb_height);
+		drm_framebuffer_unreference(&intel_fb->base);
+		intel_fb = ifbdev->fb = NULL;
+	}
+	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+		ret = intelfb_alloc(helper, sizes);
+		if (ret)
+			goto out_unlock;
+		intel_fb = ifbdev->fb;
+	} else {
+		DRM_DEBUG_KMS("re-using BIOS fb\n");
+		prealloc = true;
+		sizes->fb_width = intel_fb->base.width;
+		sizes->fb_height = intel_fb->base.height;
+	}
+
+	obj = intel_fb->obj;
+	size = obj->base.size;
+
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto out_unpin;
+	}
+
+	info->par = helper;
+
+	fb = &ifbdev->fb->base;
+
+	ifbdev->helper.fb = fb;
+
+	strcpy(info->fix.id, "inteldrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &intelfb_ops;
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures->ranges[0].base = dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+
+	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
+	info->fix.smem_len = size;
+
+	info->screen_base =
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+			   size);
+	if (!info->screen_base) {
+		ret = -ENOSPC;
+		goto out_destroy_fbi;
+	}
+	info->screen_size = size;
+
+	/* This driver doesn't need a VT switch to restore the mode on resume */
+	info->skip_vt_switch = true;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	/* If the object is shmemfs backed, it will have given us zeroed pages.
+	 * If the object is stolen however, it will be full of whatever
+	 * garbage was left in there.
+	 */
+	if (ifbdev->fb->obj->stolen && !prealloc)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
+		      fb->width, fb->height,
+		      i915_gem_obj_ggtt_offset(obj), obj);
+
+	mutex_unlock(&dev->struct_mutex);
+	vga_switcheroo_client_fb_set(dev->pdev, info);
+	return 0;
+
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
+out_unpin:
+	i915_gem_object_ggtt_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static struct drm_fb_helper_crtc *
+intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+			return &fb_helper->crtc_info[i];
+
+	return NULL;
+}
+
+/*
+ * Try to read the BIOS display configuration and use it for the initial
+ * fb configuration.
+ *
+ * The BIOS or boot loader will generally create an initial display
+ * configuration for us that includes some set of active pipes and displays.
+ * This routine tries to figure out which pipes and connectors are active
+ * and stuffs them into the crtcs and modes array given to us by the
+ * drm_fb_helper code.
+ *
+ * The overall sequence is:
+ *   intel_fbdev_init - from driver load
+ *     intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
+ *     drm_fb_helper_init - build fb helper structs
+ *     drm_fb_helper_single_add_all_connectors - more fb helper structs
+ *   intel_fbdev_initial_config - apply the config
+ *     drm_fb_helper_initial_config - call ->probe then register_framebuffer()
+ *         drm_setup_crtcs - build crtc config for fbdev
+ *           intel_fb_initial_config - find active connectors etc
+ *         drm_fb_helper_single_fb_probe - set up fbdev
+ *           intelfb_create - re-use or alloc fb, build out fbdev structs
+ *
+ * Note that we don't give special consideration to whether we could actually
+ * switch to the selected modes without a full modeset. E.g. when the display
+ * is in VGA mode we need to recalculate watermarks and set a new high-res
+ * framebuffer anyway.
+ */
+static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+				    struct drm_fb_helper_crtc **crtcs,
+				    struct drm_display_mode **modes,
+				    struct drm_fb_offset *offsets,
+				    bool *enabled, int width, int height)
+{
+	struct drm_device *dev = fb_helper->dev;
+	int i, j;
+	bool *save_enabled;
+	bool fallback = true;
+	int num_connectors_enabled = 0;
+	int num_connectors_detected = 0;
+	uint64_t conn_configured = 0, mask;
+	int pass = 0;
+
+	save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
+			       GFP_KERNEL);
+	if (!save_enabled)
+		return false;
+
+	memcpy(save_enabled, enabled, fb_helper->connector_count);
+	mask = (1 << fb_helper->connector_count) - 1;
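+	/* e.g. with 3 connectors (hypothetical), mask == 0x7; the retry loop
+	 * below runs until conn_configured covers all three bits.
+	 */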
+retry:
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_fb_helper_connector *fb_conn;
+		struct drm_connector *connector;
+		struct drm_encoder *encoder;
+		struct drm_fb_helper_crtc *new_crtc;
+
+		fb_conn = fb_helper->connector_info[i];
+		connector = fb_conn->connector;
+
+		if (conn_configured & (1 << i))
+			continue;
+
+		if (pass == 0 && !connector->has_tile)
+			continue;
+
+		if (connector->status == connector_status_connected)
+			num_connectors_detected++;
+
+		if (!enabled[i]) {
+			DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+				      connector->name);
+			conn_configured |= (1 << i);
+			continue;
+		}
+
+		if (connector->force == DRM_FORCE_OFF) {
+			DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
+				      connector->name);
+			enabled[i] = false;
+			continue;
+		}
+
+		encoder = connector->encoder;
+		if (!encoder || WARN_ON(!encoder->crtc)) {
+			if (connector->force > DRM_FORCE_OFF)
+				goto bail;
+
+			DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+				      connector->name);
+			enabled[i] = false;
+			conn_configured |= (1 << i);
+			continue;
+		}
+
+		num_connectors_enabled++;
+
+		new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+
+		/*
+		 * Make sure we're not trying to drive multiple connectors
+		 * with a single CRTC, since our cloning support may not
+		 * match the BIOS.
+		 */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			if (crtcs[j] == new_crtc) {
+				DRM_DEBUG_KMS("fallback: cloned configuration\n");
+				goto bail;
+			}
+		}
+
+		DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+			      connector->name);
+
+		/* go for command line mode first */
+		modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+
+		/* try for preferred next */
+		if (!modes[i]) {
+			DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+				      connector->name, connector->has_tile);
+			modes[i] = drm_has_preferred_mode(fb_conn, width,
+							  height);
+		}
+
+		/* No preferred mode marked by the EDID? Are there any modes? */
+		if (!modes[i] && !list_empty(&connector->modes)) {
+			DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+				      connector->name);
+			modes[i] = list_first_entry(&connector->modes,
+						    struct drm_display_mode,
+						    head);
+		}
+
+		/* last resort: use current mode */
+		if (!modes[i]) {
+			/*
+			 * IMPORTANT: We want to use the adjusted mode (i.e.
+			 * after the panel fitter upscaling) as the initial
+			 * config, not the input mode, which is what crtc->mode
+			 * usually contains. But since our current
+			 * code puts a mode derived from the post-pfit timings
+			 * into crtc->mode this works out correctly.
+			 */
+			DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+				      connector->name);
+			modes[i] = &encoder->crtc->mode;
+		}
+		crtcs[i] = new_crtc;
+
+		DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+			      connector->name,
+			      pipe_name(to_intel_crtc(encoder->crtc)->pipe),
+			      encoder->crtc->base.id,
+			      modes[i]->hdisplay, modes[i]->vdisplay,
+			      modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
+
+		fallback = false;
+		conn_configured |= (1 << i);
+	}
+
+	if ((conn_configured & mask) != mask) {
+		pass++;
+		goto retry;
+	}
+
+	/*
+	 * If the BIOS didn't enable everything it could, fall back to the
+	 * fbdev helper library's behaviour of lighting up as much as
+	 * possible, so the user gets the same experience.
+	 */
+	if (num_connectors_enabled != num_connectors_detected &&
+	    num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+		DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+		DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+			      num_connectors_detected);
+		fallback = true;
+	}
+
+	if (fallback) {
+bail:
+		DRM_DEBUG_KMS("Not using firmware configuration\n");
+		memcpy(enabled, save_enabled, fb_helper->connector_count);
+		kfree(save_enabled);
+		return false;
+	}
+
+	kfree(save_enabled);
+	return true;
+}
+
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+	.initial_config = intel_fb_initial_config,
+	.gamma_set = intel_crtc_fb_gamma_set,
+	.gamma_get = intel_crtc_fb_gamma_get,
+	.fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct drm_device *dev,
+				struct intel_fbdev *ifbdev)
+{
+	drm_fb_helper_unregister_fbi(&ifbdev->helper);
+	drm_fb_helper_release_fbi(&ifbdev->helper);
+
+	drm_fb_helper_fini(&ifbdev->helper);
+
+	drm_framebuffer_unregister_private(&ifbdev->fb->base);
+	drm_framebuffer_remove(&ifbdev->fb->base);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+				 struct intel_fbdev *ifbdev)
+{
+	struct intel_framebuffer *fb = NULL;
+	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+	unsigned int max_size = 0;
+
+	/* Find the largest fb */
+	for_each_crtc(dev, crtc) {
+		struct drm_i915_gem_object *obj =
+			intel_fb_obj(crtc->primary->state->fb);
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc->state->active || !obj) {
+			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+				      pipe_name(intel_crtc->pipe));
+			continue;
+		}
+
+		if (obj->base.size > max_size) {
+			DRM_DEBUG_KMS("found possible fb from plane %c\n",
+				      pipe_name(intel_crtc->pipe));
+			fb = to_intel_framebuffer(crtc->primary->state->fb);
+			max_size = obj->base.size;
+		}
+	}
+
+	if (!fb) {
+		DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+		goto out;
+	}
+
+	/* Now make sure all the pipes will fit into it */
+	for_each_crtc(dev, crtc) {
+		unsigned int cur_size;
+
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc->state->active) {
+			DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+				      pipe_name(intel_crtc->pipe));
+			continue;
+		}
+
+		DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+			      pipe_name(intel_crtc->pipe));
+
+		/*
+		 * See if the plane fb we found above will fit on this
+		 * pipe.  Note we need to use the selected fb's pitch and bpp
+		 * rather than the current pipe's, since they differ.
+		 */
+		cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+		cur_size = cur_size * fb->base.bits_per_pixel / 8;
+		if (fb->base.pitches[0] < cur_size) {
+			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
+				      pipe_name(intel_crtc->pipe),
+				      cur_size, fb->base.pitches[0]);
+			fb = NULL;
+			break;
+		}
+
+		cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+		cur_size = intel_fb_align_height(dev, cur_size,
+						 fb->base.pixel_format,
+						 fb->base.modifier[0]);
+		cur_size *= fb->base.pitches[0];
+		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
+			      pipe_name(intel_crtc->pipe),
+			      intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
+			      intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+			      fb->base.bits_per_pixel,
+			      cur_size);
+
+		if (cur_size > max_size) {
+			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+				      pipe_name(intel_crtc->pipe),
+				      cur_size, max_size);
+			fb = NULL;
+			break;
+		}
+
+		DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+			      pipe_name(intel_crtc->pipe),
+			      max_size, cur_size);
+	}
+
+	if (!fb) {
+		DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+		goto out;
+	}
+
+	ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+	ifbdev->fb = fb;
+
+	drm_framebuffer_reference(&ifbdev->fb->base);
+
+	/* Final pass to check if any active pipes don't have fbs */
+	for_each_crtc(dev, crtc) {
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc->state->active)
+			continue;
+
+		WARN(!crtc->primary->fb,
+		     "re-used BIOS config but lost an fb on crtc %d\n",
+		     crtc->base.id);
+	}
+
+	DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+	return true;
+
+out:
+	return false;
+}
+
+static void intel_fbdev_suspend_worker(struct work_struct *work)
+{
+	intel_fbdev_set_suspend(container_of(work,
+					     struct drm_i915_private,
+					     fbdev_suspend_work)->dev,
+				FBINFO_STATE_RUNNING,
+				true);
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+	struct intel_fbdev *ifbdev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+		return -ENODEV;
+
+	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+	if (ifbdev == NULL)
+		return -ENOMEM;
+
+	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
+	if (!intel_fbdev_init_bios(dev, ifbdev))
+		ifbdev->preferred_bpp = 32;
+
+	ret = drm_fb_helper_init(dev, &ifbdev->helper,
+				 INTEL_INFO(dev)->num_pipes, 4);
+	if (ret) {
+		kfree(ifbdev);
+		return ret;
+	}
+
+	ifbdev->helper.atomic = true;
+
+	dev_priv->fbdev = ifbdev;
+	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+
+	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+
+	return 0;
+}
+
+void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+{
+	struct drm_i915_private *dev_priv = data;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+	/* Due to peculiar init order wrt hpd handling this is separate. */
+	drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->fbdev)
+		return;
+
+	flush_work(&dev_priv->fbdev_suspend_work);
+
+	async_synchronize_full();
+	intel_fbdev_destroy(dev, dev_priv->fbdev);
+	kfree(dev_priv->fbdev);
+	dev_priv->fbdev = NULL;
+}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct fb_info *info;
+
+	if (!ifbdev)
+		return;
+
+	info = ifbdev->helper.fbdev;
+
+	if (synchronous) {
+		/* Flush any pending work to turn the console on, and then
+		 * wait to turn it off. It must be synchronous as we are
+		 * about to suspend or unload the driver.
+		 *
+		 * Note that from within the work-handler, we cannot flush
+		 * ourselves, so only flush outstanding work upon suspend!
+		 */
+		if (state != FBINFO_STATE_RUNNING)
+			flush_work(&dev_priv->fbdev_suspend_work);
+		console_lock();
+	} else {
+		/*
+		 * The console lock can be pretty contended on resume due
+		 * to all the printk activity.  Try to keep it out of the hot
+		 * path of resume if possible.
+		 */
+		WARN_ON(state != FBINFO_STATE_RUNNING);
+		if (!console_trylock()) {
+			/* Don't block our own workqueue as this can
+			 * be run in parallel with other i915.ko tasks.
+			 */
+			schedule_work(&dev_priv->fbdev_suspend_work);
+			return;
+		}
+	}
+
+	/* On resume from hibernation: If the object is shmemfs backed, it has
+	 * been restored from swap. If the object is stolen however, it will be
+	 * full of whatever garbage was left in there.
+	 */
+	if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	drm_fb_helper_set_suspend(&ifbdev->helper, state);
+	console_unlock();
+}
+
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->fbdev)
+		drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
+void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct drm_fb_helper *fb_helper;
+
+	if (!ifbdev)
+		return;
+
+	fb_helper = &ifbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret) {
+		DRM_DEBUG("failed to restore crtc mode\n");
+	} else {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 0000000..54daa66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful to
+ * debug display issues, especially watermark settings.
+ *
+ * If an underrun is detected, it is logged to dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no dedicated interrupt (even though the signalling bit is in the PIPESTAT
+ * pipe interrupt register). Also, on some other platforms underrun interrupts
+ * are shared, which means that if we detect an underrun we need to disable
+ * underrun reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
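+
+/*
+ * Illustrative usage (a sketch, not code from this file): modeset code
+ * typically suppresses reporting around a pipe enable/disable, where
+ * underruns are expected, and re-arms it afterwards:
+ *
+ *	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ *	...enable or disable the pipe, expected underruns occur here...
+ *	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ */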
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	enum pipe pipe;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		if (crtc->cpu_fifo_underrun_disabled)
+			return false;
+	}
+
+	return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		if (crtc->pch_fifo_underrun_disabled)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * i9xx_check_fifo_underruns - check for fifo underruns
+ * @dev_priv: i915 device instance
+ *
+ * This function checks for fifo underruns on GMCH platforms. This needs to be
+ * done manually on modeset to make sure that we catch all underruns since they
+ * do not generate an interrupt by themselves on these platforms.
+ */
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	for_each_intel_crtc(dev_priv->dev, crtc) {
+		u32 reg = PIPESTAT(crtc->pipe);
+		u32 pipestat;
+
+		if (crtc->cpu_fifo_underrun_disabled)
+			continue;
+
+		pipestat = I915_READ(reg) & 0xffff0000;
+		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+			continue;
+
+		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+		POSTING_READ(reg);
+
+		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+	}
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+					     enum pipe pipe,
+					     bool enable, bool old)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg = PIPESTAT(pipe);
+	u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable) {
+		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+		POSTING_READ(reg);
+	} else {
+		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+	}
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+						 enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+					  DE_PIPEB_FIFO_UNDERRUN;
+
+	if (enable)
+		ironlake_enable_display_irq(dev_priv, bit);
+	else
+		ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe,
+						  bool enable, bool old)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (enable) {
+		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+		if (!ivb_can_enable_err_int(dev))
+			return;
+
+		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+	} else {
+		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+		if (old &&
+		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+				  pipe_name(pipe));
+		}
+	}
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable)
+		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+	else
+		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum transcoder pch_transcoder,
+					    bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+	if (enable)
+		ibx_enable_display_interrupt(dev_priv, bit);
+	else
+		ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum transcoder pch_transcoder,
+					    bool enable, bool old)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (enable) {
+		I915_WRITE(SERR_INT,
+			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+		if (!cpt_can_enable_serr_int(dev))
+			return;
+
+		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+	} else {
+		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+		if (old && I915_READ(SERR_INT) &
+		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+				  transcoder_name(pch_transcoder));
+		}
+	}
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+						    enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	bool old;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	old = !intel_crtc->cpu_fifo_underrun_disabled;
+	intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+	if (HAS_GMCH_DISPLAY(dev))
+		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+	else if (IS_GEN5(dev) || IS_GEN6(dev))
+		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN7(dev))
+		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+	else if (IS_GEN8(dev) || IS_GEN9(dev))
+		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+	return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables them for all pipes due to shared interrupts. Actual reporting is
+ * still per-pipe though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+					   enum pipe pipe, bool enable)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+						      enable);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return ret;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, because there is just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+					   enum transcoder pch_transcoder,
+					   bool enable)
+{
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long flags;
+	bool old;
+
+	/*
+	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+	 * has only one pch transcoder A that all pipes can use. To avoid racy
+	 * pch transcoder -> pipe lookups from interrupt code simply store the
+	 * underrun statistics in crtc A. Since we never expose this anywhere
+	 * nor use it outside of the fifo underrun code here, using the "wrong"
+	 * crtc on LPT won't cause issues.
+	 */
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+	old = !intel_crtc->pch_fifo_underrun_disabled;
+	intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+	if (HAS_PCH_IBX(dev_priv->dev))
+		ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+						enable);
+	else
+		cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+						enable, old);
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+					 enum pipe pipe)
+{
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	/* We may be called too early in init, thanks BIOS! */
+	if (crtc == NULL)
+		return;
+
+	/* GMCH can't disable fifo underruns, filter them. */
+	if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+	    to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+		return;
+
+	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+		DRM_ERROR("CPU pipe %c FIFO underrun\n",
+			  pipe_name(pipe));
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+					 enum transcoder pch_transcoder)
+{
+	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+						  false))
+		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+			  transcoder_name(pch_transcoder));
+}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 0000000..ac85357
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
+ * then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * On a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and is interested in when caching
+ * should stop and when it can be restarted. This is done by placing callbacks
+ * into the invalidate and the flush functions: At invalidate the caching must
+ * be stopped and at flush time it can be restarted. And maybe they need to know
+ * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
+ * and flush on its own) which can be achieved by placing callbacks into the
+ * flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead an idle timer
+ * delayed work should be started from the flush and flip functions and
+ * cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features using these
+ * functions is deprecated and should be avoided.
+ */
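+
+/*
+ * Illustrative sketch (hypothetical names, not code from this file): a
+ * cache-type consumer such as FBC or PSR hooks the entry points below and
+ * filters for the frontbuffer bits it cares about, e.g.:
+ *
+ *	static void consumer_invalidate(struct drm_device *dev, unsigned bits)
+ *	{
+ *		if (bits & interesting_bits)
+ *			stop_caching();
+ *	}
+ *
+ * with a matching consumer_flush() restarting the caching; the real hooks in
+ * this driver are intel_psr_invalidate()/intel_psr_flush() and friends.
+ */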
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     enum fb_op_origin origin)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	if (origin == ORIGIN_CS) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits
+			|= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits
+			&= ~obj->frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_psr_invalidate(dev, obj->frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
+	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+static void intel_frontbuffer_flush(struct drm_device *dev,
+				    unsigned frontbuffer_bits,
+				    enum fb_op_origin origin)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	/* Delay flushing when rings are still busy. */
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	if (!frontbuffer_bits)
+		return;
+
+	intel_edp_drrs_flush(dev, frontbuffer_bits);
+	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			bool retire, enum fb_op_origin origin)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned frontbuffer_bits;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	frontbuffer_bits = obj->frontbuffer_bits;
+
+	if (retire) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		/* Filter out new bits since rendering started. */
+		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given in
+ * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
+ * completion is signalled with intel_frontbuffer_flip_complete. If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+	/* Remove stale busy bits due to the old buffer. */
+	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_psr_single_frame_update(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Mask any cancelled flips. */
+	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given in
+ * @frontbuffer_bits. This is for synchronous plane updates which will happen
+ * on the next vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Remove stale busy bits due to the old buffer. */
+	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644
index 0000000..081d5f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include "intel_guc_fwif.h"
+#include "i915_guc_reg.h"
+
+struct i915_guc_client {
+	struct drm_i915_gem_object *client_obj;
+	struct intel_context *owner;
+	struct intel_guc *guc;
+	uint32_t priority;
+	uint32_t ctx_index;
+
+	uint32_t proc_desc_offset;
+	uint32_t doorbell_offset;
+	uint32_t cookie;
+	uint16_t doorbell_id;
+	uint16_t padding;		/* Maintain alignment		*/
+
+	uint32_t wq_offset;
+	uint32_t wq_size;
+
+	spinlock_t wq_lock;		/* Protects all data below	*/
+	uint32_t wq_tail;
+
+	/* GuC submission statistics & status */
+	uint64_t submissions[I915_NUM_RINGS];
+	uint32_t q_fail;
+	uint32_t b_fail;
+	int retcode;
+};
+
+enum intel_guc_fw_status {
+	GUC_FIRMWARE_FAIL = -1,
+	GUC_FIRMWARE_NONE = 0,
+	GUC_FIRMWARE_PENDING,
+	GUC_FIRMWARE_SUCCESS
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the GuC.
+ */
+struct intel_guc_fw {
+	struct drm_device *		guc_dev;
+	const char *			guc_fw_path;
+	size_t				guc_fw_size;
+	struct drm_i915_gem_object *	guc_fw_obj;
+	enum intel_guc_fw_status	guc_fw_fetch_status;
+	enum intel_guc_fw_status	guc_fw_load_status;
+
+	uint16_t			guc_fw_major_wanted;
+	uint16_t			guc_fw_minor_wanted;
+	uint16_t			guc_fw_major_found;
+	uint16_t			guc_fw_minor_found;
+};
+
+struct intel_guc {
+	struct intel_guc_fw guc_fw;
+
+	uint32_t log_flags;
+	struct drm_i915_gem_object *log_obj;
+
+	struct drm_i915_gem_object *ctx_pool_obj;
+	struct ida ctx_ids;
+
+	struct i915_guc_client *execbuf_client;
+
+	spinlock_t host2guc_lock;	/* Protects all data below	*/
+
+	DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
+	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/
+
+	/* Action status & statistics */
+	uint64_t action_count;		/* Total commands issued	*/
+	uint32_t action_cmd;		/* Last command word		*/
+	uint32_t action_status;		/* Last return status		*/
+	uint32_t action_fail;		/* Total number of failures	*/
+	int32_t action_err;		/* Last error code		*/
+
+	uint64_t submissions[I915_NUM_RINGS];
+	uint32_t last_seqno[I915_NUM_RINGS];
+};
+
+/* intel_guc_loader.c */
+extern void intel_guc_ucode_init(struct drm_device *dev);
+extern int intel_guc_ucode_load(struct drm_device *dev);
+extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
+extern int intel_guc_suspend(struct drm_device *dev);
+extern int intel_guc_resume(struct drm_device *dev);
+
+/* i915_guc_submission.c */
+int i915_guc_submission_init(struct drm_device *dev);
+int i915_guc_submission_enable(struct drm_device *dev);
+int i915_guc_submit(struct i915_guc_client *client,
+		    struct drm_i915_gem_request *rq);
+void i915_guc_submission_disable(struct drm_device *dev);
+void i915_guc_submission_fini(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 0000000..593d2f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN9		12
+#define GFXCORE_FAMILY_UNKNOWN		0x7fffffff
+
+#define GUC_CTX_PRIORITY_KMD_HIGH	0
+#define GUC_CTX_PRIORITY_HIGH		1
+#define GUC_CTX_PRIORITY_KMD_NORMAL	2
+#define GUC_CTX_PRIORITY_NORMAL		3
+
+#define GUC_MAX_GPU_CONTEXTS		1024
+#define	GUC_INVALID_CTX_ID		GUC_MAX_GPU_CONTEXTS
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE		1
+#define WQ_STATUS_SUSPENDED		2
+#define WQ_STATUS_CMD_ERROR		3
+#define WQ_STATUS_ENGINE_ID_NOT_USED	4
+#define WQ_STATUS_SUSPENDED_FROM_RESET	5
+#define WQ_TYPE_SHIFT			0
+#define   WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_PSEUDO		(0x2 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT			10
+#define WQ_LEN_SHIFT			16
+#define WQ_NO_WCFLUSH_WAIT		(1 << 27)
+#define WQ_PRESENT_WORKLOAD		(1 << 28)
+#define WQ_WORKLOAD_SHIFT		29
+#define   WQ_WORKLOAD_GENERAL		(0 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_GPGPU		(1 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_TOUCH		(2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT		20
+#define WQ_RING_TAIL_MASK		(0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED		1
+#define GUC_DOORBELL_DISABLED		0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE	(1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB	(1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL	(1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT	(1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
+#define GUC_CTX_DESC_ATTR_TERMINATED	(1 << 7)
+
+/* The guc control data is 10 DWORDs */
+#define GUC_CTL_CTXINFO			0
+#define   GUC_CTL_CTXNUM_IN16_SHIFT	0
+#define   GUC_CTL_BASE_ADDR_SHIFT	12
+#define GUC_CTL_ARAT_HIGH		1
+#define GUC_CTL_ARAT_LOW		2
+#define GUC_CTL_DEVICE_INFO		3
+#define   GUC_CTL_GTTYPE_SHIFT		0
+#define   GUC_CTL_COREFAMILY_SHIFT	7
+#define GUC_CTL_LOG_PARAMS		4
+#define   GUC_LOG_VALID			(1 << 0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
+#define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
+#define   GUC_LOG_CRASH_PAGES		1
+#define   GUC_LOG_CRASH_SHIFT		4
+#define   GUC_LOG_DPC_PAGES		3
+#define   GUC_LOG_DPC_SHIFT		6
+#define   GUC_LOG_ISR_PAGES		3
+#define   GUC_LOG_ISR_SHIFT		9
+#define   GUC_LOG_BUF_ADDR_SHIFT	12
+#define GUC_CTL_PAGE_FAULT_CONTROL	5
+#define GUC_CTL_WA			6
+#define   GUC_CTL_WA_UK_BY_DRIVER	(1 << 3)
+#define GUC_CTL_FEATURE			7
+#define   GUC_CTL_VCS2_ENABLED		(1 << 0)
+#define   GUC_CTL_KERNEL_SUBMISSIONS	(1 << 1)
+#define   GUC_CTL_FEATURE2		(1 << 2)
+#define   GUC_CTL_POWER_GATING		(1 << 3)
+#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 4)
+#define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
+#define   GUC_CTL_ENABLE_SLPC		(1 << 7)
+#define   GUC_CTL_RESET_ON_PREMPT_FAILURE	(1 << 8)
+#define GUC_CTL_DEBUG			8
+#define   GUC_LOG_VERBOSITY_SHIFT	0
+#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define	  GUC_LOG_VERBOSITY_MIN		0
+#define	  GUC_LOG_VERBOSITY_MAX		3
+#define GUC_CTL_RSRVD			9
+
+#define GUC_CTL_MAX_DWORDS		(GUC_CTL_RSRVD + 1)
+
+struct guc_doorbell_info {
+	u32 db_status;
+	u32 cookie;
+	u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+	struct {
+		u32 db_status;
+		u32 cookie;
+	};
+	u64 value_qw;
+} __packed;
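+
+/*
+ * Illustrative sketch (assumed, not code from this file): the submission code
+ * rings a doorbell by atomically updating both halves of the union through
+ * value_qw, roughly:
+ *
+ *	union guc_doorbell_qw db_exc = { .db_status = GUC_DOORBELL_ENABLED,
+ *					 .cookie = client->cookie };
+ *	...atomic cmpxchg of db_exc.value_qw into the mapped doorbell page...
+ */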
+
+#define GUC_MAX_DOORBELLS		256
+#define GUC_INVALID_DOORBELL_ID		(GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE			(PAGE_SIZE)
+#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into the work queue of the GuC. */
+struct guc_wq_item {
+	u32 header;
+	u32 context_desc;
+	u32 ring_tail;
+	u32 fence_id;
+} __packed;
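+
+/*
+ * Illustrative sketch (assumed from the WQ_* definitions above; wqi_len and
+ * ring_id are hypothetical caller locals): the header packs type, length and
+ * target engine as
+ *
+ *	wqi->header = WQ_TYPE_INORDER |
+ *		      (wqi_len << WQ_LEN_SHIFT) |
+ *		      (ring_id << WQ_TARGET_SHIFT) |
+ *		      WQ_NO_WCFLUSH_WAIT;
+ */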
+
+struct guc_process_desc {
+	u32 context_id;
+	u64 db_base_addr;
+	u32 head;
+	u32 tail;
+	u32 error_offset;
+	u64 wq_base_addr;
+	u32 wq_size_bytes;
+	u32 wq_status;
+	u32 engine_presence;
+	u32 priority;
+	u32 reserved[30];
+} __packed;
+
+/* Engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET		0
+#define GUC_ELC_ENGINE_OFFSET		29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+	u32 context_desc;
+	u32 context_id;
+	u32 ring_status;
+	u32 ring_lcra;
+	u32 ring_begin;
+	u32 ring_end;
+	u32 ring_next_free_location;
+	u32 ring_current_tail_pointer_value;
+	u8 engine_state_submit_value;
+	u8 engine_state_wait_value;
+	u16 pagefault_count;
+	u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and Driver */
+struct guc_context_desc {
+	u32 sched_common_area;
+	u32 context_id;
+	u32 pas_id;
+	u8 engines_used;
+	u64 db_trigger_cpu;
+	u32 db_trigger_uk;
+	u64 db_trigger_phy;
+	u16 db_id;
+
+	struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+	u8 attribute;
+
+	u32 priority;
+
+	u32 wq_sampled_tail_offset;
+	u32 wq_total_submit_enqueues;
+
+	u32 process_desc;
+	u32 wq_addr;
+	u32 wq_size;
+
+	u32 engine_presence;
+
+	u8 engine_suspended;
+
+	u8 reserved0[3];
+	u64 reserved1[1];
+
+	u64 desc_private;
+} __packed;
+
+#define GUC_FORCEWAKE_RENDER	(1 << 0)
+#define GUC_FORCEWAKE_MEDIA	(1 << 1)
+
+#define GUC_POWER_UNSPECIFIED	0
+#define GUC_POWER_D0		1
+#define GUC_POWER_D1		2
+#define GUC_POWER_D2		3
+#define GUC_POWER_D3		4
+
+/* This Action will be programmed in C180 - SOFT_SCRATCH_0_REG */
+enum host2guc_action {
+	HOST2GUC_ACTION_DEFAULT = 0x0,
+	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
+	HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
+	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+	HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define	GUC2HOST_RESPONSE_MASK		((u32)0xF0000000)
+#define	GUC2HOST_IS_RESPONSE(x) 	((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define	GUC2HOST_STATUS(x)		(GUC2HOST_RESPONSE_MASK | (x))
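+
+/*
+ * Example, derived from the definitions above: a success response reads back
+ * as GUC2HOST_STATUS(0) == 0xF0000000, which GUC2HOST_IS_RESPONSE() accepts
+ * because all four MASK bits are set.
+ */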
+
+/* GuC will return status back to SOFT_SCRATCH_0_REG */
+enum guc2host_status {
+	GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+	GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+	GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
new file mode 100644
index 0000000..3541f76
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Vinit Azad <vinit.azad@intel.com>
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *    Dave Gordon <david.s.gordon@intel.com>
+ *    Alex Dai <yu.dai@intel.com>
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "intel_guc.h"
+
+/**
+ * DOC: GuC
+ *
+ * intel_guc:
+ * Top-level structure of the GuC. It handles firmware loading and manages the
+ * client pool and doorbells. intel_guc owns an i915_guc_client to replace the
+ * legacy ExecList submission.
+ *
+ * Firmware versioning:
+ * The firmware build process generates a version header file with the major
+ * and minor version defined. The versions are built into the CSS header of the
+ * firmware. The i915 kernel driver sets the minimum firmware version required
+ * per platform. The firmware installation package will install (as a symbolic
+ * link) the proper version of the firmware.
+ *
+ * GuC address space:
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude the 0-512K address space from the GGTT, all gfx
+ * objects used by the GuC are pinned with a PIN_OFFSET_BIAS of the WOPCM size.
+ *
+ * Firmware log:
+ * The firmware log is enabled by setting i915.guc_log_level to a non-negative
+ * level. Log data is printed out by reading the debugfs file
+ * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
+ * firmware loading status and scratch register values.
+ *
+ */
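+
+/*
+ * Illustrative usage (a sketch, not from this file): to capture the firmware
+ * log at maximum verbosity, boot with i915.guc_log_level=3 (the
+ * GUC_LOG_VERBOSITY_MAX level) and read the dump from debugfs, e.g.:
+ *
+ *	# cat /sys/kernel/debug/dri/0/i915_guc_log_dump
+ */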
+
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
+MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+
+/* User-friendly representation of an enum */
+const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
+{
+	switch (status) {
+	case GUC_FIRMWARE_FAIL:
+		return "FAIL";
+	case GUC_FIRMWARE_NONE:
+		return "NONE";
+	case GUC_FIRMWARE_PENDING:
+		return "PENDING";
+	case GUC_FIRMWARE_SUCCESS:
+		return "SUCCESS";
+	default:
+		return "UNKNOWN!";
+	}
+}
+
+static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i, irqs;
+
+	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
+	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+
+	/* route all GT interrupts to the host */
+	I915_WRITE(GUC_BCS_RCS_IER, 0);
+	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+	I915_WRITE(GUC_WD_VECS_IER, 0);
+}
+
+static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i, irqs;
+
+	/* tell all command streamers to forward interrupts and vblank to GuC */
+	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
+	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+
+	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
+	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+	/* These three registers have the same bit definitions */
+	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
+	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
+	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+}
+
+static u32 get_gttype(struct drm_i915_private *dev_priv)
+{
+	/* XXX: GT type based on PCI device ID? field seems unused by fw */
+	return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 9:
+		return GFXCORE_FAMILY_GEN9;
+
+	default:
+		DRM_ERROR("GUC: unsupported core family\n");
+		return GFXCORE_FAMILY_UNKNOWN;
+	}
+}
+
+static void set_guc_init_params(struct drm_i915_private *dev_priv)
+{
+	struct intel_guc *guc = &dev_priv->guc;
+	u32 params[GUC_CTL_MAX_DWORDS];
+	int i;
+
+	memset(&params, 0, sizeof(params));
+
+	params[GUC_CTL_DEVICE_INFO] |=
+		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
+		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
+
+	/*
+	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+	 * second. This ARAT value is calculated by:
+	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+	 */
+	params[GUC_CTL_ARAT_HIGH] = 0;
+	params[GUC_CTL_ARAT_LOW] = 100000000;
+
+	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+			GUC_CTL_VCS2_ENABLED;
+
+	if (i915.guc_log_level >= 0) {
+		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
+		params[GUC_CTL_DEBUG] =
+			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+	}
+
+	/* If GuC submission is enabled, set up additional parameters here */
+	if (i915.enable_guc_submission) {
+		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
+
+		pgs >>= PAGE_SHIFT;
+		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+		/* Unmask this bit to enable the GuC's internal scheduler */
+		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+	}
+
+	I915_WRITE(SOFT_SCRATCH(0), 0);
+
+	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for_atomic()
+ * loop below.
+ */
+static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
+				      u32 *status)
+{
+	u32 val = I915_READ(GUC_STATUS);
+	u32 uk_val = val & GS_UKERNEL_MASK;
+	*status = val;
+	return (uk_val == GS_UKERNEL_READY ||
+		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * GuC Firmware layout:
+ * +-------------------------------+  ----
+ * |          CSS header           |  128B
+ * | contains major/minor version  |
+ * +-------------------------------+  ----
+ * |             uCode             |
+ * +-------------------------------+  ----
+ * |         RSA signature         |  256B
+ * +-------------------------------+  ----
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ *
+ * Note that GuC needs the CSS header plus uKernel code to be copied by the
+ * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
+ */
+
+#define UOS_CSS_HEADER_OFFSET		0
+#define UOS_VER_MINOR_OFFSET		0x44
+#define UOS_VER_MAJOR_OFFSET		0x46
+#define UOS_CSS_HEADER_SIZE		0x80
+#define UOS_RSA_SIG_SIZE		0x100
+
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+{
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
+	unsigned long offset;
+	struct sg_table *sg = fw_obj->pages;
+	u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
+	int i, ret = 0;
+
+	/* uCode size; this is also where the RSA signature starts */
+	offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
+	I915_WRITE(DMA_COPY_SIZE, ucode_size);
+
+	/* Copy RSA signature from the fw image to HW for verification */
+	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
+	for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
+		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+
+	/* Set the source address for the new blob */
+	offset = i915_gem_obj_ggtt_offset(fw_obj);
+	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+	/*
+	 * Set the DMA destination. Current uCode expects the code to be
+	 * loaded at 8k; locations below this are used for the stack.
+	 */
+	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+	/* Finally start the DMA */
+	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+	/*
+	 * Spin-wait for the DMA to complete & the GuC to start up.
+	 * NB: Docs recommend not using the interrupt for completion.
+	 * Measurements indicate this should take no more than 20ms, so a
+	 * timeout here indicates that the GuC has failed and is unusable.
+	 * (Higher levels of the driver will attempt to fall back to
+	 * execlist mode if this happens.)
+	 */
+	ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
+
+	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
+			I915_READ(DMA_CTRL), status);
+
+	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+		DRM_ERROR("GuC firmware signature verification failed\n");
+		ret = -ENOEXEC;
+	}
+
+	DRM_DEBUG_DRIVER("returning %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * Load the GuC firmware blob into the MinuteIA.
+ */
+static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
+{
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	struct drm_device *dev = dev_priv->dev;
+	int ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
+	if (ret) {
+		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
+		return ret;
+	}
+
+	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
+	if (ret) {
+		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
+		return ret;
+	}
+
+	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
+	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* init WOPCM */
+	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
+
+	/* Enable MIA caching. GuC clock gating is disabled. */
+	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
+
+	/* WaDisableMinuteIaClockGating:skl,bxt */
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
+		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
+					      ~GUC_ENABLE_MIA_CLOCK_GATING));
+	}
+
+	/* WaC6DisallowByGfxPause */
+	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+
+	if (IS_BROXTON(dev))
+		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+	else
+		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+	if (IS_GEN9(dev)) {
+		/* DOP Clock Gating Enable for GuC clocks */
+		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+					    I915_READ(GEN7_MISCCPCTL)));
+
+		/* allows for 5us before GT can go to RC6 */
+		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+	}
+
+	set_guc_init_params(dev_priv);
+
+	ret = guc_ucode_xfer_dma(dev_priv);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	/*
+	 * We keep the object pages for reuse during resume. But we can unpin it
+	 * now that DMA has completed, so it doesn't continue to take up space.
+	 */
+	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+
+	return ret;
+}
+
+/**
+ * intel_guc_ucode_load() - load GuC uCode into the device
+ * @dev:	drm device
+ *
+ * Called from gem_init_hw() during driver loading and also after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_guc_ucode_init(), so here we need only check that
+ * it succeeded, and then transfer the image to the h/w.
+ *
+ * Return:	non-zero code on error
+ */
+int intel_guc_ucode_load(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	int err = 0;
+
+	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+	direct_interrupts_to_host(dev_priv);
+
+	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
+		return 0;
+
+	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
+	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
+		return -ENOEXEC;
+
+	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+
+	switch (guc_fw->guc_fw_fetch_status) {
+	case GUC_FIRMWARE_FAIL:
+		/* something went wrong :( */
+		err = -EIO;
+		goto fail;
+
+	case GUC_FIRMWARE_NONE:
+	case GUC_FIRMWARE_PENDING:
+	default:
+		/* "can't happen" */
+		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
+			guc_fw->guc_fw_path,
+			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+			guc_fw->guc_fw_fetch_status);
+		err = -ENXIO;
+		goto fail;
+
+	case GUC_FIRMWARE_SUCCESS:
+		break;
+	}
+
+	err = i915_guc_submission_init(dev);
+	if (err)
+		goto fail;
+
+	err = guc_ucode_xfer(dev_priv);
+	if (err)
+		goto fail;
+
+	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
+
+	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+	if (i915.enable_guc_submission) {
+		/* The execbuf_client will be recreated. Release it first. */
+		i915_guc_submission_disable(dev);
+
+		err = i915_guc_submission_enable(dev);
+		if (err)
+			goto fail;
+		direct_interrupts_to_guc(dev_priv);
+	}
+
+	return 0;
+
+fail:
+	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
+		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
+
+	direct_interrupts_to_host(dev_priv);
+	i915_guc_submission_disable(dev);
+
+	return err;
+}
+
+static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
+{
+	struct drm_i915_gem_object *obj;
+	const struct firmware *fw;
+	const u8 *css_header;
+	const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
+	const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
+			- 0x8000; /* 32K reserved (8K stack + 24K context) */
+	int err;
+
+	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+
+	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+	if (err)
+		goto fail;
+	if (!fw)
+		goto fail;
+
+	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
+		guc_fw->guc_fw_path, fw);
+	DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
+		fw->size, minsize, maxsize);
+
+	/* Check the size of the blob before examining buffer contents */
+	if (fw->size < minsize || fw->size > maxsize)
+		goto fail;
+
+	/*
+	 * The GuC firmware image has the version number embedded at a well-known
+	 * offset within the firmware blob; note that the major / minor versions are
+	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
+	 * in terms of bytes (u8).
+	 */
+	css_header = fw->data + UOS_CSS_HEADER_OFFSET;
+	guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
+	guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
+
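+	/* the major version must match exactly; the minor found must be at least the minor wanted */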
+	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
+	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
+		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
+			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+		err = -ENOEXEC;
+		goto fail;
+	}
+
+	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
+			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+
+	mutex_lock(&dev->struct_mutex);
+	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR_OR_NULL(obj)) {
+		err = obj ? PTR_ERR(obj) : -ENOMEM;
+		goto fail;
+	}
+
+	guc_fw->guc_fw_obj = obj;
+	guc_fw->guc_fw_size = fw->size;
+
+	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
+			guc_fw->guc_fw_obj);
+
+	release_firmware(fw);
+	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
+	return;
+
+fail:
+	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+		err, fw, guc_fw->guc_fw_obj);
+	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
+		  guc_fw->guc_fw_path, err);
+
+	obj = guc_fw->guc_fw_obj;
+	if (obj)
+		drm_gem_object_unreference(&obj->base);
+	guc_fw->guc_fw_obj = NULL;
+
+	release_firmware(fw);		/* OK even if fw is NULL */
+	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+}
+
+/**
+ * intel_guc_ucode_init() - define parameters and fetch firmware
+ * @dev:	drm device
+ *
+ * Called early during driver load, but after GEM is initialised.
+ *
+ * The firmware will be transferred to the GuC's memory later,
+ * when intel_guc_ucode_load() is called.
+ */
+void intel_guc_ucode_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	const char *fw_path;
+
+	if (!HAS_GUC_SCHED(dev))
+		i915.enable_guc_submission = false;
+
+	if (!HAS_GUC_UCODE(dev)) {
+		fw_path = NULL;
+	} else if (IS_SKYLAKE(dev)) {
+		fw_path = I915_SKL_GUC_UCODE;
+		guc_fw->guc_fw_major_wanted = 4;
+		guc_fw->guc_fw_minor_wanted = 3;
+	} else {
+		i915.enable_guc_submission = false;
+		fw_path = "";	/* unknown device */
+	}
+
+	guc_fw->guc_dev = dev;
+	guc_fw->guc_fw_path = fw_path;
+	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+
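+	/* a NULL path means no GuC on this h/w; an empty path means GuC h/w with no known firmware */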
+	if (fw_path == NULL)
+		return;
+
+	if (*fw_path == '\0') {
+		DRM_ERROR("No GuC firmware known for this platform\n");
+		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+		return;
+	}
+
+	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
+	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
+	guc_fw_fetch(dev, guc_fw);
+	/* status must now be FAIL or SUCCESS */
+}
+
+/**
+ * intel_guc_ucode_fini() - clean up all allocated resources
+ * @dev:	drm device
+ */
+void intel_guc_ucode_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+
+	direct_interrupts_to_host(dev_priv);
+	i915_guc_submission_fini(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	if (guc_fw->guc_fw_obj)
+		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+	guc_fw->guc_fw_obj = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
new file mode 100644
index 0000000..1ea8532
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -0,0 +1,2210 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+	return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t enabled_bits;
+
+	enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+	WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
+	     "HDMI port enabled, expecting disabled\n");
+}
+
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->hdmi;
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+}
+
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
+{
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		return VIDEO_DIP_SELECT_AVI;
+	case HDMI_INFOFRAME_TYPE_SPD:
+		return VIDEO_DIP_SELECT_SPD;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_SELECT_VENDOR;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+		return 0;
+	}
+}
+
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
+{
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI;
+	case HDMI_INFOFRAME_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_ENABLE_VENDOR;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+		return 0;
+	}
+}
+
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
+{
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI_HSW;
+	case HDMI_INFOFRAME_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD_HSW;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_ENABLE_VS_HSW;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+		return 0;
+	}
+}
+
+static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+			    enum transcoder cpu_transcoder,
+			    enum hdmi_infoframe_type type,
+			    int i)
+{
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
+	case HDMI_INFOFRAME_TYPE_SPD:
+		return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+		return 0;
+	}
+}
+
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len)
+{
+	const uint32_t *data = frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(VIDEO_DIP_CTL);
+	int i;
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	val &= ~g4x_infoframe_enable(type);
+
+	I915_WRITE(VIDEO_DIP_CTL, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(VIDEO_DIP_DATA, *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VIDEO_DIP_DATA, 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(type);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(VIDEO_DIP_CTL, val);
+	POSTING_READ(VIDEO_DIP_CTL);
+}
+
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	u32 val = I915_READ(VIDEO_DIP_CTL);
+
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+}
+
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len)
+{
+	const uint32_t *data = frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	val &= ~g4x_infoframe_enable(type);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(type);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len)
+{
+	const uint32_t *data = frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	/* The DIP control register spec says that we need to update the AVI
+	 * infoframe without clearing its enable bit */
+	if (type != HDMI_INFOFRAME_TYPE_AVI)
+		val &= ~g4x_infoframe_enable(type);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(type);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len)
+{
+	const uint32_t *data = frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(type);
+
+	val &= ~g4x_infoframe_enable(type);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(type);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type,
+				const void *frame, ssize_t len)
+{
+	const uint32_t *data = frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+	u32 data_reg;
+	int i;
+	u32 val = I915_READ(ctl_reg);
+
+	data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
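+	/* hsw_dip_data_reg() returns 0 for an unknown infoframe type */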
+	if (data_reg == 0)
+		return;
+
+	val &= ~hsw_infoframe_enable(type);
+	I915_WRITE(ctl_reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+					    type, i >> 2), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+					    type, i >> 2), 0);
+	mmiowb();
+
+	val |= hsw_infoframe_enable(type);
+	I915_WRITE(ctl_reg, val);
+	POSTING_READ(ctl_reg);
+}
+
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
+	u32 val = I915_READ(ctl_reg);
+
+	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		      VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+}
+
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1:       DB3       | DB2 | DB1 | DB0
+ * DW2:       DB7       | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware specific hole so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+				  union hdmi_infoframe *frame)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+	ssize_t len;
+
+	/* see comment above for the reason for this offset */
+	len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+	if (len < 0)
+		return;
+
+	/* Insert the 'hole' (see big comment above) at position 3 */
+	buffer[0] = buffer[1];
+	buffer[1] = buffer[2];
+	buffer[2] = buffer[3];
+	buffer[3] = 0;
+	len++;
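+	/* the inserted ECC/reserved hole byte makes the payload one byte longer */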
+
+	intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+					 const struct drm_display_mode *adjusted_mode)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+						       adjusted_mode);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return;
+	}
+
+	if (intel_hdmi->rgb_quant_range_selectable) {
+		if (intel_crtc->config->limited_color_range)
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_LIMITED;
+		else
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
+	}
+
+	intel_write_infoframe(encoder, &frame);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill SPD infoframe\n");
+		return;
+	}
+
+	frame.spd.sdi = HDMI_SPD_SDI_PC;
+
+	intel_write_infoframe(encoder, &frame);
+}
+
+static void
+intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+			      const struct drm_display_mode *adjusted_mode)
+{
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+							  adjusted_mode);
+	if (ret < 0)
+		return;
+
+	intel_write_infoframe(encoder, &frame);
+}
+
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	u32 reg = VIDEO_DIP_CTL;
+	u32 val = I915_READ(reg);
+	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* If the registers were not initialized yet, they might be zeroes,
+	 * which means we're selecting the AVI DIP and setting its frequency
+	 * to "once". This seems to really confuse the HW and make things stop
+	 * working (the register spec says the AVI always needs to be sent
+	 * every VSync). So here we avoid writing to the register more than we
+	 * need to, and also explicitly select the AVI DIP and explicitly set
+	 * its frequency to every VSync. Avoiding the double write seems to be
+	 * enough to solve the problem, but being defensive shouldn't hurt us
+	 * either. */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!enable) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		if (port != (val & VIDEO_DIP_PORT_MASK)) {
+			DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
+		}
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		if (val & VIDEO_DIP_ENABLE) {
+			DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
+		}
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	/*
+	 * HDMI cloning is only supported on g4x which doesn't
+	 * support deep color or GCP infoframes anyway so no
+	 * need to worry about multiple HDMI sinks here.
+	 */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return connector->display_info.bpc > 8;
+
+	return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ *   phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+				       const struct drm_display_mode *mode)
+{
+	unsigned int pixels_per_group;
+
+	switch (pipe_bpp) {
+	case 30:
+		/* 4 pixels in 5 clocks */
+		pixels_per_group = 4;
+		break;
+	case 36:
+		/* 2 pixels in 3 clocks */
+		pixels_per_group = 2;
+		break;
+	case 48:
+		/* 1 pixel in 2 clocks */
+		pixels_per_group = 1;
+		break;
+	default:
+		/* phase information not relevant for 8bpc */
+		return false;
+	}
+
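+	/* all horizontal timing boundaries must fall on a packing group boundary */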
+	return mode->crtc_hdisplay % pixels_per_group == 0 &&
+		mode->crtc_htotal % pixels_per_group == 0 &&
+		mode->crtc_hblank_start % pixels_per_group == 0 &&
+		mode->crtc_hblank_end % pixels_per_group == 0 &&
+		mode->crtc_hsync_start % pixels_per_group == 0 &&
+		mode->crtc_hsync_end % pixels_per_group == 0 &&
+		((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+		 mode->crtc_htotal/2 % pixels_per_group == 0);
+}
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	u32 reg, val = 0;
+
+	if (HAS_DDI(dev_priv))
+		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+	else if (IS_VALLEYVIEW(dev_priv))
+		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+	else if (HAS_PCH_SPLIT(dev_priv->dev))
+		reg = TVIDEO_DIP_GCP(crtc->pipe);
+	else
+		return false;
+
+	/* Indicate color depth whenever the sink supports deep color */
+	if (hdmi_sink_is_deep_color(encoder))
+		val |= GCP_COLOR_INDICATION;
+
+	/* Enable default_phase whenever the display mode is suitably aligned */
+	if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+				       &crtc->config->base.adjusted_mode))
+		val |= GCP_DEFAULT_PHASE_ENABLE;
+
+	I915_WRITE(reg, val);
+
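+	/* tell the caller to enable the GCP DIP only if we set anything in it */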
+	return val != 0;
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!enable) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!enable) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	/* Set both together, unset both together: see the spec. */
+	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!enable) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
+			       const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
+	u32 val = I915_READ(reg);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		 VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
+	if (!enable) {
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP_HSW;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void intel_hdmi_prepare(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	u32 hdmi_val;
+
+	hdmi_val = SDVO_ENCODING_HDMI;
+	if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+		hdmi_val |= HDMI_COLOR_RANGE_16_235;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+	if (crtc->config->pipe_bpp > 24)
+		hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+	else
+		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
+
+	if (crtc->config->has_hdmi_sink)
+		hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+	if (HAS_PCH_CPT(dev))
+		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+	else if (IS_CHERRYVIEW(dev))
+		hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
+	else
+		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
+
+	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else if (IS_CHERRYVIEW(dev))
+		*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp, flags = 0;
+	int dotclock;
+
+	tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+	if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	if (tmp & HDMI_MODE_SELECT_HDMI)
+		pipe_config->has_hdmi_sink = true;
+
+	if (intel_hdmi->infoframe_enabled(&encoder->base))
+		pipe_config->has_infoframe = true;
+
+	if (tmp & SDVO_AUDIO_ENABLE)
+		pipe_config->has_audio = true;
+
+	if (!HAS_PCH_SPLIT(dev) &&
+	    tmp & HDMI_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
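+	/* 12bpc runs the port at 1.5x the dotclock, so scale back by 2/3 */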
+	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
+
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	WARN_ON(!crtc->config->has_hdmi_sink);
+	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+			 pipe_name(crtc->pipe));
+	intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * HW workaround, need to write this twice for issue
+	 * that may result in first write getting masked.
+	 */
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround, need to toggle enable bit off and on
+	 * for 12bpc with pixel repeat.
+	 *
+	 * FIXME: BSpec says this should be done at the end of
+	 * the modeset sequence, so not sure if this isn't too soon.
+	 */
+	if (crtc->config->pipe_bpp > 24 &&
+	    crtc->config->pixel_multiplier > 1) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		/*
+		 * HW workaround, need to write this twice for issue
+		 * that may result in first write getting masked.
+		 */
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum pipe pipe = crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+	 *
+	 * The procedure for 12bpc is as follows:
+	 * 1. disable HDMI clock gating
+	 * 2. enable HDMI with 8bpc
+	 * 3. enable HDMI with 12bpc
+	 * 4. enable HDMI clock gating
+	 */
+
+	if (crtc->config->pipe_bpp > 24) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) |
+			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= SDVO_COLOR_FORMAT_8bpc;
+	}
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	if (crtc->config->pipe_bpp > 24) {
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= HDMI_COLOR_FORMAT_12bpc;
+
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) &
+			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+	}
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
+{
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround for IBX, we need to move the port
+	 * to transcoder A after disabling it to allow the
+	 * matching DP port to be enabled on transcoder A.
+	 */
+	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+		temp &= ~SDVO_PIPE_B_SELECT;
+		temp |= SDVO_ENABLE;
+		/*
+		 * HW workaround, need to write this twice for issue
+		 * that may result in first write getting masked.
+		 */
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		temp &= ~SDVO_ENABLE;
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+
+	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+}
+
+static void g4x_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	if (crtc->config->has_audio)
+		intel_audio_codec_disable(encoder);
+
+	intel_disable_hdmi(encoder);
+}
+
+static void pch_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	if (crtc->config->has_audio)
+		intel_audio_codec_disable(encoder);
+}
+
+static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+{
+	intel_disable_hdmi(encoder);
+}
+
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
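+	/* limits are in kHz: 165 MHz for DVI sinks and g4x, 300 MHz for HSW and gen8+, 225 MHz otherwise */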
+	if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+		return 165000;
+	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
+		return 300000;
+	else
+		return 225000;
+}
+
+static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+		      int clock, bool respect_dvi_limit)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
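+	/* 25 MHz is the minimum pixel clock the HDMI/DVI link supports */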
+	if (clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+		return MODE_CLOCK_HIGH;
+
+	/* BXT DPLL can't generate 223-240 MHz */
+	if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	/* CHV DPLL can't generate 216-240 MHz */
+	if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	enum drm_mode_status status;
+	int clock;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	clock = mode->clock;
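+	/* double-clocked modes repeat each pixel, doubling the port clock */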
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		clock *= 2;
+
+	/* check if we can do 8bpc */
+	status = hdmi_port_clock_valid(hdmi, clock, true);
+
+	/* if we can't do 8bpc we may still be able to do 12bpc */
+	if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+		status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
+
+	return status;
+}
+
+static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_atomic_state *state;
+	struct intel_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int count = 0, count_hdmi = 0;
+	int i;
+
+	if (HAS_GMCH_DISPLAY(dev))
+		return false;
+
+	state = crtc_state->base.state;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+
+		count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
+		count++;
+	}
+
+	/*
+	 * HDMI 12bpc affects the clocks, so it's only possible
+	 * when not cloning with other encoder types.
+	 */
+	return count_hdmi > 0 && count_hdmi == count;
+}
+
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+			       struct intel_crtc_state *pipe_config)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+	int clock_12bpc = clock_8bpc * 3 / 2;
+	int desired_bpp;
+
+	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+
+	if (pipe_config->has_hdmi_sink)
+		pipe_config->has_infoframe = true;
+
+	if (intel_hdmi->color_range_auto) {
+		/* See CEA-861-E - 5.1 Default Encoding Parameters */
+		pipe_config->limited_color_range =
+			pipe_config->has_hdmi_sink &&
+			drm_match_cea_mode(adjusted_mode) > 1;
+	} else {
+		pipe_config->limited_color_range =
+			intel_hdmi->limited_color_range;
+	}
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+		pipe_config->pixel_multiplier = 2;
+		clock_8bpc *= 2;
+		clock_12bpc *= 2;
+	}
+
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+		pipe_config->has_pch_encoder = true;
+
+	if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
+		pipe_config->has_audio = true;
+
+	/*
+	 * HDMI is either 12 or 8 bpc, so if the display lets 10bpc sneak
+	 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
+	 * outputs. We also need to check that the higher clock still fits
+	 * within limits.
+	 */
+	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
+	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+	    hdmi_12bpc_possible(pipe_config)) {
+		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+		desired_bpp = 12*3;
+
+		/* Need to adjust the port link by 1.5x for 12bpc. */
+		pipe_config->port_clock = clock_12bpc;
+	} else {
+		DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
+		desired_bpp = 8*3;
+
+		pipe_config->port_clock = clock_8bpc;
+	}
+
+	if (!pipe_config->bw_constrained) {
+		DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+		pipe_config->pipe_bpp = desired_bpp;
+	}
+
+	if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+				  false) != MODE_OK) {
+		DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
+		return false;
+	}
+
+	/* Set user selected PAR to incoming mode's member */
+	adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
+	return true;
+}
+
+static void
+intel_hdmi_unset_edid(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+	intel_hdmi->has_hdmi_sink = false;
+	intel_hdmi->has_audio = false;
+	intel_hdmi->rgb_quant_range_selectable = false;
+
+	kfree(to_intel_connector(connector)->detect_edid);
+	to_intel_connector(connector)->detect_edid = NULL;
+}
+
+static bool
+intel_hdmi_set_edid(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct edid *edid;
+	bool connected = false;
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+			    intel_hdmi->ddc_bus));
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
+	to_intel_connector(connector)->detect_edid = edid;
+	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+		intel_hdmi->rgb_quant_range_selectable =
+			drm_rgb_quant_range_selectable(edid);
+
+		intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+			intel_hdmi->has_audio =
+				intel_hdmi->force_audio == HDMI_AUDIO_ON;
+
+		if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+			intel_hdmi->has_hdmi_sink =
+				drm_detect_hdmi_monitor(edid);
+
+		connected = true;
+	}
+
+	return connected;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+	intel_hdmi_unset_edid(connector);
+
+	if (intel_hdmi_set_edid(connector)) {
+		struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+		hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+		status = connector_status_connected;
+	} else
+		status = connector_status_disconnected;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
+	return status;
+}
+
+static void
+intel_hdmi_force(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	intel_hdmi_unset_edid(connector);
+
+	if (connector->status != connector_status_connected)
+		return;
+
+	intel_hdmi_set_edid(connector);
+	hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+}
+
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct edid *edid;
+
+	edid = to_intel_connector(connector)->detect_edid;
+	if (edid == NULL)
+		return 0;
+
+	return intel_connector_update_modes(connector, edid);
+}
+
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	bool has_audio = false;
+	struct edid *edid;
+
+	edid = to_intel_connector(connector)->detect_edid;
+	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+			struct drm_property *property,
+			uint64_t val)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		enum hdmi_force_audio i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
+			return 0;
+
+		intel_hdmi->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (i == HDMI_AUDIO_OFF_DVI)
+			intel_hdmi->has_hdmi_sink = false;
+
+		intel_hdmi->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_hdmi->color_range_auto;
+		bool old_range = intel_hdmi->limited_color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_hdmi->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_hdmi->color_range_auto = false;
+			intel_hdmi->limited_color_range = false;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_hdmi->color_range_auto = false;
+			intel_hdmi->limited_color_range = true;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_hdmi->color_range_auto &&
+		    old_range == intel_hdmi->limited_color_range)
+			return 0;
+
+		goto done;
+	}
+
+	if (property == connector->dev->mode_config.aspect_ratio_property) {
+		switch (val) {
+		case DRM_MODE_PICTURE_ASPECT_NONE:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_4_3:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_16_9:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+			break;
+		default:
+			return -EINVAL;
+		}
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (intel_dig_port->base.base.crtc)
+		intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
+
+	return 0;
+}
+
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+
+	intel_hdmi_prepare(encoder);
+
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config->has_hdmi_sink,
+				   adjusted_mode);
+}
+
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dport->hdmi;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	u32 val;
+
+	/* Enable clock channels for this port */
+	mutex_lock(&dev_priv->sb_lock);
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
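+	/* the value read above is discarded; the register is rebuilt from scratch */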
+	val = 0;
+	if (pipe)
+		val |= (1<<21);
+	else
+		val &= ~(1<<21);
+	val |= 0x001000c4;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+	/* HDMI 1.0V-2dB */
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+
+	/* Program lane clock */
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config->has_hdmi_sink,
+				   adjusted_mode);
+
+	g4x_enable_hdmi(encoder);
+
+	vlv_wait_port_ready(dev_priv, dport, 0x0);
+}
+
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+
+	intel_hdmi_prepare(encoder);
+
+	/* Program Tx lane resets to default */
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+			 DPIO_PCS_TX_LANE2_RESET |
+			 DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+			 DPIO_PCS_CLK_SOFT_RESET);
+
+	/* Fix up inter-pair skew failure */
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
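+/*
+ * Assert or deassert the TX lane resets and the PCS clock soft reset,
+ * on the second PCS channel too when more than two lanes are in use.
+ */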
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+				     bool reset)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = crtc->pipe;
+	uint32_t val;
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+	if (reset)
+		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	else
+		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+	if (crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+		if (reset)
+			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+		else
+			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+	}
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	if (reset)
+		val &= ~DPIO_PCS_CLK_SOFT_RESET;
+	else
+		val |= DPIO_PCS_CLK_SOFT_RESET;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+	if (crtc->config->lane_count > 2) {
+		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+		val |= CHV_PCS_REQ_SOFTRESET_EN;
+		if (reset)
+			val &= ~DPIO_PCS_CLK_SOFT_RESET;
+		else
+			val |= DPIO_PCS_CLK_SOFT_RESET;
+		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+	}
+}
+
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	intel_hdmi_prepare(encoder);
+
+	/*
+	 * Must trick the second common lane into life.
+	 * Otherwise we can't even access the PLL.
+	 */
+	if (ch == DPIO_CH0 && pipe == PIPE_B)
+		dport->release_cl2_override =
+			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+	chv_phy_powergate_lanes(encoder, true, 0x0);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Assert data lane reset */
+	chv_data_lane_soft_reset(encoder, true);
+
+	/* program left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA1_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA1_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA2_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA2_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	/* program clock channel usage */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+	/*
+	 * This is a bit weird since generally CL
+	 * matches the pipe, but here we need to
+	 * pick the CL based on the port.
+	 */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+	if (pipe != PIPE_B)
+		val &= ~CHV_CMN_USEDCLKCHANNEL;
+	else
+		val |= CHV_CMN_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* disable left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/*
+	 * Leave the power down bit cleared for at least one
+	 * lane so that chv_phy_powergate_ch() will power
+	 * on something when the channel is otherwise unused.
+	 * When the port is off and the override is removed
+	 * the lanes power down anyway, so otherwise it doesn't
+	 * really matter what the state of power down bits is
+	 * after this.
+	 */
+	chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+
+	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Assert data lane reset */
+	chv_data_lane_soft_reset(encoder, true);
+
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dport->hdmi;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	int data, i, stagger;
+	u32 val;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* allow hardware to manage TX FIFO reset source */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
+	/* Program Tx latency optimal setting */
+	for (i = 0; i < 4; i++) {
+		/* Set the upar bit */
+		data = (i == 1) ? 0x0 : 0x1;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+				data << DPIO_UPAR_SHIFT);
+	}
+
+	/* Data lane stagger programming */
+	if (intel_crtc->config->port_clock > 270000)
+		stagger = 0x18;
+	else if (intel_crtc->config->port_clock > 135000)
+		stagger = 0xd;
+	else if (intel_crtc->config->port_clock > 67500)
+		stagger = 0x7;
+	else if (intel_crtc->config->port_clock > 33750)
+		stagger = 0x4;
+	else
+		stagger = 0x2;
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+	val |= DPIO_TX2_STAGGER_MASK(0x1f);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+	val |= DPIO_TX2_STAGGER_MASK(0x1f);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+		       DPIO_LANESTAGGER_STRAP(stagger) |
+		       DPIO_LANESTAGGER_STRAP_OVRD |
+		       DPIO_TX1_STAGGER_MASK(0x1f) |
+		       DPIO_TX1_STAGGER_MULT(6) |
+		       DPIO_TX2_STAGGER_MULT(0));
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+		       DPIO_LANESTAGGER_STRAP(stagger) |
+		       DPIO_LANESTAGGER_STRAP_OVRD |
+		       DPIO_TX1_STAGGER_MASK(0x1f) |
+		       DPIO_TX1_STAGGER_MULT(7) |
+		       DPIO_TX2_STAGGER_MULT(5));
+
+	/* Deassert data lane reset */
+	chv_data_lane_soft_reset(encoder, false);
+
+	/* Clear calc init */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
+	/* FIXME: Program the support xxx V-dB */
+	/* Use 800mV-0dB */
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+		val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+		val &= ~DPIO_SWING_MARGIN000_MASK;
+		val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
+
+		/*
+		 * Supposedly this value shouldn't matter when unique transition
+		 * scale is disabled, but in fact it does matter. Let's just
+		 * always program the same value and hope it's OK.
+		 */
+		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+	}
+
+	/*
+	 * The documentation says to set bit 27 for ch0 and bit 26
+	 * for ch1; that might be a typo in the doc.
+	 * For now, for this unique transition scale selection, set bit
+	 * 27 for ch0 and ch1.
+	 */
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+	}
+
+	/* Start swing calculation */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config->has_hdmi_sink,
+				   adjusted_mode);
+
+	g4x_enable_hdmi(encoder);
+
+	vlv_wait_port_ready(dev_priv, dport, 0x0);
+
+	/* Second common lane will stay alive on its own now */
+	if (dport->release_cl2_override) {
+		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+		dport->release_cl2_override = false;
+	}
+}
+
+static void intel_hdmi_destroy(struct drm_connector *connector)
+{
+	kfree(to_intel_connector(connector)->detect_edid);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_hdmi_detect,
+	.force = intel_hdmi_force,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_hdmi_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.destroy = intel_hdmi_destroy,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+	.get_modes = intel_hdmi_get_modes,
+	.mode_valid = intel_hdmi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+	intel_attach_force_audio_property(connector);
+	intel_attach_broadcast_rgb_property(connector);
+	intel_hdmi->color_range_auto = true;
+	intel_attach_aspect_ratio_property(connector);
+	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+}
+
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	u8 ddc_pin;
+
+	if (info->alternate_ddc_pin) {
+		DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+			      info->alternate_ddc_pin, port_name(port));
+		return info->alternate_ddc_pin;
+	}
+
+	switch (port) {
+	case PORT_B:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_1_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPB;
+		break;
+	case PORT_C:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_2_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPC;
+		break;
+	case PORT_D:
+		if (IS_CHERRYVIEW(dev_priv))
+			ddc_pin = GMBUS_PIN_DPD_CHV;
+		else
+			ddc_pin = GMBUS_PIN_DPD;
+		break;
+	default:
+		MISSING_CASE(port);
+		ddc_pin = GMBUS_PIN_DPB;
+		break;
+	}
+
+	DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+		      ddc_pin, port_name(port));
+
+	return ddc_pin;
+}
+
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+
+	DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+		      port_name(port));
+
+	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+	connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+	connector->stereo_allowed = 1;
+
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
+	switch (port) {
+	case PORT_B:
+		/*
+		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+		 * interrupts to check the external panel connection.
+		 */
+		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+			intel_encoder->hpd_pin = HPD_PORT_A;
+		else
+			intel_encoder->hpd_pin = HPD_PORT_B;
+		break;
+	case PORT_C:
+		intel_encoder->hpd_pin = HPD_PORT_C;
+		break;
+	case PORT_D:
+		intel_encoder->hpd_pin = HPD_PORT_D;
+		break;
+	case PORT_E:
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
+	default:
+		MISSING_CASE(port);
+		return;
+	}
+
+	if (IS_VALLEYVIEW(dev)) {
+		intel_hdmi->write_infoframe = vlv_write_infoframe;
+		intel_hdmi->set_infoframes = vlv_set_infoframes;
+		intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
+	} else if (IS_G4X(dev)) {
+		intel_hdmi->write_infoframe = g4x_write_infoframe;
+		intel_hdmi->set_infoframes = g4x_set_infoframes;
+		intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
+	} else if (HAS_DDI(dev)) {
+		intel_hdmi->write_infoframe = hsw_write_infoframe;
+		intel_hdmi->set_infoframes = hsw_set_infoframes;
+		intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
+	} else if (HAS_PCH_IBX(dev)) {
+		intel_hdmi->write_infoframe = ibx_write_infoframe;
+		intel_hdmi->set_infoframes = ibx_set_infoframes;
+		intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
+	} else {
+		intel_hdmi->write_infoframe = cpt_write_infoframe;
+		intel_hdmi->set_infoframes = cpt_set_infoframes;
+		intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
+	}
+
+	if (HAS_DDI(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	intel_hdmi_add_properties(intel_hdmi, connector);
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	drm_connector_register(connector);
+	intel_hdmi->attached_connector = intel_connector;
+
+	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
+	 * written with 0xd.  Failure to do so will result in spurious
+	 * interrupts being generated on the port when a cable is not attached.
+	 */
+	if (IS_G4X(dev) && !IS_GM45(dev)) {
+		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+	}
+}
+
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	intel_encoder = &intel_dig_port->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	intel_encoder->compute_config = intel_hdmi_compute_config;
+	if (HAS_PCH_SPLIT(dev)) {
+		intel_encoder->disable = pch_disable_hdmi;
+		intel_encoder->post_disable = pch_post_disable_hdmi;
+	} else {
+		intel_encoder->disable = g4x_disable_hdmi;
+	}
+	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+	intel_encoder->get_config = intel_hdmi_get_config;
+	if (IS_CHERRYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
+		intel_encoder->pre_enable = chv_hdmi_pre_enable;
+		intel_encoder->enable = vlv_enable_hdmi;
+		intel_encoder->post_disable = chv_hdmi_post_disable;
+		intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
+	} else if (IS_VALLEYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
+		intel_encoder->enable = vlv_enable_hdmi;
+		intel_encoder->post_disable = vlv_hdmi_post_disable;
+	} else {
+		intel_encoder->pre_enable = intel_hdmi_pre_enable;
+		if (HAS_PCH_CPT(dev))
+			intel_encoder->enable = cpt_enable_hdmi;
+		else if (HAS_PCH_IBX(dev))
+			intel_encoder->enable = ibx_enable_hdmi;
+		else
+			intel_encoder->enable = g4x_enable_hdmi;
+	}
+
+	intel_encoder->type = INTEL_OUTPUT_HDMI;
+	if (IS_CHERRYVIEW(dev)) {
+		if (port == PORT_D)
+			intel_encoder->crtc_mask = 1 << 2;
+		else
+			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	} else {
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	}
+	intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+	/*
+	 * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
+	 * to work on real hardware. And since g4x can send infoframes to
+	 * only one port anyway, nothing is lost by allowing it.
+	 */
+	if (IS_G4X(dev))
+		intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
+
+	intel_dig_port->port = port;
+	intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
+	intel_dig_port->dp.output_reg = 0;
+
+	intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 0000000..d7a6437
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of
+ * the number of interrupts per hotplug pin over a period of time, and if the
+ * number of interrupts exceeds a certain threshold, the interrupt is disabled
+ * for a while before being re-enabled. The intention is to mitigate issues
+ * arising from broken hardware triggering massive amounts of interrupts and
+ * grinding the system to a halt.
+ *
+ * The current implementation expects that hotplug interrupt storms will not
+ * be seen when a DisplayPort sink is connected, hence on platforms whose DP
+ * callback is handled by i915_digport_work_func() re-enabling of hpd is not
+ * performed (it was never expected to be disabled in the first place). This
+ * is specific to DP sinks handled by that routine; any other display, such
+ * as HDMI or DVI, enabled on the same port will take the
+ * i915_hotplug_work_func() path, where this logic is handled.
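+ *
+ * In short, an illustrative summary of the flow described above:
+ *
+ *	platform irq handler (i915_irq.c)
+ *	  -> intel_hpd_irq_handler()		(storm detection/mitigation)
+ *	       -> i915_digport_work_func()	(DP short pulses, MST)
+ *	       -> i915_hotplug_work_func()	(detect + hotplug uevent)
+ *		    -> userspace modeset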
+ */
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+	switch (pin) {
+	case HPD_PORT_A:
+		*port = PORT_A;
+		return true;
+	case HPD_PORT_B:
+		*port = PORT_B;
+		return true;
+	case HPD_PORT_C:
+		*port = PORT_C;
+		return true;
+	case HPD_PORT_D:
+		*port = PORT_D;
+		return true;
+	case HPD_PORT_E:
+		*port = PORT_E;
+		return true;
+	default:
+		return false;	/* no hpd */
+	}
+}
+
+#define HPD_STORM_DETECT_PERIOD		1000
+#define HPD_STORM_THRESHOLD		5
+#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
+
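+/*
+ * A worked example with the defaults above (the exact accounting lives in
+ * intel_hpd_irq_storm_detect() below): once a pin's irq count exceeds
+ * HPD_STORM_THRESHOLD within a HPD_STORM_DETECT_PERIOD ms window, the pin is
+ * marked disabled and the connector falls back to polling until the delayed
+ * re-enable work runs HPD_STORM_REENABLE_DELAY ms (two minutes) later.
+ */
+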
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * Up to @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD
+ * ms; anything beyond that is considered an irq storm, and the pin's state is
+ * set to @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+				       enum hpd_pin pin)
+{
+	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+	bool storm = false;
+
+	if (!time_in_range(jiffies, start, end)) {
+		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+		dev_priv->hotplug.stats[pin].count = 0;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+	} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+		storm = true;
+	} else {
+		dev_priv->hotplug.stats[pin].count++;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+			      dev_priv->hotplug.stats[pin].count);
+	}
+
+	return storm;
+}
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	enum hpd_pin pin;
+	bool hpd_disabled = false;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (!intel_encoder)
+			continue;
+
+		pin = intel_encoder->hpd_pin;
+		if (pin == HPD_NONE ||
+		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+			continue;
+
+		DRM_INFO("HPD interrupt storm detected on connector %s: "
+			 "switching from hotplug detection to polling\n",
+			 connector->name);
+
+		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT
+			| DRM_CONNECTOR_POLL_DISCONNECT;
+		hpd_disabled = true;
+	}
+
+	/* Enable polling and queue hotplug re-enabling. */
+	if (hpd_disabled) {
+		drm_kms_helper_poll_enable_locked(dev);
+		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+	}
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug.reenable_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	int i;
+
+	intel_runtime_pm_get(dev_priv);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+			continue;
+
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 connector->name);
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	enum drm_connector_status old_status;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	old_status = connector->status;
+
+	connector->status = connector->funcs->detect(connector, false);
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+		      connector->base.id,
+		      connector->name,
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+	u32 long_port_mask, short_port_mask;
+	struct intel_digital_port *intel_dig_port;
+	int i;
+	u32 old_bits = 0;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	long_port_mask = dev_priv->hotplug.long_port_mask;
+	dev_priv->hotplug.long_port_mask = 0;
+	short_port_mask = dev_priv->hotplug.short_port_mask;
+	dev_priv->hotplug.short_port_mask = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		bool valid = false;
+		bool long_hpd = false;
+		intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+			continue;
+
+		if (long_port_mask & (1 << i))  {
+			valid = true;
+			long_hpd = true;
+		} else if (short_port_mask & (1 << i))
+			valid = true;
+
+		if (valid) {
+			enum irqreturn ret;
+
+			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret == IRQ_NONE) {
+				/* fall back to old school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+		}
+	}
+
+	if (old_bits) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		dev_priv->hotplug.event_bits |= old_bits;
+		spin_unlock_irq(&dev_priv->irq_lock);
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+	}
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	bool changed = false;
+	u32 hpd_event_bits;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	hpd_event_bits = dev_priv->hotplug.event_bits;
+	dev_priv->hotplug.event_bits = 0;
+
+	/* Disable hotplug on connectors that hit an irq storm. */
+	intel_hpd_irq_storm_disable(dev_priv);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
+		intel_encoder = intel_connector->encoder;
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+				      connector->name, intel_encoder->hpd_pin);
+			if (intel_encoder->hot_plug)
+				intel_encoder->hot_plug(intel_encoder);
+			if (intel_hpd_irq_event(dev, connector))
+				changed = true;
+		}
+	}
+	mutex_unlock(&mode_config->mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform
+ * specific irq handlers read and decode the appropriate registers into
+ * bitmasks of hpd pins that have triggered (@pin_mask) and of those pins
+ * that may be long pulses (@long_mask), and then call this handler. The
+ * @long_mask is ignored if the port corresponding to the pin is not a
+ * digital port.
+ *
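+ * As a hypothetical example, pin_mask = BIT(HPD_PORT_B) | BIT(HPD_PORT_C)
+ * combined with long_mask = BIT(HPD_PORT_B) means port B saw a long pulse
+ * while port C saw a short one.
+ *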
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+			   u32 pin_mask, u32 long_mask)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	enum port port;
+	bool storm_detected = false;
+	bool queue_dig = false, queue_hp = false;
+	bool is_dig_port;
+
+	if (!pin_mask)
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		if (!(BIT(i) & pin_mask))
+			continue;
+
+		is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+			      dev_priv->hotplug.irq_port[port];
+
+		if (is_dig_port) {
+			bool long_hpd = long_mask & BIT(i);
+
+			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+					 long_hpd ? "long" : "short");
+			/*
+			 * For long HPD pulses we want the digital work queue to run,
+			 * but we still want HPD storm detection to function.
+			 */
+			queue_dig = true;
+			if (long_hpd) {
+				dev_priv->hotplug.long_port_mask |= (1 << port);
+			} else {
+				/* for short HPD just trigger the digital queue */
+				dev_priv->hotplug.short_port_mask |= (1 << port);
+				continue;
+			}
+		}
+
+		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits themselves. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt on pin %d although disabled\n", i);
+			continue;
+		}
+
+		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+			continue;
+
+		if (!is_dig_port) {
+			dev_priv->hotplug.event_bits |= BIT(i);
+			queue_hp = true;
+		}
+
+		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+			dev_priv->hotplug.event_bits &= ~BIT(i);
+			storm_detected = true;
+		}
+	}
+
+	if (storm_detected)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock(&dev_priv->irq_lock);
+
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue, because otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	if (queue_dig)
+		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+	if (queue_hp)
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+}
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug
+ * and poll requests can run concurrently with other code, so locking rules
+ * must be obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_hpd_pin(i) {
+		dev_priv->hotplug.stats[i].count = 0;
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+
+		/* MST has a dynamic intel_connector->encoder and its reprobing
+		 * is all handled by the MST helpers. */
+		if (intel_connector->mst_port)
+			continue;
+
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	/*
+	 * Interrupt setup is already guaranteed to be single-threaded; this is
+	 * just to make the assert_spin_locked checks happy.
+	 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+			  intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->hotplug.long_port_mask = 0;
+	dev_priv->hotplug.short_port_mask = 0;
+	dev_priv->hotplug.event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
new file mode 100644
index 0000000..f3bee54
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+struct gmbus_pin {
+	const char *name;
+	int reg;
+};
+
+/* Map gmbus pin pairs to names and registers. */
+static const struct gmbus_pin gmbus_pins[] = {
+	[GMBUS_PIN_SSC] = { "ssc", GPIOB },
+	[GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+	[GMBUS_PIN_PANEL] = { "panel", GPIOC },
+	[GMBUS_PIN_DPC] = { "dpc", GPIOD },
+	[GMBUS_PIN_DPB] = { "dpb", GPIOE },
+	[GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_bdw[] = {
+	[GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+	[GMBUS_PIN_DPC] = { "dpc", GPIOD },
+	[GMBUS_PIN_DPB] = { "dpb", GPIOE },
+	[GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_skl[] = {
+	[GMBUS_PIN_DPC] = { "dpc", GPIOD },
+	[GMBUS_PIN_DPB] = { "dpb", GPIOE },
+	[GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_bxt[] = {
+	[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
+	[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
+	[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
+};
+
+/* pin is expected to be valid */
+static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
+					     unsigned int pin)
+{
+	if (IS_BROXTON(dev_priv))
+		return &gmbus_pins_bxt[pin];
+	else if (IS_SKYLAKE(dev_priv))
+		return &gmbus_pins_skl[pin];
+	else if (IS_BROADWELL(dev_priv))
+		return &gmbus_pins_bdw[pin];
+	else
+		return &gmbus_pins[pin];
+}
+
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+			      unsigned int pin)
+{
+	unsigned int size;
+
+	if (IS_BROXTON(dev_priv))
+		size = ARRAY_SIZE(gmbus_pins_bxt);
+	else if (IS_SKYLAKE(dev_priv))
+		size = ARRAY_SIZE(gmbus_pins_skl);
+	else if (IS_BROADWELL(dev_priv))
+		size = ARRAY_SIZE(gmbus_pins_bdw);
+	else
+		size = ARRAY_SIZE(gmbus_pins);
+
+	return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
+}
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+	return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+void
+intel_i2c_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GMBUS0, 0);
+	I915_WRITE(GMBUS4, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+	u32 val;
+
+	/* When using bit banging for I2C, this bit needs to be set to 1 */
+	if (!IS_PINEVIEW(dev_priv->dev))
+		return;
+
+	val = I915_READ(DSPCLK_GATE_D);
+	if (enable)
+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
+	else
+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
+}
+
+static u32 get_reserved(struct intel_gmbus *bus)
+{
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = 0;
+
+	/* On most chips, these bits must be preserved in software. */
+	if (!IS_I830(dev) && !IS_845G(dev))
+		reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
+
+	return reserved;
+}
+
+static int get_clock(void *data)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	u32 clock_bits;
+
+	if (state_high)
+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+	else
+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+			GPIO_CLOCK_VAL_MASK;
+
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+	POSTING_READ(bus->gpio_reg);
+}
+
+static void set_data(void *data, int state_high)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	u32 data_bits;
+
+	if (state_high)
+		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+	else
+		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+			GPIO_DATA_VAL_MASK;
+
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+	POSTING_READ(bus->gpio_reg);
+}
+
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	intel_i2c_reset(dev_priv->dev);
+	intel_i2c_quirk_set(dev_priv, true);
+	set_data(bus, 1);
+	set_clock(bus, 1);
+	udelay(I2C_RISEFALL_TIME);
+	return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	set_data(bus, 1);
+	set_clock(bus, 1);
+	intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
+intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
+{
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct i2c_algo_bit_data *algo;
+
+	algo = &bus->bit_algo;
+
+	bus->gpio_reg = dev_priv->gpio_mmio_base +
+		get_gmbus_pin(dev_priv, pin)->reg;
+
+	bus->adapter.algo_data = algo;
+	algo->setsda = set_data;
+	algo->setscl = set_clock;
+	algo->getsda = get_data;
+	algo->getscl = get_clock;
+	algo->pre_xfer = intel_gpio_pre_xfer;
+	algo->post_xfer = intel_gpio_post_xfer;
+	algo->udelay = I2C_RISEFALL_TIME;
+	algo->timeout = usecs_to_jiffies(2200);
+	algo->data = bus;
+}
+
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+		     u32 gmbus2_status,
+		     u32 gmbus4_irq_en)
+{
+	int i;
+	u32 gmbus2 = 0;
+	DEFINE_WAIT(wait);
+
+	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+		gmbus4_irq_en = 0;
+
+	/* Important: The hw handles only the first bit, so set only one! Since
+	 * we also need to check for NAKs besides the hw ready/idle signal, we
+	 * need to wake up periodically and check that ourselves. */
+	I915_WRITE(GMBUS4, gmbus4_irq_en);
+
+	for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
+		prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		gmbus2 = I915_READ_NOTRACE(GMBUS2);
+		if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+			break;
+
+		schedule_timeout(1);
+	}
+	finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+	I915_WRITE(GMBUS4, 0);
+
+	if (gmbus2 & GMBUS_SATOER)
+		return -ENXIO;
+	if (gmbus2 & gmbus2_status)
+		return 0;
+	return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
+
+	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+		return wait_for(C, 10);
+
+	/* Important: The hw handles only the first bit, so set only one! */
+	I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+
+	ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+				 msecs_to_jiffies_timeout(10));
+
+	I915_WRITE(GMBUS4, 0);
+
+	if (ret)
+		return 0;
+	else
+		return -ETIMEDOUT;
+#undef C
+}
+
+static int
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+		      unsigned short addr, u8 *buf, unsigned int len,
+		      u32 gmbus1_index)
+{
+	I915_WRITE(GMBUS1,
+		   gmbus1_index |
+		   GMBUS_CYCLE_WAIT |
+		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+		u32 val, loop = 0;
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
+		if (ret)
+			return ret;
+
+		val = I915_READ(GMBUS3);
+		do {
+			*buf++ = val & 0xff;
+			val >>= 8;
+		} while (--len && ++loop < 4);
+	}
+
+	return 0;
+}
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+		u32 gmbus1_index)
+{
+	u8 *buf = msg->buf;
+	unsigned int rx_size = msg->len;
+	unsigned int len;
+	int ret;
+
+	do {
+		len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+					    buf, len, gmbus1_index);
+		if (ret)
+			return ret;
+
+		rx_size -= len;
+		buf += len;
+	} while (rx_size != 0);
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+		       unsigned short addr, u8 *buf, unsigned int len)
+{
+	unsigned int chunk_size = len;
+	u32 val, loop;
+
+	val = loop = 0;
+	while (len && loop < 4) {
+		val |= *buf++ << (8 * loop++);
+		len -= 1;
+	}
+
+	I915_WRITE(GMBUS3, val);
+	I915_WRITE(GMBUS1,
+		   GMBUS_CYCLE_WAIT |
+		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+
+		val = loop = 0;
+		do {
+			val |= *buf++ << (8 * loop);
+		} while (--len && ++loop < 4);
+
+		I915_WRITE(GMBUS3, val);
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+	u8 *buf = msg->buf;
+	unsigned int tx_size = msg->len;
+	unsigned int len;
+	int ret;
+
+	do {
+		len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+		ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+		if (ret)
+			return ret;
+
+		buf += len;
+		tx_size -= len;
+	} while (tx_size != 0);
+
+	return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+	return (i + 1 < num &&
+		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		(msgs[i + 1].flags & I2C_M_RD));
+}
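+
+/*
+ * An illustrative sketch (not driver code): a typical index read is a
+ * 1-byte write immediately followed by a read, e.g. fetching one byte at
+ * offset 0x10 of a DDC/EDID device at address 0x50:
+ *
+ *	u8 index = 0x10, val;
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &index },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ *	};
+ *
+ * gmbus_is_index_read(msgs, 0, 2) is true for this pair, and both messages
+ * are then handled as a single INDEX cycle.
+ */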
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+	u32 gmbus1_index = 0;
+	u32 gmbus5 = 0;
+	int ret;
+
+	if (msgs[0].len == 2)
+		gmbus5 = GMBUS_2BYTE_INDEX_EN |
+			 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+	if (msgs[0].len == 1)
+		gmbus1_index = GMBUS_CYCLE_INDEX |
+			       (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+	/* GMBUS5 holds 16-bit index */
+	if (gmbus5)
+		I915_WRITE(GMBUS5, gmbus5);
+
+	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+	/* Clear GMBUS5 after each index transfer */
+	if (gmbus5)
+		I915_WRITE(GMBUS5, 0);
+
+	return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+	   struct i2c_msg *msgs,
+	   int num)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	int i = 0, inc, try = 0;
+	int ret = 0;
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	mutex_lock(&dev_priv->gmbus_mutex);
+
+	if (bus->force_bit) {
+		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+		goto out;
+	}
+
+retry:
+	I915_WRITE(GMBUS0, bus->reg0);
+
+	for (; i < num; i += inc) {
+		inc = 1;
+		if (gmbus_is_index_read(msgs, i, num)) {
+			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+			inc = 2; /* an index read is two msgs */
+		} else if (msgs[i].flags & I2C_M_RD) {
+			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+		} else {
+			ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+		}
+
+		if (ret == -ETIMEDOUT)
+			goto timeout;
+		if (ret == -ENXIO)
+			goto clear_err;
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+					   GMBUS_HW_WAIT_EN);
+		if (ret == -ENXIO)
+			goto clear_err;
+		if (ret)
+			goto timeout;
+	}
+
+	/* Generate a STOP condition on the bus. Note that gmbus can't generate
+	 * a STOP on the very first cycle. To simplify the code we
+	 * unconditionally generate the STOP condition with an additional gmbus
+	 * cycle. */
+	I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+	/* Mark the GMBUS interface as disabled after waiting for idle.
+	 * We will re-enable it at the start of the next xfer,
+	 * till then let it sleep.
+	 */
+	if (gmbus_wait_idle(dev_priv)) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+			 adapter->name);
+		ret = -ETIMEDOUT;
+	}
+	I915_WRITE(GMBUS0, 0);
+	ret = ret ?: i;
+	goto out;
+
+clear_err:
+	/*
+	 * Wait for bus to IDLE before clearing NAK.
+	 * If we clear the NAK while bus is still active, then it will stay
+	 * active and the next transaction may fail.
+	 *
+	 * If no ACK is received during the address phase of a transaction, the
+	 * adapter must report -ENXIO. It is not clear what to return if no ACK
+	 * is received at other times. But we have to be careful to not return
+	 * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiescents -
+	 * timing out seems to happen when there _is_ a ddc chip present, but
+	 * it's slow responding and only answers on the 2nd retry.
+	 */
+	ret = -ENXIO;
+	if (gmbus_wait_idle(dev_priv)) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
+
+	/* Toggle the Software Clear Interrupt bit. This has the effect
+	 * of resetting the GMBUS controller and so clearing the
+	 * BUS_ERROR raised by the slave's NAK.
+	 */
+	I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
+	I915_WRITE(GMBUS1, 0);
+	I915_WRITE(GMBUS0, 0);
+
+	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+			 adapter->name, msgs[i].addr,
+			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+	/*
+	 * Passive adapters sometimes NAK the first probe. Retry the first
+	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+	 * has retries internally. See also the retry loop in
+	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+	 */
+	if (ret == -ENXIO && i == 0 && try++ == 0) {
+		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+			      adapter->name);
+		goto retry;
+	}
+
+	goto out;
+
+timeout:
+	DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+		 bus->adapter.name, bus->reg0 & 0xff);
+	I915_WRITE(GMBUS0, 0);
+
+	/* Hardware may not support GMBUS over these pins; try GPIO bit banging instead. */
+	bus->force_bit = 1;
+	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
+out:
+	mutex_unlock(&dev_priv->gmbus_mutex);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
+	return ret;
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+	return i2c_bit_algo.functionality(adapter) &
+		(I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+		/* I2C_FUNC_10BIT_ADDR | */
+		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+	.master_xfer	= gmbus_xfer,
+	.functionality	= gmbus_func
+};
+
+/**
+ * intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int intel_setup_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_gmbus *bus;
+	unsigned int pin;
+	int ret;
+
+	if (HAS_PCH_NOP(dev))
+		return 0;
+	else if (HAS_PCH_SPLIT(dev))
+		dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+	else if (IS_VALLEYVIEW(dev))
+		dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
+	else
+		dev_priv->gpio_mmio_base = 0;
+
+	mutex_init(&dev_priv->gmbus_mutex);
+	init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+
+	for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+			continue;
+
+		bus = &dev_priv->gmbus[pin];
+
+		bus->adapter.owner = THIS_MODULE;
+		bus->adapter.class = I2C_CLASS_DDC;
+		snprintf(bus->adapter.name,
+			 sizeof(bus->adapter.name),
+			 "i915 gmbus %s",
+			 get_gmbus_pin(dev_priv, pin)->name);
+
+		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->dev_priv = dev_priv;
+
+		bus->adapter.algo = &gmbus_algorithm;
+
+		/* By default use a conservative clock rate */
+		bus->reg0 = pin | GMBUS_RATE_100KHZ;
+
+		/* gmbus seems to be broken on i830 */
+		if (IS_I830(dev))
+			bus->force_bit = 1;
+
+		intel_gpio_setup(bus, pin);
+
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
+	}
+
+	intel_i2c_reset(dev_priv->dev);
+
+	return 0;
+
+err:
+	while (pin--) {
+		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+			continue;
+
+		bus = &dev_priv->gmbus[pin];
+		i2c_del_adapter(&bus->adapter);
+	}
+	return ret;
+}
+
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+					    unsigned int pin)
+{
+	if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+		return NULL;
+
+	return &dev_priv->gmbus[pin].adapter;
+}
+
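+/*
+ * Note: @speed selects the GMBUS clock rate bits (9:8 of reg0) and is
+ * expected to be one of the GMBUS_RATE_* values, e.g. (a hypothetical call):
+ *
+ *	intel_gmbus_set_speed(adapter, GMBUS_RATE_50KHZ);
+ */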
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	bus->force_bit += force_bit ? 1 : -1;
+	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+		      force_bit ? "en" : "dis", adapter->name,
+		      bus->force_bit);
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_gmbus *bus;
+	unsigned int pin;
+
+	for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+			continue;
+
+		bus = &dev_priv->gmbus[pin];
+		i2c_del_adapter(&bus->adapter);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
new file mode 100644
index 0000000..7058f75
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -0,0 +1,2558 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *    Michel Thierry <michel.thierry@intel.com>
+ *    Thomas Daniel <thomas.daniel@intel.com>
+ *    Oscar Mateo <oscar.mateo@intel.com>
+ *
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things into the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself;
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need a set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong to contexts (and not to engines, as before)
+ * and contexts are uniquely tied to a given engine (and are not reusable,
+ * as before), we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
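+
+/*
+ * The pairing rule above, as a minimal pseudo-code sketch (head(), next()
+ * and elsp_write() are hypothetical helpers for illustration only; the
+ * real implementation is execlists_context_unqueue() in this file):
+ *
+ *	req0 = head(queue);
+ *	while (next(req0) && next(req0)->ctx == req0->ctx)
+ *		req0 = next(req0);		(same ctx: newest tail wins)
+ *	req1 = next(req0);			(may be NULL)
+ *	elsp_write(req0, req1);
+ */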
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_mocs.h"
+
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
+
+#define RING_EXECLIST_QFULL		(1 << 0x2)
+#define RING_EXECLIST1_VALID		(1 << 0x3)
+#define RING_EXECLIST0_VALID		(1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
+#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
+#define RING_EXECLIST0_ACTIVE		(1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
+
+#define CTX_LRI_HEADER_0		0x01
+#define CTX_CONTEXT_CONTROL		0x02
+#define CTX_RING_HEAD			0x04
+#define CTX_RING_TAIL			0x06
+#define CTX_RING_BUFFER_START		0x08
+#define CTX_RING_BUFFER_CONTROL		0x0a
+#define CTX_BB_HEAD_U			0x0c
+#define CTX_BB_HEAD_L			0x0e
+#define CTX_BB_STATE			0x10
+#define CTX_SECOND_BB_HEAD_U		0x12
+#define CTX_SECOND_BB_HEAD_L		0x14
+#define CTX_SECOND_BB_STATE		0x16
+#define CTX_BB_PER_CTX_PTR		0x18
+#define CTX_RCS_INDIRECT_CTX		0x1a
+#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
+#define CTX_LRI_HEADER_1		0x21
+#define CTX_CTX_TIMESTAMP		0x22
+#define CTX_PDP3_UDW			0x24
+#define CTX_PDP3_LDW			0x26
+#define CTX_PDP2_UDW			0x28
+#define CTX_PDP2_LDW			0x2a
+#define CTX_PDP1_UDW			0x2c
+#define CTX_PDP1_LDW			0x2e
+#define CTX_PDP0_UDW			0x30
+#define CTX_PDP0_LDW			0x32
+#define CTX_LRI_HEADER_2		0x41
+#define CTX_R_PWR_CLK_STATE		0x42
+#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44
+
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
+	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
+	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
+	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
+}
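+
+/*
+ * For example (illustrative only), ASSIGN_CTX_PDP(ppgtt, reg_state, 3)
+ * writes the upper and lower 32 bits of PDP3's page directory address into
+ * reg_state[CTX_PDP3_UDW + 1] and reg_state[CTX_PDP3_LDW + 1], i.e. the
+ * value slots that follow the register offsets defined above.
+ */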
+
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
+	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
+}
+
+enum {
+	ADVANCED_CONTEXT = 0,
+	LEGACY_32B_CONTEXT,
+	ADVANCED_AD_CONTEXT,
+	LEGACY_64B_CONTEXT
+};
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
+		LEGACY_64B_CONTEXT :\
+		LEGACY_32B_CONTEXT)
+enum {
+	FAULT_AND_HANG = 0,
+	FAULT_AND_HALT, /* Debug only */
+	FAULT_AND_STREAM,
+	FAULT_AND_CONTINUE /* Unsupported */
+};
+#define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
+
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *default_ctx_obj);
+
+
+/**
+ * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
+ * @dev: DRM device.
+ * @enable_execlists: value of i915.enable_execlists module parameter.
+ *
+ * Only certain platforms support Execlists (the prerequisites being
+ * support for Logical Ring Contexts and Aliasing PPGTT or better).
+ *
+ * Return: 1 if Execlists is supported and has to be enabled.
+ */
+int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+{
+	WARN_ON(i915.enable_ppgtt == -1);
+
+	/* On platforms where execlists are available, vGPU only
+	 * supports execlist mode, not ring buffer mode.
+	 */
+	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+		return 1;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		return 1;
+
+	if (enable_execlists == 0)
+		return 0;
+
+	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+	    i915.use_mmio_flip >= 0)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx_obj: Logical Ring Context backing object.
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handle so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * Return: 20-bit globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+{
+	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+			LRC_PPHWSP_PN * PAGE_SIZE;
+
+	/* LRCA is required to be 4K aligned so the most significant 20 bits
+	 * are globally unique */
+	return lrca >> 12;
+}
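+
+/*
+ * Worked example (illustrative numbers): a context image whose PPHWSP page
+ * sits at GGTT offset 0x00123000 is 4K aligned, so the function above
+ * returns 0x00123000 >> 12 == 0x123 as the 20-bit context ID.
+ */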
+
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	       (ring->id == VCS || ring->id == VCS2);
+}
+
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+				     struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	uint64_t desc;
+	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+			LRC_PPHWSP_PN * PAGE_SIZE;
+
+	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
+
+	desc = GEN8_CTX_VALID;
+	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(ctx_obj->base.dev))
+		desc |= GEN8_CTX_L3LLC_COHERENT;
+	desc |= GEN8_CTX_PRIVILEGE;
+	desc |= lrca;
+	desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
+
+	/* TODO: WaDisableLiteRestore when we start using semaphore
+	 * signalling between Command Streamers */
+	/* desc |= GEN8_CTX_FORCE_RESTORE; */
+
+	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
+	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+	if (disable_lite_restore_wa(ring))
+		desc |= GEN8_CTX_FORCE_RESTORE;
+
+	return desc;
+}
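+
+/*
+ * Layout of the resulting descriptor, as a sketch derived from the bits
+ * ORed in above (not an authoritative register definition):
+ *
+ *	bit  0:		context valid (GEN8_CTX_VALID)
+ *	bits 3-4:	addressing mode (LEGACY_32B/64B_CONTEXT)
+ *	bit  5:		L3LLC coherency (gen8 only)
+ *	bit  8:		privilege (GEN8_CTX_PRIVILEGE)
+ *	bits 12-31:	LRCA (4K-aligned GGTT address of the context image)
+ *	bits 32-51:	20-bit context ID (intel_execlists_ctx_id())
+ */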
+
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+				 struct drm_i915_gem_request *rq1)
+{
+	struct intel_engine_cs *ring = rq0->ring;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint64_t desc[2];
+
+	if (rq1) {
+		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+		rq1->elsp_submitted++;
+	} else {
+		desc[1] = 0;
+	}
+
+	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+	rq0->elsp_submitted++;
+
+	/* You must always write both descriptors in the order below. */
+	spin_lock(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+	/* The context is automatically loaded after the following */
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+
+	/* ELSP is a wo register, use another nearby reg for posting */
+	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
+	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+	spin_unlock(&dev_priv->uncore.lock);
+}
+
+static int execlists_update_context(struct drm_i915_gem_request *rq)
+{
+	struct intel_engine_cs *ring = rq->ring;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
+	struct page *page;
+	uint32_t *reg_state;
+
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	reg_state = kmap_atomic(page);
+
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
+
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		/* True 32b PPGTT with dynamic page allocation: update PDP
+		 * registers and point the unallocated PDPs to scratch page.
+		 * PML4 is allocated during ppgtt init, so this is not needed
+		 * in 48-bit mode.
+		 */
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+	}
+
+	kunmap_atomic(reg_state);
+
+	return 0;
+}
+
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+				      struct drm_i915_gem_request *rq1)
+{
+	execlists_update_context(rq0);
+
+	if (rq1)
+		execlists_update_context(rq1);
+
+	execlists_elsp_write(rq0, rq1);
+}
+
+static void execlists_context_unqueue(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+
+	assert_spin_locked(&ring->execlist_lock);
+
+	/*
+	 * If irqs are not active, generate a warning, as batches that finish
+	 * without the irqs may get lost and a GPU hang may occur.
+	 */
+	WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
+
+	if (list_empty(&ring->execlist_queue))
+		return;
+
+	/* Try to read in pairs */
+	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+				 execlist_link) {
+		if (!req0) {
+			req0 = cursor;
+		} else if (req0->ctx == cursor->ctx) {
+			/* Same ctx: ignore first request, as second request
+			 * will update tail past first request's workload */
+			cursor->elsp_submitted = req0->elsp_submitted;
+			list_del(&req0->execlist_link);
+			list_add_tail(&req0->execlist_link,
+				&ring->execlist_retired_req_list);
+			req0 = cursor;
+		} else {
+			req1 = cursor;
+			break;
+		}
+	}
+
+	if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+		/*
+		 * WaIdleLiteRestore: make sure we never cause a lite
+		 * restore with HEAD==TAIL
+		 */
+		if (req0->elsp_submitted) {
+			/*
+			 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
+			 * as we resubmit the request. See gen8_emit_request()
+			 * for where we prepare the padding after the end of the
+			 * request.
+			 */
+			struct intel_ringbuffer *ringbuf;
+
+			ringbuf = req0->ctx->engine[ring->id].ringbuf;
+			req0->tail += 8;
+			req0->tail &= ringbuf->size - 1;
+		}
+	}
+
+	WARN_ON(req1 && req1->elsp_submitted);
+
+	execlists_submit_requests(req0, req1);
+}
+
+static bool execlists_check_remove_request(struct intel_engine_cs *ring,
+					   u32 request_id)
+{
+	struct drm_i915_gem_request *head_req;
+
+	assert_spin_locked(&ring->execlist_lock);
+
+	head_req = list_first_entry_or_null(&ring->execlist_queue,
+					    struct drm_i915_gem_request,
+					    execlist_link);
+
+	if (head_req != NULL) {
+		struct drm_i915_gem_object *ctx_obj =
+				head_req->ctx->engine[ring->id].state;
+		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+			WARN(head_req->elsp_submitted == 0,
+			     "Never submitted head request\n");
+
+			if (--head_req->elsp_submitted <= 0) {
+				list_del(&head_req->execlist_link);
+				list_add_tail(&head_req->execlist_link,
+					&ring->execlist_retired_req_list);
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/**
+ * intel_lrc_irq_handler() - handle Context Switch interrupts
+ * @ring: Engine Command Streamer to handle.
+ *
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 status_pointer;
+	u8 read_pointer;
+	u8 write_pointer;
+	u32 status = 0;
+	u32 status_id;
+	u32 submit_contexts = 0;
+
+	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+
+	read_pointer = ring->next_context_status_buffer;
+	write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+	if (read_pointer > write_pointer)
+		write_pointer += GEN8_CSB_ENTRIES;
+
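+	/*
+	 * Illustrative example of the wrap handling above (assuming
+	 * GEN8_CSB_ENTRIES == 6): if we last read up to entry 4 and the HW
+	 * write pointer is now 1, write_pointer is biased to 7 and the loop
+	 * below consumes entries 5, 0 and 1.
+	 */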
+	spin_lock(&ring->execlist_lock);
+
+	while (read_pointer < write_pointer) {
+		read_pointer++;
+		status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
+		status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+			continue;
+
+		if (status & GEN8_CTX_STATUS_PREEMPTED) {
+			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
+				if (execlists_check_remove_request(ring, status_id))
+					WARN(1, "Lite Restored request removed from queue\n");
+			} else
+				WARN(1, "Preemption without Lite Restore\n");
+		}
+
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+		    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+			if (execlists_check_remove_request(ring, status_id))
+				submit_contexts++;
+		}
+	}
+
+	if (disable_lite_restore_wa(ring)) {
+		/* Prevent a context from preempting itself */
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+		    (submit_contexts != 0))
+			execlists_context_unqueue(ring);
+	} else if (submit_contexts != 0) {
+		execlists_context_unqueue(ring);
+	}
+
+	spin_unlock(&ring->execlist_lock);
+
+	WARN(submit_contexts > 2, "More than two context complete events?\n");
+	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+		   _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+				 ((u32)ring->next_context_status_buffer &
+				  GEN8_CSB_PTR_MASK) << 8));
+}
+
+static int execlists_context_queue(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *ring = request->ring;
+	struct drm_i915_gem_request *cursor;
+	int num_elements = 0;
+
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(request);
+
+	i915_gem_request_reference(request);
+
+	spin_lock_irq(&ring->execlist_lock);
+
+	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+		if (++num_elements > 2)
+			break;
+
+	if (num_elements > 2) {
+		struct drm_i915_gem_request *tail_req;
+
+		tail_req = list_last_entry(&ring->execlist_queue,
+					   struct drm_i915_gem_request,
+					   execlist_link);
+
+		if (request->ctx == tail_req->ctx) {
+			WARN(tail_req->elsp_submitted != 0,
+				"More than 2 already-submitted reqs queued\n");
+			list_del(&tail_req->execlist_link);
+			list_add_tail(&tail_req->execlist_link,
+				&ring->execlist_retired_req_list);
+		}
+	}
+
+	list_add_tail(&request->execlist_link, &ring->execlist_queue);
+	if (num_elements == 0)
+		execlists_context_unqueue(ring);
+
+	spin_unlock_irq(&ring->execlist_lock);
+
+	return 0;
+}
+
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
+				 struct list_head *vmas)
+{
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	struct i915_vma *vma;
+	uint32_t flush_domains = 0;
+	bool flush_chipset = false;
+	int ret;
+
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		if (obj->active & other_rings) {
+			ret = i915_gem_object_sync(obj, req->ring, &req);
+			if (ret)
+				return ret;
+		}
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			flush_chipset |= i915_gem_clflush_object(obj, false);
+
+		flush_domains |= obj->base.write_domain;
+	}
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
+	/* Unconditionally invalidate gpu caches and ensure that we do flush
+	 * any residual writes from the previous batch.
+	 */
+	return logical_ring_invalidate_all_caches(req);
+}
+
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+{
+	int ret;
+
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+	if (request->ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
+				       int bytes)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
+	unsigned space;
+	int ret;
+
+	if (intel_ring_space(ringbuf) >= bytes)
+		return 0;
+
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(target, &ring->request_list, list) {
+		/*
+		 * The request queue is per-engine, so can contain requests
+		 * from multiple ringbuffers. Here, we must ignore any that
+		 * aren't from the ringbuffer we're considering.
+		 */
+		if (target->ringbuf != ringbuf)
+			continue;
+
+		/* Would completion of this request free enough space? */
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
+					   ringbuf->size);
+		if (space >= bytes)
+			break;
+	}
+
+	if (WARN_ON(&target->list == &ring->request_list))
+		return -ENOSPC;
+
+	ret = i915_wait_request(target);
+	if (ret)
+		return ret;
+
+	ringbuf->space = space;
+	return 0;
+}
+
+/*
+ * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * @request: Request to advance the logical ringbuffer of.
+ *
+ * The tail is updated in our logical ringbuffer struct, not in the actual context. What
+ * really happens during submission is that the context and current tail will be placed
+ * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
+ * point, the tail *inside* the context is updated and the ELSP written to.
+ */
+static void
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *ring = request->ring;
+	struct drm_i915_private *dev_priv = request->i915;
+
+	intel_logical_ring_advance(request->ringbuf);
+
+	request->tail = request->ringbuf->tail;
+
+	if (intel_ring_stopped(ring))
+		return;
+
+	if (dev_priv->guc.execbuf_client)
+		i915_guc_submit(dev_priv->guc.execbuf_client, request);
+	else
+		execlists_context_queue(request);
+}
+
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
+{
+	uint32_t __iomem *virt;
+	int rem = ringbuf->size - ringbuf->tail;
+
+	virt = ringbuf->virtual_start + ringbuf->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ringbuf->tail = 0;
+	intel_ring_update_space(ringbuf);
+}
+
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request, so we need to
+		 * flush out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we don't need an immediate
+			 * wrap, and only need to effectively wait for the
+			 * reserved size space from the start of the ringbuffer.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
+	}
+
+	if (wait_bytes) {
+		ret = logical_ring_wait_for_space(req, wait_bytes);
+		if (unlikely(ret))
+			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
+	}
+
+	return 0;
+}
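+
+/*
+ * Worked example of the wait logic above (hypothetical numbers, with no
+ * outstanding reservation, i.e. reserved_size == 0): with size == 4096,
+ * tail == 4000 and a 160-byte request, remain_actual == 96 and the request
+ * does not fit in the usable space at the end, so we wait for
+ * remain_actual + total_bytes == 256 bytes to drain and then wrap, filling
+ * the final 96 bytes with MI_NOOP via __wrap_ring_buffer().
+ */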
+
+/**
+ * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
+ *
+ * @req: The request to start some new work for
+ * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
+ *
+ * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
+ * be wrapped, or we need to wait a bit for the tail to be updated). This function
+ * takes care of that and also preallocates a request (every workload submission is
+ * still mediated through requests, same as it did with legacy ringbuffer submission).
+ *
+ * Return: non-zero if the ringbuffer is not ready to be written to.
+ */
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+{
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	WARN_ON(req == NULL);
+	dev_priv = req->ring->dev->dev_private;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
+	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
+	return 0;
+}
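+
+/*
+ * Typical usage, as a sketch mirroring callers elsewhere in this file (the
+ * payload emitted here is arbitrary):
+ *
+ *	ret = intel_logical_ring_begin(req, 4);
+ *	if (ret)
+ *		return ret;
+ *	intel_logical_ring_emit(ringbuf, MI_NOOP);
+ *	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+ *	intel_logical_ring_emit(ringbuf, INSTPM);
+ *	intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+ *	intel_logical_ring_advance(ringbuf);
+ */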
+
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_logical_ring_begin(request, 0);
+}
+
+/**
+ * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
+ * @params: structure gathering the execbuffer call parameters (device, file,
+ *          engine, context, batch object, dispatch flags, etc.).
+ * @args: execbuffer call arguments.
+ * @vmas: list of vmas.
+ *
+ * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
+ * away the submission details of the execbuffer ioctl call.
+ *
+ * Return: non-zero if the submission fails.
+ */
+int intel_execlists_submission(struct i915_execbuffer_params *params,
+			       struct drm_i915_gem_execbuffer2 *args,
+			       struct list_head *vmas)
+{
+	struct drm_device       *dev = params->dev;
+	struct intel_engine_cs  *ring = params->ring;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	u64 exec_start;
+	int instp_mode;
+	u32 instp_mask;
+	int ret;
+
+	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (instp_mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			return -EINVAL;
+		}
+
+		if (instp_mode != dev_priv->relative_constants_mode) {
+			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+				return -EINVAL;
+			}
+
+			/* The HW changed the meaning on this bit on gen6 */
+			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		return -EINVAL;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		DRM_DEBUG("sol reset is gen7 only\n");
+		return -EINVAL;
+	}
+
+	ret = execlists_move_to_gpu(params->request, vmas);
+	if (ret)
+		return ret;
+
+	if (ring == &dev_priv->ring[RCS] &&
+	    instp_mode != dev_priv->relative_constants_mode) {
+		ret = intel_logical_ring_begin(params->request, 4);
+		if (ret)
+			return ret;
+
+		intel_logical_ring_emit(ringbuf, MI_NOOP);
+		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+		intel_logical_ring_emit(ringbuf, INSTPM);
+		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+		intel_logical_ring_advance(ringbuf);
+
+		dev_priv->relative_constants_mode = instp_mode;
+	}
+
+	exec_start = params->batch_obj_vm_offset +
+		     args->batch_start_offset;
+
+	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
+
+	return 0;
+}
+
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_request *req, *tmp;
+	struct list_head retired_list;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	if (list_empty(&ring->execlist_retired_req_list))
+		return;
+
+	INIT_LIST_HEAD(&retired_list);
+	spin_lock_irq(&ring->execlist_lock);
+	list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+	spin_unlock_irq(&ring->execlist_lock);
+
+	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+		struct intel_context *ctx = req->ctx;
+		struct drm_i915_gem_object *ctx_obj =
+				ctx->engine[ring->id].state;
+
+		if (ctx_obj && (ctx != ring->default_context))
+			intel_lr_context_unpin(req);
+		list_del(&req->execlist_link);
+		i915_gem_request_unreference(req);
+	}
+}
+
+void intel_logical_ring_stop(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	/* TODO: Is this correct with Execlists enabled? */
+	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+		return;
+	}
+	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+}
+
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *ctx_obj,
+		struct intel_ringbuffer *ringbuf)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret)
+		return ret;
+
+	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+	if (ret)
+		goto unpin_ctx_obj;
+
+	ctx_obj->dirty = true;
+
+	/* Invalidate GuC TLB. */
+	if (i915.enable_guc_submission)
+		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+	return ret;
+
+unpin_ctx_obj:
+	i915_gem_object_ggtt_unpin(ctx_obj);
+
+	return ret;
+}
+
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+{
+	int ret = 0;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
+		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+		if (ret)
+			goto reset_pin_count;
+	}
+	return ret;
+
+reset_pin_count:
+	rq->ctx->engine[ring->id].pin_count = 0;
+	return ret;
+}
+
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+{
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+
+	if (ctx_obj) {
+		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+		if (--rq->ctx->engine[ring->id].pin_count == 0) {
+			intel_unpin_ringbuffer_obj(ringbuf);
+			i915_gem_object_ggtt_unpin(ctx_obj);
+		}
+	}
+}
+
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
+{
+	int ret, i;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *w = &dev_priv->workarounds;
+
+	if (WARN_ON_ONCE(w->count == 0))
+		return 0;
+
+	ring->gpu_caches_dirty = true;
+	ret = logical_ring_flush_all_caches(req);
+	if (ret)
+		return ret;
+
+	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	for (i = 0; i < w->count; i++) {
+		intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+	}
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+	intel_logical_ring_advance(ringbuf);
+
+	ring->gpu_caches_dirty = true;
+	ret = logical_ring_flush_all_caches(req);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#define wa_ctx_emit(batch, index, cmd)					\
+	do {								\
+		int __index = (index)++;				\
+		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+			return -ENOSPC;					\
+		}							\
+		batch[__index] = (cmd);					\
+	} while (0)
+
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: this is applied in a WA
+ * batch where the values are only initialized once, so we cannot read the
+ * register value at the beginning and reuse it later. Hence we save its
+ * value to memory, upload a constant value with bit 21 set, and then
+ * restore the saved value afterwards. To simplify the WA, the constant is
+ * formed from the default value of this register. This shouldn't be a
+ * problem because we only modify it for a short period and this batch is
+ * non-preemptible. We could of course use additional instructions that
+ * read the actual value of the register at that time and set our bit of
+ * interest, but that would complicate the WA.
+ *
+ * This WA is also required for Gen9, so extracting it as a function avoids
+ * code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+						uint32_t *const batch,
+						uint32_t index)
+{
+	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/*
+	 * WaDisableLSQCROPERFforOCL:skl
+	 * This WA is implemented in skl_init_clock_gating() but since
+	 * this batch updates GEN8_L3SQCREG4 with default value we need to
+	 * set this bit here to retain the WA during flush.
+	 */
+	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, l3sqc4_flush);
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_DC_FLUSH_ENABLE));
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	return index;
+}
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t offset,
+				    uint32_t start_alignment)
+{
+	return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+			     uint32_t offset,
+			     uint32_t size_alignment)
+{
+	wa_ctx->size = offset - wa_ctx->offset;
+
+	WARN(wa_ctx->size % size_alignment,
+	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+	     wa_ctx->size, size_alignment);
+	return 0;
+}
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned. This is
+ *    updated with the offset value received as input.
+ *  size: size of the batch in DWORDs, though HW expects it in terms of cachelines.
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of the batch; it should be
+ *  cache-aligned, otherwise it is adjusted accordingly.
+ *  Typically we only have one indirect_ctx and one per_ctx batch buffer, which
+ *  are initialized at the beginning and shared across all contexts, but this
+ *  field allows us to have multiple batches at different offsets and select
+ *  them based on some criteria. At the moment this batch always starts at the
+ *  beginning of the page and we don't have multiple wa_ctx batch buffers.
+ *
+ *  The number of WAs applied is not known at the beginning; we use this field
+ *  to return the number of DWORDs written.
+ *
+ *  Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ *  added as padding to make it cacheline-aligned. MI_BATCH_BUFFER_END is
+ *  added to the per-ctx batch, and the two together make a complete batch
+ *  buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	uint32_t scratch_addr;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+	if (IS_BROADWELL(ring->dev)) {
+		int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+		if (rc < 0)
+			return rc;
+		index = rc;
+	}
+
+	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+	/* Actual scratch location is at 128 bytes offset */
+	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+				   PIPE_CONTROL_GLOBAL_GTT_IVB |
+				   PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_QW_WRITE));
+	wa_ctx_emit(batch, index, scratch_addr);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	/*
+	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+	 * execution depends on the length specified in terms of cache lines
+	 * in the register CTX_RCS_INDIRECT_CTX
+	 */
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned.
+ *  size: size of the batch in DWORDs, though HW expects it in terms of cachelines.
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of this batch.
+ *   This batch starts immediately after the indirect_ctx batch. Since we ensure
+ *   that indirect_ctx ends on a cacheline, this batch is aligned automatically.
+ *
+ *   The number of DWORDs written is returned using this field.
+ *
+ *  This batch is terminated with MI_BATCH_BUFFER_END, so we need not add padding
+ *  to align it to a cacheline; padding after MI_BATCH_BUFFER_END is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	if (ret < 0)
+		return ret;
+	index = ret;
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+		wa_ctx_emit(batch, index,
+			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+		wa_ctx_emit(batch, index, MI_NOOP);
+	}
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+	int ret;
+
+	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+	if (!ring->wa_ctx.obj) {
+		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+		return -ENOMEM;
+	}
+
+	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+	if (ret) {
+		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+				 ret);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+	if (ring->wa_ctx.obj) {
+		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		ring->wa_ctx.obj = NULL;
+	}
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+	int ret;
+	uint32_t *batch;
+	uint32_t offset;
+	struct page *page;
+	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+	WARN_ON(ring->id != RCS);
+
+	/* update this when WAs for higher Gens are added */
+	if (INTEL_INFO(ring->dev)->gen > 9) {
+		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+			  INTEL_INFO(ring->dev)->gen);
+		return 0;
+	}
+
+	/* some WAs perform writes to the scratch page, ensure it is valid */
+	if (ring->scratch.obj == NULL) {
+		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+		return -EINVAL;
+	}
+
+	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+		return ret;
+	}
+
+	page = i915_gem_object_get_page(wa_ctx->obj, 0);
+	batch = kmap_atomic(page);
+	offset = 0;
+
+	if (INTEL_INFO(ring->dev)->gen == 8) {
+		ret = gen8_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen8_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	} else if (INTEL_INFO(ring->dev)->gen == 9) {
+		ret = gen9_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen9_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	}
+
+out:
+	kunmap_atomic(batch);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 next_context_status_buffer_hw;
+
+	lrc_setup_hardware_status_page(ring,
+				ring->default_context->engine[ring->id].state);
+
+	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+
+	if (ring->status_page.obj) {
+		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+			   (u32)ring->status_page.gfx_addr);
+		POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+	}
+
+	I915_WRITE(RING_MODE_GEN7(ring),
+		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
+		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+	POSTING_READ(RING_MODE_GEN7(ring));
+
+	/*
+	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
+	 * zero, we need to read the write pointer from hardware and use its
+	 * value because "this register is power context save restored".
+	 * Effectively, these states have been observed:
+	 *
+	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+	 * BDW  | CSB regs not reset       | CSB regs reset       |
+	 * CHT  | CSB regs not reset       | CSB regs not reset   |
+	 */
+	next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+						   & GEN8_CSB_PTR_MASK);
+
+	/*
+	 * When the CSB registers are reset (also after power-up / gpu reset),
+	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
+	 * this special case, so the first element read is CSB[0].
+	 */
+	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+	ring->next_context_status_buffer = next_context_status_buffer_hw;
+	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+
+	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+
+	return 0;
+}
+
+static int gen8_init_render_ring(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = gen8_init_common_ring(ring);
+	if (ret)
+		return ret;
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 *
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+	 */
+	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+	return init_workarounds_ring(ring);
+}
+
+static int gen9_init_render_ring(struct intel_engine_cs *ring)
+{
+	int ret;
+
+	ret = gen8_init_common_ring(ring);
+	if (ret)
+		return ret;
+
+	return init_workarounds_ring(ring);
+}
+
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+	int i, ret;
+
+	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+			      u64 offset, unsigned dispatch_flags)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	/* Don't rely on hw updating PDPs, especially in lite-restore.
+	 * Ideally, we should set Force PD Restore in the ctx descriptor,
+	 * but we can't. Force Restore would be a second option, but
+	 * it is unsafe in case of lite-restore (because the ctx is
+	 * not idle). PML4 is allocated during ppgtt init, so this is
+	 * not needed in 48-bit mode.
+	 */
+	if (req->ctx->ppgtt &&
+	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
+		    !intel_vgpu_active(req->i915->dev)) {
+			ret = intel_logical_ring_emit_pdps(req);
+			if (ret)
+				return ret;
+		}
+
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+	}
+
+	ret = intel_logical_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+				(ppgtt<<8) |
+				(dispatch_flags & I915_DISPATCH_RS ?
+				 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
+	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
+			   u32 invalidate_domains,
+			   u32 unused)
+{
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_engine_cs *ring = ringbuf->ring;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_logical_ring_begin(request, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW + 1;
+
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+		cmd |= MI_INVALIDATE_TLB;
+		if (ring == &dev_priv->ring[VCS])
+			cmd |= MI_INVALIDATE_BSD;
+	}
+
+	intel_logical_ring_emit(ringbuf, cmd);
+	intel_logical_ring_emit(ringbuf,
+				I915_GEM_HWS_SCRATCH_ADDR |
+				MI_FLUSH_DW_USE_GTT);
+	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
+	intel_logical_ring_emit(ringbuf, 0); /* value */
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+				  u32 invalidate_domains,
+				  u32 flush_domains)
+{
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_engine_cs *ring = ringbuf->ring;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	bool vf_flush_wa;
+	u32 flags = 0;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+	}
+
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	/*
+	 * On GEN9+, before VF_CACHE_INVALIDATE we need to emit a NULL pipe
+	 * control.
+	 */
+	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
+		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+
+	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+	if (ret)
+		return ret;
+
+	if (vf_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+	intel_logical_ring_emit(ringbuf, flags);
+	intel_logical_ring_emit(ringbuf, scratch_addr);
+	intel_logical_ring_emit(ringbuf, 0);
+	intel_logical_ring_emit(ringbuf, 0);
+	intel_logical_ring_emit(ringbuf, 0);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+	/*
+	 * On BXT A steppings there is a HW coherency issue whereby the
+	 * MI_STORE_DATA_IMM storing the completed request's seqno
+	 * occasionally doesn't invalidate the CPU cache. Work around this by
+	 * clflushing the corresponding cacheline whenever the caller wants
+	 * the coherency to be guaranteed. Note that this cacheline is known
+	 * to be clean at this point, since we only write it in
+	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
+	 * this clflush in practice becomes an invalidate operation.
+	 */
+
+	if (!lazy_coherency)
+		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+
+	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
+	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int gen8_emit_request(struct drm_i915_gem_request *request)
+{
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_engine_cs *ring = ringbuf->ring;
+	u32 cmd;
+	int ret;
+
+	/*
+	 * Reserve space for 2 NOOPs at the end of each request to be
+	 * used as a workaround for not being allowed to do lite
+	 * restore with HEAD==TAIL (WaIdleLiteRestore).
+	 */
+	ret = intel_logical_ring_begin(request, 8);
+	if (ret)
+		return ret;
+
+	cmd = MI_STORE_DWORD_IMM_GEN4;
+	cmd |= MI_GLOBAL_GTT;
+
+	intel_logical_ring_emit(ringbuf, cmd);
+	intel_logical_ring_emit(ringbuf,
+				(ring->status_page.gfx_addr +
+				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+	intel_logical_ring_emit(ringbuf, 0);
+	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance_and_submit(request);
+
+	/*
+	 * Here we add two extra NOOPs as padding to avoid
+	 * lite restore of a context with HEAD==TAIL.
+	 */
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
+{
+	struct render_state so;
+	int ret;
+
+	ret = i915_gem_render_state_prepare(req->ring, &so);
+	if (ret)
+		return ret;
+
+	if (so.rodata == NULL)
+		return 0;
+
+	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+				       I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
+
+	ret = req->ring->emit_bb_start(req,
+				       (so.ggtt_offset + so.aux_batch_offset),
+				       I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
+
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
+out:
+	i915_gem_render_state_fini(&so);
+	return ret;
+}
+
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+{
+	int ret;
+
+	ret = intel_logical_ring_workarounds_emit(req);
+	if (ret)
+		return ret;
+
+	ret = intel_rcs_context_init_mocs(req);
+	/*
+	 * Failing to program the MOCS is non-fatal. The system will not
+	 * run at peak performance, so log an error and carry on.
+	 */
+	if (ret)
+		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+	return intel_lr_context_render_state_init(req);
+}
+
+/**
+ * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
+ * @ring: Engine Command Streamer.
+ */
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	dev_priv = ring->dev->dev_private;
+
+	intel_logical_ring_stop(ring);
+	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
+	i915_cmd_parser_fini_ring(ring);
+	i915_gem_batch_pool_fini(&ring->batch_pool);
+
+	if (ring->status_page.obj) {
+		kunmap(sg_page(ring->status_page.obj->pages->sgl));
+		ring->status_page.obj = NULL;
+	}
+
+	lrc_destroy_wa_ctx_obj(ring);
+}
+
+static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+	int ret;
+
+	/* Intentionally left blank. */
+	ring->buffer = NULL;
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	i915_gem_batch_pool_init(dev, &ring->batch_pool);
+	init_waitqueue_head(&ring->irq_queue);
+
+	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
+	spin_lock_init(&ring->execlist_lock);
+
+	ret = i915_cmd_parser_init_ring(ring);
+	if (ret)
+		return ret;
+
+	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	if (ret)
+		return ret;
+
+	/* As this is the default context, always pin it */
+	ret = intel_lr_context_do_pin(ring,
+			ring->default_context->engine[ring->id].state,
+			ring->default_context->engine[ring->id].ringbuf);
+	if (ret) {
+		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+			  ring->name, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int logical_render_ring_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+	ring->irq_enable_mask =
+		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+	ring->irq_keep_mask =
+		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+	if (HAS_L3_DPF(dev))
+		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		ring->init_hw = gen9_init_render_ring;
+	else
+		ring->init_hw = gen8_init_render_ring;
+	ring->init_context = gen8_init_rcs_context;
+	ring->cleanup = intel_fini_pipe_control;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush_render;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+
+	ring->dev = dev;
+
+	ret = intel_init_pipe_control(ring);
+	if (ret)
+		return ret;
+
+	ret = intel_init_workaround_bb(ring);
+	if (ret) {
+		/*
+		 * We continue even if we fail to initialize WA batch
+		 * because we only expect rare glitches but nothing
+		 * critical to prevent us from using GPU
+		 */
+		DRM_ERROR("WA batch buffer initialization failed: %d\n",
+			  ret);
+	}
+
+	ret = logical_ring_init(dev, ring);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
+}
+
+static int logical_bsd_ring_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+
+	ring->name = "bsd ring";
+	ring->id = VCS;
+	ring->mmio_base = GEN6_BSD_RING_BASE;
+	ring->irq_enable_mask =
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+	ring->irq_keep_mask =
+		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+
+	ring->init_hw = gen8_init_common_ring;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+
+	return logical_ring_init(dev, ring);
+}
+
+static int logical_bsd2_ring_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+	ring->name = "bds2 ring";
+	ring->id = VCS2;
+	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	ring->irq_enable_mask =
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+	ring->irq_keep_mask =
+		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+
+	ring->init_hw = gen8_init_common_ring;
+	ring->get_seqno = gen8_get_seqno;
+	ring->set_seqno = gen8_set_seqno;
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+
+	return logical_ring_init(dev, ring);
+}
+
+static int logical_blt_ring_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+
+	ring->name = "blitter ring";
+	ring->id = BCS;
+	ring->mmio_base = BLT_RING_BASE;
+	ring->irq_enable_mask =
+		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+	ring->irq_keep_mask =
+		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+
+	ring->init_hw = gen8_init_common_ring;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+
+	return logical_ring_init(dev, ring);
+}
+
+static int logical_vebox_ring_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+
+	ring->name = "video enhancement ring";
+	ring->id = VECS;
+	ring->mmio_base = VEBOX_RING_BASE;
+	ring->irq_enable_mask =
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+	ring->irq_keep_mask =
+		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+
+	ring->init_hw = gen8_init_common_ring;
+	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+
+	return logical_ring_init(dev, ring);
+}
+
+/**
+ * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
+ * @dev: DRM device.
+ *
+ * This function initializes the engines for Execlists submission (the equivalent
+ * in the legacy ringbuffer submission world is i915_gem_init_rings). Only those
+ * engines that are present in the hardware are initialized.
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_logical_rings_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = logical_render_ring_init(dev);
+	if (ret)
+		return ret;
+
+	if (HAS_BSD(dev)) {
+		ret = logical_bsd_ring_init(dev);
+		if (ret)
+			goto cleanup_render_ring;
+	}
+
+	if (HAS_BLT(dev)) {
+		ret = logical_blt_ring_init(dev);
+		if (ret)
+			goto cleanup_bsd_ring;
+	}
+
+	if (HAS_VEBOX(dev)) {
+		ret = logical_vebox_ring_init(dev);
+		if (ret)
+			goto cleanup_blt_ring;
+	}
+
+	if (HAS_BSD2(dev)) {
+		ret = logical_bsd2_ring_init(dev);
+		if (ret)
+			goto cleanup_vebox_ring;
+	}
+
+	return 0;
+
+cleanup_vebox_ring:
+	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+
+	return ret;
+}
+
+static u32
+make_rpcs(struct drm_device *dev)
+{
+	u32 rpcs = 0;
+
+	/*
+	 * No explicit RPCS request is needed to ensure full
+	 * slice/subslice/EU enablement prior to Gen9.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		return 0;
+
+	/*
+	 * Starting in Gen9, render power gating can leave
+	 * slice/subslice/EU in a partially enabled state. We
+	 * must make an explicit request through RPCS for full
+	 * enablement.
+	 */
+	if (INTEL_INFO(dev)->has_slice_pg) {
+		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
+		rpcs |= INTEL_INFO(dev)->slice_total <<
+			GEN8_RPCS_S_CNT_SHIFT;
+		rpcs |= GEN8_RPCS_ENABLE;
+	}
+
+	if (INTEL_INFO(dev)->has_subslice_pg) {
+		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
+		rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+			GEN8_RPCS_SS_CNT_SHIFT;
+		rpcs |= GEN8_RPCS_ENABLE;
+	}
+
+	if (INTEL_INFO(dev)->has_eu_pg) {
+		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+			GEN8_RPCS_EU_MIN_SHIFT;
+		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+			GEN8_RPCS_EU_MAX_SHIFT;
+		rpcs |= GEN8_RPCS_ENABLE;
+	}
+
+	return rpcs;
+}
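+
+/*
+ * Worked example (hypothetical device): with has_slice_pg, has_subslice_pg
+ * and has_eu_pg all set, slice_total = 3, subslice_per_slice = 4 and
+ * eu_per_subslice = 8, the returned value requests 3 slices, 4 subslices
+ * per slice and a min/max of 8 EUs per subslice, with GEN8_RPCS_ENABLE set
+ * so the request takes effect on the next context restore.
+ */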
+
+static int
+populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	struct page *page;
+	uint32_t *reg_state;
+	int ret;
+
+	if (!ppgtt)
+		ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+		return ret;
+	}
+
+	ret = i915_gem_object_get_pages(ctx_obj);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Could not get object pages\n");
+		return ret;
+	}
+
+	i915_gem_object_pin_pages(ctx_obj);
+
+	/* The state page of the context object (page LRC_STATE_PN) contains
+	 * some fields which must be set up prior to the first execution. */
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	reg_state = kmap_atomic(page);
+
+	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
+	 * commands followed by (reg, value) pairs. The values we are setting here are
+	 * only for the first context restore: on a subsequent save, the GPU will
+	 * recreate this batchbuffer with new values (including all the missing
+	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
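+	/*
+	 * Illustrative excerpt of the layout written below, as (index: value)
+	 * pairs into the state page:
+	 *
+	 *	CTX_LRI_HEADER_0:	MI_LOAD_REGISTER_IMM(n) | MI_LRI_FORCE_POSTED
+	 *	CTX_RING_TAIL:		RING_TAIL(mmio_base) register offset
+	 *	CTX_RING_TAIL+1:	tail value (initially 0)
+	 *	...
+	 */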
+	if (ring->id == RCS)
+		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
+	else
+		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
+	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
+	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
+	reg_state[CTX_CONTEXT_CONTROL+1] =
+		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+				   CTX_CTRL_RS_CTX_ENABLE);
+	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
+	reg_state[CTX_RING_HEAD+1] = 0;
+	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
+	reg_state[CTX_RING_TAIL+1] = 0;
+	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+	/* Ring buffer start address is not known until the buffer is pinned.
+	 * It is written to the context image in execlists_update_context()
+	 */
+	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
+	reg_state[CTX_RING_BUFFER_CONTROL+1] =
+			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
+	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
+	reg_state[CTX_BB_HEAD_U+1] = 0;
+	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
+	reg_state[CTX_BB_HEAD_L+1] = 0;
+	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
+	reg_state[CTX_BB_STATE+1] = (1<<5);
+	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
+	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
+	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
+	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
+	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
+	reg_state[CTX_SECOND_BB_STATE+1] = 0;
+	if (ring->id == RCS) {
+		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
+		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
+		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
+		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
+		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
+		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		if (ring->wa_ctx.obj) {
+			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+			reg_state[CTX_RCS_INDIRECT_CTX+1] =
+				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+				CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+			reg_state[CTX_BB_PER_CTX_PTR+1] =
+				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+				0x01;
+		}
+	}
+	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
+	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
+	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
+	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
+	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
+	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
+	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
+	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
+	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
+	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
+	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
+	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+
+	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		/* 64b PPGTT (48bit canonical)
+		 * PDP0_DESCRIPTOR contains the base address to PML4 and
+		 * other PDP Descriptors are ignored.
+		 */
+		ASSIGN_CTX_PML4(ppgtt, reg_state);
+	} else {
+		/* 32b PPGTT
+		 * PDP*_DESCRIPTOR contains the base address of space supported.
+		 * With dynamic page allocation, PDPs may not be allocated at
+		 * this point. Point the unallocated PDPs to the scratch page
+		 */
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+	}
+
+	if (ring->id == RCS) {
+		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
+		reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
+		reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
+	}
+
+	kunmap_atomic(reg_state);
+
+	ctx_obj->dirty = 1;
+	set_page_dirty(page);
+	i915_gem_object_unpin_pages(ctx_obj);
+
+	return 0;
+}
+
+/**
+ * intel_lr_context_free() - free the LRC specific bits of a context
+ * @ctx: the LR context to free.
+ *
+ * The real context freeing is done in i915_gem_context_free: this only
+ * takes care of the bits that are LRC related: the per-engine backing
+ * objects and the logical ringbuffer.
+ */
+void intel_lr_context_free(struct intel_context *ctx)
+{
+	int i;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+
+		if (ctx_obj) {
+			struct intel_ringbuffer *ringbuf =
+					ctx->engine[i].ringbuf;
+			struct intel_engine_cs *ring = ringbuf->ring;
+
+			if (ctx == ring->default_context) {
+				intel_unpin_ringbuffer_obj(ringbuf);
+				i915_gem_object_ggtt_unpin(ctx_obj);
+			}
+			WARN_ON(ctx->engine[ring->id].pin_count);
+			intel_ringbuffer_free(ringbuf);
+			drm_gem_object_unreference(&ctx_obj->base);
+		}
+	}
+}
+
+static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+{
+	int ret = 0;
+
+	WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+
+	switch (ring->id) {
+	case RCS:
+		if (INTEL_INFO(ring->dev)->gen >= 9)
+			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+		else
+			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+		break;
+	case VCS:
+	case BCS:
+	case VECS:
+	case VCS2:
+		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
+		break;
+	}
+
+	return ret;
+}
+
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *default_ctx_obj)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct page *page;
+
+	/* The HWSP is part of the default context object in LRC mode. */
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+			+ LRC_PPHWSP_PN * PAGE_SIZE;
+	page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+	ring->status_page.page_addr = kmap(page);
+	ring->status_page.obj = default_ctx_obj;
+
+	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+			(u32)ring->status_page.gfx_addr);
+	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
+/**
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * @ctx: LR context to create.
+ * @ring: engine to be used with the context.
+ *
+ * This function can be called more than once, with different engines, if we plan
+ * to use the context with them. The context backing objects and the ringbuffers
+ * (especially the ringbuffer backing objects) consume a lot of memory, which is why
+ * the creation is deferred: it's better to make sure first that we need to use
+ * a given ring with the context.
+ *
+ * Return: non-zero on error.
+ */
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
+				     struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj;
+	uint32_t context_size;
+	struct intel_ringbuffer *ringbuf;
+	int ret;
+
+	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
+	WARN_ON(ctx->engine[ring->id].state);
+
+	context_size = round_up(get_lr_context_size(ring), 4096);
+
+	/* One extra page is reserved as shared data between the driver and the GuC */
+	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+
+	ctx_obj = i915_gem_alloc_object(dev, context_size);
+	if (!ctx_obj) {
+		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
+		return -ENOMEM;
+	}
+
+	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
+		goto error_deref_obj;
+	}
+
+	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+		goto error_ringbuf;
+	}
+
+	ctx->engine[ring->id].ringbuf = ringbuf;
+	ctx->engine[ring->id].state = ctx_obj;
+
+	if (ctx != ring->default_context && ring->init_context) {
+		struct drm_i915_gem_request *req;
+
+		ret = i915_gem_request_alloc(ring, ctx, &req);
+		if (ret) {
+			DRM_ERROR("ring create req: %d\n", ret);
+			goto error_ringbuf;
+		}
+
+		ret = ring->init_context(req);
+		if (ret) {
+			DRM_ERROR("ring init context: %d\n",
+				ret);
+			i915_gem_request_cancel(req);
+			goto error_ringbuf;
+		}
+		i915_add_request_no_flush(req);
+	}
+	return 0;
+
+error_ringbuf:
+	intel_ringbuffer_free(ringbuf);
+error_deref_obj:
+	drm_gem_object_unreference(&ctx_obj->base);
+	ctx->engine[ring->id].ringbuf = NULL;
+	ctx->engine[ring->id].state = NULL;
+	return ret;
+}
+
+void intel_lr_context_reset(struct drm_device *dev,
+			struct intel_context *ctx)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_object *ctx_obj =
+				ctx->engine[ring->id].state;
+		struct intel_ringbuffer *ringbuf =
+				ctx->engine[ring->id].ringbuf;
+		uint32_t *reg_state;
+		struct page *page;
+
+		if (!ctx_obj)
+			continue;
+
+		if (i915_gem_object_get_pages(ctx_obj)) {
+			WARN(1, "Failed get_pages for context obj\n");
+			continue;
+		}
+		page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+		reg_state = kmap_atomic(page);
+
+		reg_state[CTX_RING_HEAD+1] = 0;
+		reg_state[CTX_RING_TAIL+1] = 0;
+
+		kunmap_atomic(reg_state);
+
+		ringbuf->head = 0;
+		ringbuf->tail = 0;
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
new file mode 100644
index 0000000..4e60d54
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_LRC_H_
+#define _INTEL_LRC_H_
+
+#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
+
+/* Execlists regs */
+#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
+#define RING_EXECLIST_STATUS_LO(ring)	((ring)->mmio_base+0x234)
+#define RING_EXECLIST_STATUS_HI(ring)	((ring)->mmio_base+0x234 + 4)
+#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
+#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
+#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
+#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
+#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	((ring)->mmio_base+0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	((ring)->mmio_base+0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
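+
+/*
+ * Note (illustrative): the context status buffer (CSB) holds
+ * GEN8_CSB_ENTRIES entries of two dwords each, hence the "(i) * 8" byte
+ * stride above; the read/write pointers in RING_CONTEXT_STATUS_PTR are
+ * masked with GEN8_CSB_PTR_MASK.
+ */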
+
+/* Logical Rings */
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
+void intel_logical_ring_stop(struct intel_engine_cs *ring);
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
+
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
+
+/**
+ * intel_logical_ring_advance() - advance the ringbuffer tail
+ * @ringbuf: Ringbuffer to advance.
+ *
+ * The tail is only updated in our logical ringbuffer struct.
+ */
+static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
+{
+	ringbuf->tail &= ringbuf->size - 1;
+}
+
+/**
+ * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
+ * @ringbuf: Ringbuffer to write to.
+ * @data: DWORD to write.
+ */
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+					   u32 data)
+{
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
+}
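+
+/*
+ * Typical usage (sketch, mirroring gen8_emit_request()): reserve space with
+ * intel_logical_ring_begin(), emit dwords, then advance:
+ *
+ *	intel_logical_ring_emit(ringbuf, MI_NOOP);
+ *	intel_logical_ring_advance(ringbuf);
+ */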
+
+/* Logical Ring Contexts */
+
+/* One extra page is added before the LRC for GuC shared data */
+#define LRC_GUCSHR_PN	(0)
+#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
+#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
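+
+/*
+ * Resulting page layout of an LRC backing object (see
+ * intel_lr_context_deferred_alloc()):
+ *
+ *	page LRC_GUCSHR_PN (0):   GuC shared data
+ *	page LRC_PPHWSP_PN (1):   per-process hardware status page
+ *	page LRC_STATE_PN (2) on: logical ring context register state
+ */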
+
+void intel_lr_context_free(struct intel_context *ctx);
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
+				    struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_reset(struct drm_device *dev,
+			struct intel_context *ctx);
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+				     struct intel_engine_cs *ring);
+
+/* Execlists */
+int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
+			       struct drm_i915_gem_execbuffer2 *args,
+			       struct list_head *vmas);
+u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
+
+void intel_lrc_irq_handler(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+
+#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
new file mode 100644
index 0000000..7f39b8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -0,0 +1,1190 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <acpi/button.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include <linux/acpi.h>
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_connector {
+	struct intel_connector base;
+
+	struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+	struct intel_encoder base;
+
+	bool is_dual_link;
+	u32 reg;
+	u32 a3_power;
+
+	struct intel_lvds_connector *attached_connector;
+};
+
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
+		return false;
+
+	tmp = I915_READ(lvds_encoder->reg);
+
+	if (!(tmp & LVDS_PORT_EN))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_lvds_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	u32 tmp, flags = 0;
+	int dotclock;
+
+	tmp = I915_READ(lvds_encoder->reg);
+	if (tmp & LVDS_HSYNC_POLARITY)
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	if (tmp & LVDS_VSYNC_POLARITY)
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PVSYNC;
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	/* gen2/3 store dither state in pfit control, needs to match */
+	if (INTEL_INFO(dev)->gen < 4) {
+		tmp = I915_READ(PFIT_CONTROL);
+
+		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+	}
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	int pipe = crtc->pipe;
+	u32 temp;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		assert_fdi_rx_pll_disabled(dev_priv, pipe);
+		assert_shared_dpll_disabled(dev_priv,
+					    intel_crtc_to_shared_dpll(crtc));
+	} else {
+		assert_pll_disabled(dev_priv, pipe);
+	}
+
+	temp = I915_READ(lvds_encoder->reg);
+	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~PORT_TRANS_SEL_MASK;
+		temp |= PORT_TRANS_SEL_CPT(pipe);
+	} else {
+		if (pipe == 1) {
+			temp |= LVDS_PIPEB_SELECT;
+		} else {
+			temp &= ~LVDS_PIPEB_SELECT;
+		}
+	}
+
+	/* set the corresponding LVDS_BORDER bit */
+	temp &= ~LVDS_BORDER_ENABLE;
+	temp |= crtc->config->gmch_pfit.lvds_border_bits;
+	/* Set the B0-B3 data pairs corresponding to whether we're going to
+	 * set the DPLLs for dual-channel mode or not.
+	 */
+	if (lvds_encoder->is_dual_link)
+		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+	else
+		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+	 * appropriately here, but we need to look more thoroughly into how
+	 * panels behave in the two modes. For now, let's just maintain the
+	 * value we got from the BIOS.
+	 */
+	 temp &= ~LVDS_A3_POWER_MASK;
+	 temp |= lvds_encoder->a3_power;
+
+	/* Set the dithering flag on LVDS as needed, note that there is no
+	 * special lvds dither control bit on pch-split platforms, dithering is
+	 * only controlled through the PIPECONF reg. */
+	if (INTEL_INFO(dev)->gen == 4) {
+		/* Bspec wording suggests that LVDS port dithering only exists
+		 * for 18bpp panels. */
+		if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+			temp |= LVDS_ENABLE_DITHER;
+		else
+			temp &= ~LVDS_ENABLE_DITHER;
+	}
+	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		temp |= LVDS_HSYNC_POLARITY;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		temp |= LVDS_VSYNC_POLARITY;
+
+	I915_WRITE(lvds_encoder->reg, temp);
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_enable_lvds(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, stat_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		stat_reg = PCH_PP_STATUS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		stat_reg = PP_STATUS;
+	}
+
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+	POSTING_READ(lvds_encoder->reg);
+	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+		DRM_ERROR("timed out waiting for panel to power on\n");
+
+	intel_panel_enable_backlight(intel_connector);
+}
+
+static void intel_disable_lvds(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, stat_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		stat_reg = PCH_PP_STATUS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		stat_reg = PP_STATUS;
+	}
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+		DRM_ERROR("timed out waiting for panel to power off\n");
+
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+	POSTING_READ(lvds_encoder->reg);
+}
+
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+
+	intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+	intel_disable_lvds(encoder);
+}
+
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	if (mode->hdisplay > fixed_mode->hdisplay)
+		return MODE_PANEL;
+	if (mode->vdisplay > fixed_mode->vdisplay)
+		return MODE_PANEL;
+	if (fixed_mode->clock > max_pixclk)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+				      struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct intel_lvds_encoder *lvds_encoder =
+		to_lvds_encoder(&intel_encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	unsigned int lvds_bpp;
+
+	/* Should never happen!! */
+	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+		DRM_ERROR("Can't support LVDS on pipe A\n");
+		return false;
+	}
+
+	if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
+		lvds_bpp = 8*3;
+	else
+		lvds_bpp = 6*3;
+
+	if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
+		DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+			      pipe_config->pipe_bpp, lvds_bpp);
+		pipe_config->pipe_bpp = lvds_bpp;
+	}
+
+	/*
+	 * We have timings from the BIOS for the panel, put them into
+	 * the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+			       adjusted_mode);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		pipe_config->has_pch_encoder = true;
+
+		intel_pch_panel_fitting(intel_crtc, pipe_config,
+					intel_connector->panel.fitting_mode);
+	} else {
+		intel_gmch_panel_fitting(intel_crtc, pipe_config,
+					 intel_connector->panel.fitting_mode);
+	}
+
+	/*
+	 * XXX: It would be nice to support lower refresh rates on the
+	 * panels to reduce power consumption, and perhaps match the
+	 * user's requested refresh rate.
+	 */
+
+	return true;
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy.  Open means
+ * connected and closed means disconnected.  We also send hotplug events as
+ * needed, using lid status notification from the input layer.
+ */
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	enum drm_connector_status status;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	status = intel_panel_detect(dev);
+	if (status != connector_status_unknown)
+		return status;
+
+	return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode;
+
+	/* use cached edid if we have one */
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		return drm_add_edid_modes(connector, lvds_connector->base.edid);
+
+	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+	if (mode == NULL)
+		return 0;
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping forced modeset for %s\n", id->ident);
+	return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+	{
+		.callback = intel_no_modeset_on_lid_dmi_callback,
+		.ident = "Toshiba Tecra A11",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+		},
+	},
+
+	{ }	/* terminating entry */
+};
+
+/*
+ * Lid events. Note the use of 'modeset':
+ *  - we set it to MODESET_ON_LID_OPEN on lid close,
+ *    and set it to MODESET_DONE on open
+ *  - we use it as a "only once" bit (ie we ignore
+ *    duplicate events where it was already properly set)
+ *  - the suspend/resume paths will set it to
+ *    MODESET_SUSPENDED and ignore the lid open event,
+ *    because they restore the mode ("lid open").
+ */
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+			    void *unused)
+{
+	struct intel_lvds_connector *lvds_connector =
+		container_of(nb, struct intel_lvds_connector, lid_notifier);
+	struct drm_connector *connector = &lvds_connector->base.base;
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return NOTIFY_OK;
+
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+		goto exit;
+	/*
+	 * check and update the status of the LVDS connector after receiving
+	 * the lid notification event.
+	 */
+	connector->status = connector->funcs->detect(connector, false);
+
+	/* Don't force modeset on machines where it causes a GPU lockup */
+	if (dmi_check_system(intel_no_modeset_on_lid))
+		goto exit;
+	if (!acpi_lid_open()) {
+		/* do modeset on next lid open event */
+		dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+		goto exit;
+	}
+
+	if (dev_priv->modeset_restore == MODESET_DONE)
+		goto exit;
+
+	/*
+	 * Some old platforms' BIOSes love to wreak havoc while the lid is closed.
+	 * We try to detect this here and undo any damage. The split for PCH
+	 * platforms is rather conservative and a bit arbitrary, except that on
+	 * those platforms VGA disabling requires actual legacy VGA I/O access,
+	 * and as part of the cleanup in the hw state restore we also redisable
+	 * the vga plane.
+	 */
+	if (!HAS_PCH_SPLIT(dev)) {
+		drm_modeset_lock_all(dev);
+		intel_display_resume(dev);
+		drm_modeset_unlock_all(dev);
+	}
+
+	dev_priv->modeset_restore = MODESET_DONE;
+
+exit:
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+	return NOTIFY_OK;
+}
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds_connector =
+		to_lvds_connector(connector);
+
+	if (lvds_connector->lid_notifier.notifier_call)
+		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		kfree(lvds_connector->base.edid);
+
+	intel_panel_fini(&lvds_connector->base.panel);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int intel_lvds_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		struct drm_crtc *crtc;
+
+		if (value == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == value) {
+			/* the LVDS scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = value;
+
+		crtc = intel_attached_encoder(connector)->base.crtc;
+		if (crtc && crtc->state->enable) {
+			/*
+			 * If the CRTC is enabled, the display will be changed
+			 * according to the new panel fitting mode.
+			 */
+			intel_crtc_restore_mode(crtc);
+		}
+	}
+
+	return 0;
+}
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+	.get_modes = intel_lvds_get_modes,
+	.mode_valid = intel_lvds_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_lvds_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.destroy = intel_lvds_destroy,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
+	return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Apple Mac Mini (Core series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Apple Mac Mini (Core 2 series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI IM-945GSE-A",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Dell Studio Hybrid",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Dell OptiPlex FX170",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen Mini PC",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen Mini PC MP915",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i915GMm-HFS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+                .ident = "AOpen i45GMx-I",
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+                        DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+                },
+        },
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Aopen i945GTt-VFA",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Clientron U800",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Clientron E830",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus EeeBox PC EB1007",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus AT5NM10T-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard HP t5740",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard t5745",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard st5747",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI Wind Box DC500",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+			DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Gigabyte GA-D525TUD",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+			DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Supermicro X7SPA-H",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Fujitsu Esprimo Q900",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D410PT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D425KT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D510MO",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D525MW",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+		},
+	},
+
+	{ }	/* terminating entry */
+};
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev is parsed from VBT, assume that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+				   u8 *i2c_pin)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return true;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+		struct old_child_dev_config *child = &uchild->old;
+
+		/* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers as well as the
+		 * old for compatibility with some BIOSes.
+		 */
+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
+		    child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+			*i2c_pin = child->i2c_pin;
+
+		/* However, we cannot trust the BIOS writers to populate
+		 * the VBT correctly.  Since LVDS requires additional
+		 * information from AIM blocks, a non-zero addin offset is
+		 * a good indicator that the LVDS is actually present.
+		 */
+		if (child->addin_offset)
+			return true;
+
+		/* But even then some BIOS writers perform some black magic
+		 * and instantiate the device without reference to any
+		 * additional data.  Trust that if the VBT was written into
+		 * the OpRegion then they have validated the LVDS's existence.
+		 */
+		if (dev_priv->opregion.vbt)
+			return true;
+	}
+
+	return false;
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro 15\" (2010)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+		},
+	},
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro 15\" (2011)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+		},
+	},
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro 15\" (2012)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+		},
+	},
+	{ }	/* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_lvds_encoder *lvds_encoder;
+
+	for_each_intel_encoder(dev, encoder) {
+		if (encoder->type == INTEL_OUTPUT_LVDS) {
+			lvds_encoder = to_lvds_encoder(&encoder->base);
+
+			return lvds_encoder->is_dual_link;
+		}
+	}
+
+	return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+	struct drm_device *dev = lvds_encoder->base.base.dev;
+	unsigned int val;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* use the module option value if specified */
+	if (i915.lvds_channel_mode > 0)
+		return i915.lvds_channel_mode == 2;
+
+	/* single channel LVDS is limited to 112 MHz */
+	if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
+	    > 112999)
+		return true;
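+
+	/*
+	 * Example: a 1920x1200@60 panel needs a dotclock of roughly 154 MHz,
+	 * above the single-channel limit, so the check above selects
+	 * dual-link for it.
+	 */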
+
+	if (dmi_check_system(intel_dual_link_lvds))
+		return true;
+
+	/* BIOS should set the proper LVDS register value at boot, but
+	 * in reality, it doesn't set the value when the lid is closed;
+	 * we need to check "the value to be set" in VBT when LVDS
+	 * register is uninitialized.
+	 */
+	val = I915_READ(lvds_encoder->reg);
+	if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+		val = dev_priv->vbt.bios_lvds_val;
+
+	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+	/* With the introduction of the PCH we gained a dedicated
+	 * LVDS presence pin, use it. */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		return true;
+
+	/* Otherwise LVDS was only attached to mobile products,
+	 * except for the inglorious 830gm */
+	if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		return true;
+
+	return false;
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_lvds_connector *lvds_connector;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *scan;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct drm_display_mode *downclock_mode = NULL;
+	struct edid *edid;
+	struct drm_crtc *crtc;
+	u32 lvds_reg;
+	u32 lvds;
+	int pipe;
+	u8 pin;
+
+	/*
+	 * Unlock registers and just leave them unlocked. Do this before
+	 * checking quirk lists to avoid bogus WARNINGs.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PCH_PP_CONTROL,
+			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+	} else if (INTEL_INFO(dev_priv)->gen < 5) {
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+	}
+	if (!intel_lvds_supported(dev))
+		return;
+
+	/* Skip init on machines we know falsely report LVDS */
+	if (dmi_check_system(intel_no_lvds))
+		return;
+
+	if (HAS_PCH_SPLIT(dev))
+		lvds_reg = PCH_LVDS;
+	else
+		lvds_reg = LVDS;
+
+	lvds = I915_READ(lvds_reg);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		if ((lvds & LVDS_DETECTED) == 0)
+			return;
+		if (dev_priv->vbt.edp_support) {
+			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+			return;
+		}
+	}
+
+	pin = GMBUS_PIN_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		if ((lvds & LVDS_PORT_EN) == 0) {
+			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+			return;
+		}
+		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+	}
+
+	/* Set the Panel Power On/Off timings if uninitialized. */
+	if (INTEL_INFO(dev_priv)->gen < 5 &&
+	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+		/* Set T2 to 40ms and T5 to 200ms */
+		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+		/* Set T3 to 35ms and Tx to 200ms */
+		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
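+
+		/*
+		 * Decoding the magic numbers: the delay fields are in 100 us
+		 * units, so 0x0190 = 400 -> 40 ms, 0x07d0 = 2000 -> 200 ms
+		 * and 0x015e = 350 -> 35 ms, matching the T2/T5/T3/Tx
+		 * comments above.
+		 */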
+
+		DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
+	}
+
+	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
+	if (!lvds_encoder)
+		return;
+
+	lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
+	if (!lvds_connector) {
+		kfree(lvds_encoder);
+		return;
+	}
+
+	if (intel_connector_init(&lvds_connector->base) < 0) {
+		kfree(lvds_connector);
+		kfree(lvds_encoder);
+		return;
+	}
+
+	lvds_encoder->attached_connector = lvds_connector;
+
+	intel_encoder = &lvds_encoder->base;
+	encoder = &intel_encoder->base;
+	intel_connector = &lvds_connector->base;
+	connector = &intel_connector->base;
+	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+
+	intel_encoder->enable = intel_enable_lvds;
+	intel_encoder->pre_enable = intel_pre_enable_lvds;
+	intel_encoder->compute_config = intel_lvds_compute_config;
+	if (HAS_PCH_SPLIT(dev_priv)) {
+		intel_encoder->disable = pch_disable_lvds;
+		intel_encoder->post_disable = pch_post_disable_lvds;
+	} else {
+		intel_encoder->disable = gmch_disable_lvds;
+	}
+	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+	intel_encoder->get_config = intel_lvds_get_config;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+	intel_encoder->cloneable = 0;
+	if (HAS_PCH_SPLIT(dev))
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	else if (IS_GEN4(dev))
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	else
+		intel_encoder->crtc_mask = (1 << 1);
+
+	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	lvds_encoder->reg = lvds_reg;
+
+	/* create the scaling mode property */
+	drm_mode_create_scaling_mode_property(dev);
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.scaling_mode_property,
+				      DRM_MODE_SCALE_ASPECT);
+	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	/*
+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+	 * preferred mode is the right one.
+	 */
+	mutex_lock(&dev->mode_config.mutex);
+	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+		} else {
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
+		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
+	}
+	lvds_connector->base.edid = edid;
+
+	if (IS_ERR_OR_NULL(edid)) {
+		/*
+		 * Didn't get an EDID, so set wide sync ranges to make sure
+		 * all modes are handed to valid_mode for checking.
+		 */
+		connector->display_info.min_vfreq = 0;
+		connector->display_info.max_vfreq = 200;
+		connector->display_info.min_hfreq = 0;
+		connector->display_info.max_hfreq = 200;
+	}
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			DRM_DEBUG_KMS("using preferred mode from EDID: ");
+			drm_mode_debug_printmodeline(scan);
+
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			if (fixed_mode)
+				goto out;
+		}
+	}
+
+	/* Failed to get EDID, what about VBT? */
+	if (dev_priv->vbt.lfp_lvds_vbt_mode) {
+		DRM_DEBUG_KMS("using mode from VBT: ");
+		drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
+
+		fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+		if (fixed_mode) {
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			goto out;
+		}
+	}
+
+	/*
+	 * If we didn't get EDID, try checking if the panel is already turned
+	 * on.  If so, assume that whatever is currently programmed is the
+	 * correct mode.
+	 */
+
+	/* Ironlake: FIXME: if we still fail, don't try the pipe mode for now */
+	if (HAS_PCH_SPLIT(dev))
+		goto failed;
+
+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+	if (crtc && (lvds & LVDS_PORT_EN)) {
+		fixed_mode = intel_crtc_mode_get(dev, crtc);
+		if (fixed_mode) {
+			DRM_DEBUG_KMS("using current (BIOS) mode: ");
+			drm_mode_debug_printmodeline(fixed_mode);
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			goto out;
+		}
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!fixed_mode)
+		goto failed;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+
+	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+		      lvds_encoder->is_dual_link ? "dual" : "single");
+
+	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+				 LVDS_A3_POWER_MASK;
+
+	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+		DRM_DEBUG_KMS("lid notifier registration failed\n");
+		lvds_connector->lid_notifier.notifier_call = NULL;
+	}
+	drm_connector_register(connector);
+
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+	return;
+
+failed:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+	drm_connector_cleanup(connector);
+	drm_encoder_cleanup(encoder);
+	kfree(lvds_encoder);
+	kfree(lvds_connector);
+	return;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 0000000..6d3c6c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+	u32 control_value;
+	u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+	u32 size;
+	const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value)	((value) << 0)
+#define LE_TGT_CACHE(value)	((value) << 2)
+#define LE_LRUM(value)		((value) << 4)
+#define LE_AOM(value)		((value) << 6)
+#define LE_RSC(value)		((value) << 7)
+#define LE_SCC(value)		((value) << 8)
+#define LE_PFM(value)		((value) << 11)
+#define LE_SCF(value)		((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value)		((value) << 0)
+#define L3_SCC(value)		((value) << 1)
+#define L3_CACHEABILITY(value)	((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - the last two are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE		0
+#define LE_UC			1
+#define LE_WT			2
+#define LE_WB			3
+
+/* L3 caching options */
+#define L3_DIRECT		0
+#define L3_UC			1
+#define L3_RESERVED		2
+#define L3_WB			3
+
+/* Target cache */
+#define ELLC			0
+#define LLC			1
+#define LLC_ELLC		2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon.  For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with an uncached entry, and their
+ *       length MUST be less than 63 as the last two registers are
+ *       reserved by the hardware.  These tables are part of the kernel
+ *       ABI and may only be updated incrementally by adding entries at
+ *       the end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
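+
+/*
+ * A worked decode of the third entry above, from the field macros:
+ * LE_CACHEABILITY(LE_WB) = 3 << 0, LE_TGT_CACHE(LLC_ELLC) = 2 << 2 and
+ * LE_LRUM(3) = 3 << 4 combine to 0x3 | 0x8 | 0x30 = 0x0000003b, while
+ * L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB) = 3 << 4 gives the
+ * l3cc value 0x0030 - matching the raw pairs quoted per entry.
+ */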
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/**
+ * get_mocs_settings() - get the MOCS table for the device
+ * @dev:        DRM device.
+ * @table:      Output table that will be made to point at the appropriate
+ *              MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed for
+ * the platform and reports whether any programming is required at all.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+			      struct drm_i915_mocs_table *table)
+{
+	bool result = false;
+
+	if (IS_SKYLAKE(dev)) {
+		table->size  = ARRAY_SIZE(skylake_mocs_table);
+		table->table = skylake_mocs_table;
+		result = true;
+	} else if (IS_BROXTON(dev)) {
+		table->size  = ARRAY_SIZE(broxton_mocs_table);
+		table->table = broxton_mocs_table;
+		result = true;
+	} else {
+		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+			  "Platform that should have a MOCS table does not.\n");
+	}
+
+	return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ * @reg_base:	The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+				   const struct drm_i915_mocs_table *table,
+				   u32 reg_base)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int index;
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+	for (index = 0; index < table->size; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf,
+					table->table[index].control_value);
+	}
+
+	/*
+	 * Ok, now set the unused entries to uncached. These entries
+	 * are officially undefined and no contract for their contents
+	 * or settings is given.
+	 *
+	 * Entry 0 in the table is uncached - so we are just writing
+	 * that value to all the unused entries.
+	 */
+	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
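+
+/*
+ * For reference, the command stream emitted above looks like (one LRI
+ * header, one register-offset/value pair per MOCS entry, and a
+ * trailing NOOP):
+ *
+ *   MI_LOAD_REGISTER_IMM(62)
+ *   reg_base + 0 * 4,  table->table[0].control_value
+ *   ...
+ *   reg_base + 61 * 4, table->table[0].control_value  (unused slots
+ *                                                      reuse entry 0)
+ *   MI_NOOP
+ *
+ * accounting for the 2 + 2 * GEN9_NUM_MOCS_ENTRIES dwords reserved.
+ */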
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the l3cc regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table into the LNCFCMOCS registers. This register set is
+ * programmed in pairs, two 16-bit entries per 32-bit register.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+				const struct drm_i915_mocs_table *table)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int count;
+	unsigned int i;
+	u32 value;
+	u32 filler = (table->table[0].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+	}
+
+	if (table->size & 0x01) {
+		/* Odd table size - 1 left over */
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	} else {
+		value = filler;
+	}
+
+	/*
+	 * Now set the rest of the table to uncached - use entry 0 as
+	 * this will be uncached. Leave the last pair uninitialised as
+	 * they are reserved by the hardware.
+	 */
+	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+
+		value = filler;
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
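+
+/*
+ * A worked example of the pairing above, using the three-entry tables
+ * from this file: LNCFCMOCS0 gets entries 0 and 1 packed as
+ * 0x0010 | (0x0030 << 16) = 0x00300010, the odd leftover entry 2 is
+ * packed with entry 0 as 0x0030 | (0x0010 << 16) = 0x00100030, and the
+ * remaining registers get the entry-0 filler 0x00100010.
+ */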
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS registers.
+ * @req:	Request to set up the MOCS tables for.
+ *
+ * This function will emit a batch buffer with the values required for
+ * programming the MOCS register values for all the currently supported
+ * rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted at context creation so that the registers are set up whenever
+ * a context is used. They have to be emitted at the start of the
+ * context as setting the ELSP will re-init some of these registers back
+ * to the hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_mocs_table t;
+	int ret;
+
+	if (get_mocs_settings(req->ring->dev, &t)) {
+		/* Program the control registers */
+		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+		if (ret)
+			return ret;
+
+		/* Now program the l3cc registers */
+		ret = emit_mocs_l3cc_table(req, &t);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 0000000..76e45b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous gens the MOCS settings were a value set by userland as
+ * part of the batch. In Gen9 this has changed to a single table (per
+ * ring) that all batches now reference by index instead of programming
+ * the MOCS directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables are included
+ * in context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
+ * registers). The rest (the settings for the other rings) are not.
+ *
+ * This table needs to be set at system start-up because of the way the
+ * table interacts with the contexts and the GmmLib interface.
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
new file mode 100644
index 0000000..38a4c8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
+
+	return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+			struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret;
+
+	edid = drm_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
+	{ HDMI_AUDIO_OFF, "off" },
+	{ HDMI_AUDIO_AUTO, "auto" },
+	{ HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, 0,
+					   "audio",
+					   force_audio_names,
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+	{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+	{ INTEL_BROADCAST_RGB_FULL, "Full" },
+	{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   broadcast_rgb_names,
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+	if (!drm_mode_create_aspect_ratio_property(connector->dev))
+		drm_object_attach_property(&connector->base,
+			connector->dev->mode_config.aspect_ratio_property,
+			DRM_MODE_PICTURE_ASPECT_NONE);
+}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
new file mode 100644
index 0000000..e362a30
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -0,0 +1,961 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define PCI_ASLE		0xe4
+#define PCI_ASLS		0xfc
+#define PCI_SWSCI		0xe8
+#define PCI_SWSCI_SCISEL	(1 << 15)
+#define PCI_SWSCI_GSSCIE	(1 << 0)
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define   ACPI_CLID 0x01ac /* current lid state indicator */
+#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
+
+struct opregion_header {
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+	u32 drdy;       /* driver readiness */
+	u32 csts;       /* notification status */
+	u32 cevt;       /* current event */
+	u8 rsvd1[20];
+	u32 didl[8];    /* supported display devices ID list */
+	u32 cpdl[8];    /* currently presented display list */
+	u32 cadl[8];    /* currently active display list */
+	u32 nadl[8];    /* next active devices list */
+	u32 aslp;       /* ASL sleep time-out */
+	u32 tidx;       /* toggle table index */
+	u32 chpd;       /* current hotplug enable indicator */
+	u32 clid;       /* current lid state*/
+	u32 cdck;       /* current docking state */
+	u32 sxsw;       /* Sx state resume */
+	u32 evts;       /* ASL supported events */
+	u32 cnot;       /* current OS notification */
+	u32 nrdy;       /* driver status */
+	u32 did2[7];	/* extended supported display devices ID list */
+	u32 cpd2[7];	/* extended attached display devices list */
+	u8 rsvd2[4];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+	u32 scic;       /* SWSCI command|status|data */
+	u32 parm;       /* command parameters */
+	u32 dslp;       /* driver sleep time-out */
+	u8 rsvd[244];
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+	u32 ardy;       /* driver readiness */
+	u32 aslc;       /* ASLE interrupt command */
+	u32 tche;       /* technology enabled indicator */
+	u32 alsi;       /* current ALS illuminance reading */
+	u32 bclp;       /* backlight brightness to set */
+	u32 pfit;       /* panel fitting state */
+	u32 cblv;       /* current brightness level */
+	u16 bclm[20];   /* backlight level duty cycle mapping table */
+	u32 cpfm;       /* current panel fitting mode */
+	u32 epfm;       /* enabled panel fitting modes */
+	u8 plut[74];    /* panel LUT and identifier */
+	u32 pfmb;       /* PWM freq and min brightness */
+	u32 cddv;       /* color correction default values */
+	u32 pcft;       /* power conservation features */
+	u32 srot;       /* supported rotation angles */
+	u32 iuer;       /* IUER events */
+	u64 fdss;
+	u32 fdsp;
+	u32 stat;
+	u8 rsvd[70];
+} __packed;
+
+/* Driver readiness indicator */
+#define ASLE_ARDY_READY		(1 << 0)
+#define ASLE_ARDY_NOT_READY	(0 << 0)
+
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM		(1 << 0)
+#define ASLC_SET_BACKLIGHT		(1 << 1)
+#define ASLC_SET_PFIT			(1 << 2)
+#define ASLC_SET_PWM_FREQ		(1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES	(1 << 4)
+#define ASLC_BUTTON_ARRAY		(1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR	(1 << 6)
+#define ASLC_DOCKING_INDICATOR		(1 << 7)
+#define ASLC_ISCT_STATE_CHANGE		(1 << 8)
+#define ASLC_REQ_MSK			0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED		(1 << 10)
+#define ASLC_BACKLIGHT_FAILED		(1 << 12)
+#define ASLC_PFIT_FAILED		(1 << 14)
+#define ASLC_PWM_FREQ_FAILED		(1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED	(1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED	(1 << 20)
+#define ASLC_CONVERTIBLE_FAILED		(1 << 22)
+#define ASLC_DOCKING_FAILED		(1 << 24)
+#define ASLC_ISCT_STATE_FAILED		(1 << 26)
+
+/* Technology enabled indicator */
+#define ASLE_TCHE_ALS_EN	(1 << 0)
+#define ASLE_TCHE_BLC_EN	(1 << 1)
+#define ASLE_TCHE_PFIT_EN	(1 << 2)
+#define ASLE_TCHE_PFMB_EN	(1 << 3)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+/* IUER */
+#define ASLE_IUER_DOCKING		(1 << 7)
+#define ASLE_IUER_CONVERTIBLE		(1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN	(1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN	(1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN		(1 << 2)
+#define ASLE_IUER_WINDOWS_BTN		(1 << 1)
+#define ASLE_IUER_POWER_BTN		(1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR		(1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT	1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK	(0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT	8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT	8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT	5
+#define SWSCI_SCIC_EXIT_STATUS_MASK	(7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS	1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+	((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+	 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
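+
+/*
+ * E.g. SWSCI_GBDA_PANEL_DETAILS below encodes main function 4 and
+ * sub-function 5 as (4 << 1) | (5 << 8) = 0x508.
+ */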
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA			4
+#define SWSCI_GBDA_SUPPORTED_CALLS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD		SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB			6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#define MAX_DSLP	1500
+
+#ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+	u32 main_function, sub_function, scic;
+	u16 pci_swsci;
+	u32 dslp;
+
+	if (!swsci)
+		return -ENODEV;
+
+	main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+		SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+	sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+		SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+	/* Check if we can call the function. See swsci_setup for details. */
+	if (main_function == SWSCI_SBCB) {
+		if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	} else if (main_function == SWSCI_GBDA) {
+		if ((dev_priv->opregion.swsci_gbda_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	}
+
+	/* Driver sleep timeout in ms. */
+	dslp = swsci->dslp;
+	if (!dslp) {
+		/* The spec says 2ms should be the default, but it's too small
+		 * for some machines. */
+		dslp = 50;
+	} else if (dslp > MAX_DSLP) {
+		/* Hey bios, trust must be earned. */
+		DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+			      "using %u ms instead\n", dslp, MAX_DSLP);
+		dslp = MAX_DSLP;
+	}
+
+	/* The spec tells us to do this, but we are the only user... */
+	scic = swsci->scic;
+	if (scic & SWSCI_SCIC_INDICATOR) {
+		DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+		return -EBUSY;
+	}
+
+	scic = function | SWSCI_SCIC_INDICATOR;
+
+	swsci->parm = parm;
+	swsci->scic = scic;
+
+	/* Ensure SCI event is selected and event trigger is cleared. */
+	pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+	if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+		pci_swsci |= PCI_SWSCI_SCISEL;
+		pci_swsci &= ~PCI_SWSCI_GSSCIE;
+		pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+	}
+
+	/* Use event trigger to tell bios to check the mail. */
+	pci_swsci |= PCI_SWSCI_GSSCIE;
+	pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+	/* Poll for the result. */
+#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
+	if (wait_for(C, dslp)) {
+		DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+		SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+	/* Note: scic == 0 is an error! */
+	if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+		DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+		return -EIO;
+	}
+
+	if (parm_out)
+		*parm_out = swsci->parm;
+
+	return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT			0
+#define DISPLAY_TYPE_TV				1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL	2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL	3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+				  bool enable)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 parm = 0;
+	u32 type = 0;
+	u32 port;
+
+	/* don't care about old stuff for now */
+	if (!HAS_DDI(dev))
+		return 0;
+
+	if (intel_encoder->type == INTEL_OUTPUT_DSI)
+		port = 0;
+	else
+		port = intel_ddi_get_encoder_port(intel_encoder);
+
+	if (port == PORT_E)  {
+		port = 0;
+	} else {
+		parm |= 1 << port;
+		port++;
+	}
+
+	if (!enable)
+		parm |= 4 << 8;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_ANALOG:
+		type = DISPLAY_TYPE_CRT;
+		break;
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
+	case INTEL_OUTPUT_DP_MST:
+		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+		break;
+	case INTEL_OUTPUT_EDP:
+	case INTEL_OUTPUT_DSI:
+		type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+			  intel_encoder->type);
+		return -EINVAL;
+	}
+
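+	/*
+	 * E.g. an external flat panel on the second display slot (port == 2
+	 * after the adjustment above) puts DISPLAY_TYPE_EXTERNAL_FLAT_PANEL
+	 * (2) into bits 24:22 of parm.
+	 */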
+	parm |= type << (16 + port * 3);
+
+	return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+	pci_power_t pci_power_state;
+	u32 parm;
+} power_state_map[] = {
+	{ PCI_D0,	0x00 },
+	{ PCI_D1,	0x01 },
+	{ PCI_D2,	0x02 },
+	{ PCI_D3hot,	0x04 },
+	{ PCI_D3cold,	0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	int i;
+
+	if (!HAS_DDI(dev))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+		if (state == power_state_map[i].pci_power_state)
+			return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+				     power_state_map[i].parm, NULL);
+	}
+
+	return -EINVAL;
+}
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_connector *intel_connector;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
+	if (acpi_video_get_backlight_type() == acpi_backlight_native) {
+		DRM_DEBUG_KMS("opregion backlight request ignored\n");
+		return 0;
+	}
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLC_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)
+		return ASLC_BACKLIGHT_FAILED;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	/*
+	 * Update backlight on all connectors that support backlight (usually
+	 * only one).
+	 */
+	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+		intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
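+	/*
+	 * Report the brightness back to the firmware as a percentage:
+	 * e.g. bclp == 128 yields DIV_ROUND_UP(128 * 100, 255) == 51.
+	 */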
+	asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+	/*
+	 * alsi is the current ALS reading in lux. 0 indicates below sensor
+	 * range, 0xffff indicates above sensor range. 1-0xfffe are valid.
+	 */
+	DRM_DEBUG_DRIVER("Illum is not supported\n");
+	return ASLC_ALS_ILLUM_FAILED;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+	return ASLC_PWM_FREQ_FAILED;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+	/*
+	 * Panel fitting is currently controlled by the X code, so this is a
+	 * noop until modesetting support works fully.
+	 */
+	DRM_DEBUG_DRIVER("Pfit is not supported\n");
+	return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+	DRM_DEBUG_DRIVER("SROT is not supported\n");
+	return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+	if (!iuer)
+		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+	if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+	if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+	if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+	if (iuer & ASLE_IUER_WINDOWS_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+	if (iuer & ASLE_IUER_POWER_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+	return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_CONVERTIBLE)
+		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+	else
+		DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+	return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_DOCKING)
+		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+	else
+		DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+	return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("ISCT is not supported\n");
+	return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+	struct intel_opregion *opregion =
+		container_of(work, struct intel_opregion, asle_work);
+	struct drm_i915_private *dev_priv =
+		container_of(opregion, struct drm_i915_private, opregion);
+	struct drm_device *dev = dev_priv->dev;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 aslc_stat = 0;
+	u32 aslc_req;
+
+	if (!asle)
+		return;
+
+	aslc_req = asle->aslc;
+
+	if (!(aslc_req & ASLC_REQ_MSK)) {
+		DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+				 aslc_req);
+		return;
+	}
+
+	if (aslc_req & ASLC_SET_ALS_ILLUM)
+		aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+
+	if (aslc_req & ASLC_SET_BACKLIGHT)
+		aslc_stat |= asle_set_backlight(dev, asle->bclp);
+
+	if (aslc_req & ASLC_SET_PFIT)
+		aslc_stat |= asle_set_pfit(dev, asle->pfit);
+
+	if (aslc_req & ASLC_SET_PWM_FREQ)
+		aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+	if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+		aslc_stat |= asle_set_supported_rotation_angles(dev,
+							asle->srot);
+
+	if (aslc_req & ASLC_BUTTON_ARRAY)
+		aslc_stat |= asle_set_button_array(dev, asle->iuer);
+
+	if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+		aslc_stat |= asle_set_convertible(dev, asle->iuer);
+
+	if (aslc_req & ASLC_DOCKING_INDICATOR)
+		aslc_stat |= asle_set_docking(dev, asle->iuer);
+
+	if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+		aslc_stat |= asle_isct_state(dev);
+
+	asle->aslc = aslc_stat;
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->opregion.asle)
+		schedule_work(&dev_priv->opregion.asle_work);
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+static int intel_opregion_video_event(struct notifier_block *nb,
+				      unsigned long val, void *data)
+{
+	/*
+	 * The only video events relevant to opregion are 0x80. These indicate
+	 * either a docking event, lid switch or display switch request. In
+	 * Linux, these are handled by the dock, button and video drivers.
+	 */
+
+	struct acpi_bus_event *event = data;
+	struct opregion_acpi *acpi;
+	int ret = NOTIFY_OK;
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!system_opregion)
+		return NOTIFY_DONE;
+
+	acpi = system_opregion->acpi;
+
+	if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
+		ret = NOTIFY_BAD;
+
+	acpi->csts = 0;
+
+	return ret;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+	.notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3).
+ */
+
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+	u32 did;
+
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		did = opregion->acpi->didl[i];
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return 0;
+
+		did = opregion->acpi->did2[i];
+	}
+
+	return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		opregion->acpi->didl[i] = val;
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return;
+
+		opregion->acpi->did2[i] = val;
+	}
+}
+
+static void intel_didl_outputs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct drm_connector *connector;
+	acpi_handle handle;
+	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+	unsigned long long device_id;
+	acpi_status status;
+	u32 temp, max_outputs;
+	int i = 0;
+
+	handle = ACPI_HANDLE(&dev->pdev->dev);
+	if (!handle || acpi_bus_get_device(handle, &acpi_dev))
+		return;
+
+	if (acpi_is_video_device(handle)) {
+		acpi_video_bus = acpi_dev;
+	} else {
+		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+			if (acpi_is_video_device(acpi_cdev->handle)) {
+				acpi_video_bus = acpi_cdev;
+				break;
+			}
+		}
+	}
+
+	if (!acpi_video_bus) {
+		DRM_DEBUG_KMS("No ACPI video bus found\n");
+		return;
+	}
+
+	/*
+	 * In theory, did2, the extended didl, gets added at opregion version
+	 * 3.0. In practice, however, we're supposed to set it for earlier
+	 * versions as well, since a BIOS that doesn't understand did2 should
+	 * not look at it anyway. Use a variable so we can tweak this if a need
+	 * arises later.
+	 */
+	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+		ARRAY_SIZE(opregion->acpi->did2);
+
+	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+				      max_outputs);
+			return;
+		}
+		status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+					       NULL, &device_id);
+		if (ACPI_SUCCESS(status)) {
+			if (!device_id)
+				goto blind_set;
+			set_did(opregion, i++, (u32)(device_id & 0x0f0f));
+		}
+	}
+
+end:
+	DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+	/* If fewer than max outputs, the list must be null terminated */
+	if (i < max_outputs)
+		set_did(opregion, i, 0);
+	return;
+
+blind_set:
+	i = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		int output_type = ACPI_OTHER_OUTPUT;
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+				      max_outputs);
+			return;
+		}
+		switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+			output_type = ACPI_VGA_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Component:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			output_type = ACPI_TV_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			output_type = ACPI_DIGITAL_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			output_type = ACPI_LVDS_OUTPUT;
+			break;
+		}
+		temp = get_did(opregion, i);
+		set_did(opregion, i, temp | (1 << 31) | output_type | i);
+		i++;
+	}
+	goto end;
+}
+
+static void intel_setup_cadls(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int i = 0;
+	u32 disp_id;
+
+	/*
+	 * Initialize the CADL field by duplicating the DIDL values.
+	 * Technically, this is not always correct as display outputs may
+	 * exist but not be active. This initialization is necessary for some
+	 * Clevo laptops that check this field before processing the
+	 * brightness and display switching hotkeys. Just like DIDL, CADL is
+	 * NULL-terminated if there are fewer than eight devices.
+	 */
+	do {
+		disp_id = get_did(opregion, i);
+		opregion->acpi->cadl[i] = disp_id;
+	} while (++i < 8 && disp_id != 0);
+}
+
+void intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		intel_didl_outputs(dev);
+		intel_setup_cadls(dev);
+
+		/*
+		 * Notify BIOS we are ready to handle ACPI video ext notifs.
+		 * Right now, all the events are handled by the ACPI video
+		 * module. We don't actually need to do anything with them.
+		 */
+		opregion->acpi->csts = 0;
+		opregion->acpi->drdy = 1;
+
+		system_opregion = opregion;
+		register_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	if (opregion->asle) {
+		opregion->asle->tche = ASLE_TCHE_BLC_EN;
+		opregion->asle->ardy = ASLE_ARDY_READY;
+	}
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->asle)
+		opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+	cancel_work_sync(&dev_priv->opregion.asle_work);
+
+	if (opregion->acpi) {
+		opregion->acpi->drdy = 0;
+
+		system_opregion = NULL;
+		unregister_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	/* just clear all opregion memory pointers now */
+	memunmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+	opregion->vbt = NULL;
+	opregion->lid_state = NULL;
+}
+
+static void swsci_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	bool requested_callbacks = false;
+	u32 tmp;
+
+	/* Sub-function code 0 is okay, let's allow them. */
+	opregion->swsci_gbda_sub_functions = 1;
+	opregion->swsci_sbcb_sub_functions = 1;
+
+	/* We use GBDA to ask for supported GBDA calls. */
+	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		tmp <<= 1;
+		opregion->swsci_gbda_sub_functions |= tmp;
+	}
+
+	/*
+	 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+	 * must not call interfaces that are not specifically requested by the
+	 * bios.
+	 */
+	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+		/* here, the bits already match sub-function codes */
+		opregion->swsci_sbcb_sub_functions |= tmp;
+		requested_callbacks = true;
+	}
+
+	/*
+	 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+	 * the callback is _requested_. But we still can't call interfaces that
+	 * are not requested.
+	 */
+	if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		u32 low = tmp & 0x7ff;
+		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+		tmp = (high << 4) | (low << 1) | 1;
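+		/*
+		 * I.e. response bits 0-10 map to sub-functions 1-11 and
+		 * response bits 12+ map to sub-functions 16+, matching the
+		 * gap in the SWSCI_SBCB_* codes (11 jumps to 16); reserved
+		 * bit 11 is dropped and bit 0 marks sub-function 0.
+		 */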
+
+		/* best guess what to do with supported wrt requested */
+		if (requested_callbacks) {
+			u32 req = opregion->swsci_sbcb_sub_functions;
+			if ((req & tmp) != req)
+				DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+			/* XXX: for now, trust the requested callbacks */
+			/* opregion->swsci_sbcb_sub_functions &= tmp; */
+		} else {
+			opregion->swsci_sbcb_sub_functions |= tmp;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+			 opregion->swsci_gbda_sub_functions,
+			 opregion->swsci_sbcb_sub_functions);
+}
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif  /* CONFIG_ACPI */
+
+int intel_opregion_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	u32 asls, mboxes;
+	char buf[sizeof(OPREGION_SIGNATURE)];
+	int err = 0;
+	void *base;
+
+	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
+	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+	if (asls == 0) {
+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+		return -ENOTSUPP;
+	}
+
+#ifdef CONFIG_ACPI
+	INIT_WORK(&opregion->asle_work, asle_work);
+#endif
+
+	base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
+	if (!base)
+		return -ENOMEM;
+
+	memcpy(buf, base, sizeof(buf));
+
+	if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+	opregion->header = base;
+	opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+	opregion->lid_state = base + ACPI_CLID;
+
+	mboxes = opregion->header->mboxes;
+	if (mboxes & MBOX_ACPI) {
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+	}
+
+	if (mboxes & MBOX_SWSCI) {
+		DRM_DEBUG_DRIVER("SWSCI supported\n");
+		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+		swsci_setup(dev);
+	}
+	if (mboxes & MBOX_ASLE) {
+		DRM_DEBUG_DRIVER("ASLE supported\n");
+		opregion->asle = base + OPREGION_ASLE_OFFSET;
+
+		opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+	}
+
+	return 0;
+
+err_out:
+	memunmap(base);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 0000000..4445426
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1593 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/*
+ * Limits for overlay size. According to the Intel docs, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But xorg assumes 2048 for both height and width.
+ * Use the minimum of both.
+ */
+#define IMAGE_MAX_WIDTH		2048
+#define IMAGE_MAX_HEIGHT	2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY	1024
+#define IMAGE_MAX_HEIGHT_LEGACY	1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE	(0x1<<19)
+#define OCMD_MIRROR_MASK	(0x3<<17)
+#define OCMD_MIRROR_MODE	(0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL	(0x1<<17)
+#define OCMD_MIRROR_VERTICAL	(0x2<<17)
+#define OCMD_MIRROR_BOTH	(0x3<<17)
+#define OCMD_BYTEORDER_MASK	(0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP		(0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP		(0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP	(0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888		(0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555		(0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565		(0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED	(0x8<<10)
+#define OCMD_YUV_411_PACKED	(0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR	(0xc<<10)
+#define OCMD_YUV_422_PLANAR	(0xd<<10)
+#define OCMD_YUV_410_PLANAR	(0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY	(0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE	(0x1<<7)
+#define OCMD_BUF_TYPE_MASK	(0x1<<5)
+#define OCMD_BUF_TYPE_FRAME	(0x0<<5)
+#define OCMD_BUF_TYPE_FIELD	(0x1<<5)
+#define OCMD_TEST_MODE		(0x1<<4)
+#define OCMD_BUFFER_SELECT	(0x3<<2)
+#define OCMD_BUFFER0		(0x0<<2)
+#define OCMD_BUFFER1		(0x1<<2)
+#define OCMD_FIELD_SELECT	(0x1<<2)
+#define OCMD_FIELD0		(0x0<<1)
+#define OCMD_FIELD1		(0x1<<1)
+#define OCMD_ENABLE		(0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK		(0x1<<18)
+#define OCONF_PIPE_A		(0x0<<18)
+#define OCONF_PIPE_B		(0x1<<18)
+#define OCONF_GAMMA2_ENABLE	(0x1<<16)
+#define OCONF_CSC_MODE_BT601	(0x0<<5)
+#define OCONF_CSC_MODE_BT709	(0x1<<5)
+#define OCONF_CSC_BYPASS	(0x1<<4)
+#define OCONF_CC_OUT_8BIT	(0x1<<3)
+#define OCONF_TEST_MODE		(0x1<<2)
+#define OCONF_THREE_LINE_BUFFER	(0x1<<0)
+#define OCONF_TWO_LINE_BUFFER	(0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE		(0x1<<31)
+#define CLK_RGB24_MASK		0x0
+#define CLK_RGB16_MASK		0x070307
+#define CLK_RGB15_MASK		0x070707
+#define CLK_RGB8I_MASK		0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
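+
+/*
+ * E.g. RGB16_TO_COLORKEY(0xffff) = 0xf8fcf8: each 5/6-bit channel lands
+ * in the top bits of its 8-bit lane, with the low bits left clear.
+ */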
+
+/* overlay flip addr flag */
+#define OFC_UPDATE		0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS          5
+#define N_VERT_Y_TAPS           3
+#define N_HORIZ_UV_TAPS         3
+#define N_VERT_UV_TAPS          3
+#define N_PHASES                17
+#define MAX_TAPS                5
+
+/* memory buffered overlay registers */
+struct overlay_registers {
+	u32 OBUF_0Y;
+	u32 OBUF_1Y;
+	u32 OBUF_0U;
+	u32 OBUF_0V;
+	u32 OBUF_1U;
+	u32 OBUF_1V;
+	u32 OSTRIDE;
+	u32 YRGB_VPH;
+	u32 UV_VPH;
+	u32 HORZ_PH;
+	u32 INIT_PHS;
+	u32 DWINPOS;
+	u32 DWINSZ;
+	u32 SWIDTH;
+	u32 SWIDTHSW;
+	u32 SHEIGHT;
+	u32 YRGBSCALE;
+	u32 UVSCALE;
+	u32 OCLRC0;
+	u32 OCLRC1;
+	u32 DCLRKV;
+	u32 DCLRKM;
+	u32 SCLRKVH;
+	u32 SCLRKVL;
+	u32 SCLRKEN;
+	u32 OCONFIG;
+	u32 OCMD;
+	u32 RESERVED1; /* 0x6C */
+	u32 OSTART_0Y;
+	u32 OSTART_1Y;
+	u32 OSTART_0U;
+	u32 OSTART_0V;
+	u32 OSTART_1U;
+	u32 OSTART_1V;
+	u32 OTILEOFF_0Y;
+	u32 OTILEOFF_1Y;
+	u32 OTILEOFF_0U;
+	u32 OTILEOFF_0V;
+	u32 OTILEOFF_1U;
+	u32 OTILEOFF_1V;
+	u32 FASTHSCALE; /* 0xA0 */
+	u32 UVSCALEV; /* 0xA4 */
+	u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+	u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+	u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+	u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+	u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+	u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+	u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+	u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+	u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+	struct drm_device *dev;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *vid_bo;
+	struct drm_i915_gem_object *old_vid_bo;
+	bool active;
+	bool pfit_active;
+	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+	u32 color_key:24;
+	u32 color_key_enabled:1;
+	u32 brightness, contrast, saturation;
+	u32 old_xscale, old_yscale;
+	/* register access */
+	u32 flip_addr;
+	struct drm_i915_gem_object *reg_bo;
+	/* flip handling */
+	struct drm_i915_gem_request *last_flip_req;
+	void (*flip_tail)(struct intel_overlay *);
+};
+
+static struct overlay_registers __iomem *
+intel_overlay_map_regs(struct intel_overlay *overlay)
+{
+	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers __iomem *regs;
+
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
+	else
+		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
+
+	return regs;
+}
+
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+				     struct overlay_registers __iomem *regs)
+{
+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		io_mapping_unmap(regs);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
+					 void (*tail)(struct intel_overlay *))
+{
+	int ret;
+
+	WARN_ON(overlay->last_flip_req);
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
+
+	overlay->flip_tail = tail;
+	ret = i915_wait_request(overlay->last_flip_req);
+	if (ret)
+		return ret;
+
+	i915_gem_request_assign(&overlay->last_flip_req, NULL);
+	return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
+	int ret;
+
+	WARN_ON(overlay->active);
+	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	overlay->active = true;
+
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return intel_overlay_do_wait_request(overlay, req, NULL);
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+				  bool load_polyphase_filter)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
+	u32 flip_addr = overlay->flip_addr;
+	u32 tmp;
+	int ret;
+
+	WARN_ON(!overlay->active);
+
+	if (load_polyphase_filter)
+		flip_addr |= OFC_UPDATE;
+
+	/* check for underruns */
+	tmp = I915_READ(DOVSTA);
+	if (tmp & (1 << 17))
+		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
+
+	WARN_ON(overlay->last_flip_req);
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
+
+	return 0;
+}
+
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+
+	i915_gem_object_ggtt_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	overlay->old_vid_bo = NULL;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
+
+	/* never have the overlay hw on without showing a frame */
+	if (WARN_ON(!obj))
+		return;
+
+	i915_gem_object_ggtt_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+	overlay->vid_bo = NULL;
+
+	overlay->crtc->overlay = NULL;
+	overlay->crtc = NULL;
+	overlay->active = false;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
+	u32 flip_addr = overlay->flip_addr;
+	int ret;
+
+	WARN_ON(!overlay->active);
+
+	/* According to Intel docs the overlay hw may hang (when switching
+	 * off) without loading the filter coeffs. It is however unclear
+	 * whether this applies to disabling the overlay or to switching off
+	 * the hw. Do it in both cases. */
+	flip_addr |= OFC_UPDATE;
+
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	/* wait for overlay to go idle */
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	/* turn overlay off */
+	if (IS_I830(dev)) {
+		/* Workaround: Don't disable the overlay fully, since otherwise
+		 * it dies on the next OVERLAY_ON cmd. */
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+	} else {
+		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(ring, flip_addr);
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	}
+	intel_ring_advance(ring);
+
+	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and to make forward progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+	int ret;
+
+	if (overlay->last_flip_req == NULL)
+		return 0;
+
+	ret = i915_wait_request(overlay->last_flip_req);
+	if (ret)
+		return ret;
+
+	if (overlay->flip_tail)
+		overlay->flip_tail(overlay);
+
+	i915_gem_request_assign(&overlay->last_flip_req, NULL);
+	return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	/* Only wait if there is actually an old frame to release to
+	 * guarantee forward progress.
+	 */
+	if (!overlay->old_vid_bo)
+		return 0;
+
+	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+		/* synchronous slowpath */
+		struct drm_i915_gem_request *req;
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+		if (ret)
+			return ret;
+
+		ret = intel_ring_begin(req, 2);
+		if (ret) {
+			i915_gem_request_cancel(req);
+			return ret;
+		}
+
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+
+		ret = intel_overlay_do_wait_request(overlay, req,
+						    intel_overlay_release_old_vid_tail);
+		if (ret)
+			return ret;
+	}
+
+	intel_overlay_release_old_vid_tail(overlay);
+
+	i915_gem_track_fb(overlay->old_vid_bo, NULL,
+			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+	return 0;
+}
+
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+	struct intel_overlay *overlay = dev_priv->overlay;
+
+	if (!overlay)
+		return;
+
+	intel_overlay_release_old_vid(overlay);
+
+	overlay->last_flip_req = NULL;
+	overlay->old_xscale = 0;
+	overlay->old_yscale = 0;
+	overlay->crtc = NULL;
+	overlay->active = false;
+}
+
+struct put_image_params {
+	int format;
+	short dst_x;
+	short dst_y;
+	short dst_w;
+	short dst_h;
+	short src_w;
+	short src_scan_h;
+	short src_scan_w;
+	short src_h;
+	short stride_Y;
+	short stride_UV;
+	int offset_Y;
+	int offset_U;
+	int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
+		return 4;
+	case I915_OVERLAY_YUV411:
+		/* return 6; not implemented */
+	default:
+		return -EINVAL;
+	}
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
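+		/* packed 4:2:2 (YUY2/UYVY) stores two bytes per pixel */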
+		return width << 1;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int uv_hsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
+	case I915_OVERLAY_YUV420:
+		return 2;
+	case I915_OVERLAY_YUV411:
+	case I915_OVERLAY_YUV410:
+		return 4;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int uv_vsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV420:
+	case I915_OVERLAY_YUV410:
+		return 2;
+	case I915_OVERLAY_YUV422:
+	case I915_OVERLAY_YUV411:
+		return 1;
+	default:
+		return -EINVAL;
+	}
+}
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+	u32 mask, shift, ret;
+	if (IS_GEN2(dev)) {
+		mask = 0x1f;
+		shift = 5;
+	} else {
+		mask = 0x3f;
+		shift = 6;
+	}
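+	/* Rough worked example (illustrative, non-gen2): offset = 0 and
+	 * width = 720 span ((0 + 720 + 63) >> 6) - (0 >> 6) = 12 64-byte
+	 * units; doubled to 24, minus 1 is 23, and << 2 encodes the
+	 * SWIDTHSW value 92. */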
+	ret = ((offset + width + mask) >> shift) - (offset >> shift);
+	if (!IS_GEN2(dev))
+		ret <<= 1;
+	ret -= 1;
+	return ret << 2;
+}
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+	0x3000, 0x0800, 0x3000
+};
+
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
+{
+	memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+	memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+		    sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+				   struct overlay_registers __iomem *regs,
+				   struct put_image_params *params)
+{
+	/* fixed point with a 12 bit shift */
+	u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+	bool scale_changed = false;
+	int uv_hscale = uv_hsubsampling(params->format);
+	int uv_vscale = uv_vsubsampling(params->format);
+
+	if (params->dst_w > 1)
+		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+			/(params->dst_w);
+	else
+		xscale = 1 << FP_SHIFT;
+
+	if (params->dst_h > 1)
+		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+			/(params->dst_h);
+	else
+		yscale = 1 << FP_SHIFT;
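+	/* Illustrative example: scanning 1280 source pixels onto a 640
+	 * pixel wide destination gives xscale = (1279 << 12) / 640 = 8185,
+	 * i.e. roughly 2.0 in the 4.12 fixed-point format the hardware
+	 * expects. */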
+
+	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+	xscale_UV = xscale/uv_hscale;
+	yscale_UV = yscale/uv_vscale;
+	/* make the Y scale to UV scale ratio an exact multiple */
+	xscale = xscale_UV * uv_hscale;
+	yscale = yscale_UV * uv_vscale;
+	/*} else {
+	  xscale_UV = 0;
+	  yscale_UV = 0;
+	  }*/
+
+	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+		scale_changed = true;
+	overlay->old_xscale = xscale;
+	overlay->old_yscale = yscale;
+
+	iowrite32(((yscale & FRACT_MASK) << 20) |
+		  ((xscale >> FP_SHIFT)  << 16) |
+		  ((xscale & FRACT_MASK) << 3),
+		 &regs->YRGBSCALE);
+
+	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+		  ((xscale_UV >> FP_SHIFT)  << 16) |
+		  ((xscale_UV & FRACT_MASK) << 3),
+		 &regs->UVSCALE);
+
+	iowrite32((((yscale    >> FP_SHIFT) << 16) |
+		   ((yscale_UV >> FP_SHIFT) << 0)),
+		 &regs->UVSCALEV);
+
+	if (scale_changed)
+		update_polyphase_filter(regs);
+
+	return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+			    struct overlay_registers __iomem *regs)
+{
+	u32 key = overlay->color_key;
+	u32 flags;
+
+	flags = 0;
+	if (overlay->color_key_enabled)
+		flags |= DST_KEY_ENABLE;
+
+	switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
+	case 8:
+		key = 0;
+		flags |= CLK_RGB8I_MASK;
+		break;
+
+	case 16:
+		if (overlay->crtc->base.primary->fb->depth == 15) {
+			key = RGB15_TO_COLORKEY(key);
+			flags |= CLK_RGB15_MASK;
+		} else {
+			key = RGB16_TO_COLORKEY(key);
+			flags |= CLK_RGB16_MASK;
+		}
+		break;
+
+	case 24:
+	case 32:
+		flags |= CLK_RGB24_MASK;
+		break;
+	}
+
+	iowrite32(key, &regs->DCLRKV);
+	iowrite32(flags, &regs->DCLRKM);
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+	u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
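+	/* e.g. (illustrative) a packed YUY2 frame in buffer 0 with no byte
+	 * swapping ends up as OCMD_ENABLE | OCMD_BUF_TYPE_FRAME |
+	 * OCMD_BUFFER0 | OCMD_YUV_422_PACKED. */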
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			cmd |= OCMD_YUV_422_PLANAR;
+			break;
+		case I915_OVERLAY_YUV420:
+			cmd |= OCMD_YUV_420_PLANAR;
+			break;
+		case I915_OVERLAY_YUV411:
+		case I915_OVERLAY_YUV410:
+			cmd |= OCMD_YUV_410_PLANAR;
+			break;
+		}
+	} else { /* YUV packed */
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			cmd |= OCMD_YUV_422_PACKED;
+			break;
+		case I915_OVERLAY_YUV411:
+			cmd |= OCMD_YUV_411_PACKED;
+			break;
+		}
+
+		switch (params->format & I915_OVERLAY_SWAP_MASK) {
+		case I915_OVERLAY_NO_SWAP:
+			break;
+		case I915_OVERLAY_UV_SWAP:
+			cmd |= OCMD_UV_SWAP;
+			break;
+		case I915_OVERLAY_Y_SWAP:
+			cmd |= OCMD_Y_SWAP;
+			break;
+		case I915_OVERLAY_Y_AND_UV_SWAP:
+			cmd |= OCMD_Y_AND_UV_SWAP;
+			break;
+		}
+	}
+
+	return cmd;
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+				      struct drm_i915_gem_object *new_bo,
+				      struct put_image_params *params)
+{
+	int ret, tmp_width;
+	struct overlay_registers __iomem *regs;
+	bool scale_changed = false;
+	struct drm_device *dev = overlay->dev;
+	u32 swidth, swidthsw, sheight, ostride;
+	enum pipe pipe = overlay->crtc->pipe;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
+						   &i915_ggtt_view_normal);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
+	if (!overlay->active) {
+		u32 oconfig;
+		regs = intel_overlay_map_regs(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unpin;
+		}
+		oconfig = OCONF_CC_OUT_8BIT;
+		if (IS_GEN4(overlay->dev))
+			oconfig |= OCONF_CSC_MODE_BT709;
+		oconfig |= pipe == 0 ?
+			OCONF_PIPE_A : OCONF_PIPE_B;
+		iowrite32(oconfig, &regs->OCONFIG);
+		intel_overlay_unmap_regs(overlay, regs);
+
+		ret = intel_overlay_on(overlay);
+		if (ret != 0)
+			goto out_unpin;
+	}
+
+	regs = intel_overlay_map_regs(overlay);
+	if (!regs) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
+
+	if (params->format & I915_OVERLAY_YUV_PACKED)
+		tmp_width = packed_width_bytes(params->format, params->src_w);
+	else
+		tmp_width = params->src_w;
+
+	swidth = params->src_w;
+	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+	sheight = params->src_h;
+	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+	ostride = params->stride_Y;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		int uv_hscale = uv_hsubsampling(params->format);
+		int uv_vscale = uv_vsubsampling(params->format);
+		u32 tmp_U, tmp_V;
+		swidth |= (params->src_w/uv_hscale) << 16;
+		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+				      params->src_w/uv_hscale);
+		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+				      params->src_w/uv_hscale);
+		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+		sheight |= (params->src_h/uv_vscale) << 16;
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+		ostride |= params->stride_UV << 16;
+	}
+
+	iowrite32(swidth, &regs->SWIDTH);
+	iowrite32(swidthsw, &regs->SWIDTHSW);
+	iowrite32(sheight, &regs->SHEIGHT);
+	iowrite32(ostride, &regs->OSTRIDE);
+
+	scale_changed = update_scaling_factors(overlay, regs, params);
+
+	update_colorkey(overlay, regs);
+
+	iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+
+	intel_overlay_unmap_regs(overlay, regs);
+
+	ret = intel_overlay_continue(overlay, scale_changed);
+	if (ret)
+		goto out_unpin;
+
+	i915_gem_track_fb(overlay->vid_bo, new_bo,
+			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->vid_bo = new_bo;
+
+	intel_frontbuffer_flip(dev,
+			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_ggtt_unpin(new_bo);
+	return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+	struct overlay_registers __iomem *regs;
+	struct drm_device *dev = overlay->dev;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	ret = intel_overlay_recover_from_interrupt(overlay);
+	if (ret != 0)
+		return ret;
+
+	if (!overlay->active)
+		return 0;
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	regs = intel_overlay_map_regs(overlay);
+	iowrite32(0, &regs->OCMD);
+	intel_overlay_unmap_regs(overlay, regs);
+
+	ret = intel_overlay_off(overlay);
+	if (ret != 0)
+		return ret;
+
+	intel_overlay_off_tail(overlay);
+	return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+					  struct intel_crtc *crtc)
+{
+	if (!crtc->active)
+		return -EINVAL;
+
+	/* can't use the overlay with a double wide pipe */
+	if (crtc->config->double_wide)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pfit_control = I915_READ(PFIT_CONTROL);
+	u32 ratio;
+
+	/* XXX: This is not the same logic as in the xorg driver, but more in
+	 * line with the Intel documentation for the i965.
+	 */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* on i965 use the PGM reg to read out the autoscaler values */
+		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+	} else {
+		if (pfit_control & VERT_AUTO_SCALE)
+			ratio = I915_READ(PFIT_AUTO_RATIOS);
+		else
+			ratio = I915_READ(PFIT_PGM_RATIOS);
+		ratio >>= PFIT_VERT_SCALE_SHIFT;
+	}
+
+	overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+			     struct drm_intel_overlay_put_image *rec)
+{
+	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+	if (rec->dst_x < mode->hdisplay &&
+	    rec->dst_x + rec->dst_width <= mode->hdisplay &&
+	    rec->dst_y < mode->vdisplay &&
+	    rec->dst_y + rec->dst_height <= mode->vdisplay)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+	u32 tmp;
+
+	/* downscaling limit is 8.0 */
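+	/* e.g. (illustrative) 1080 scan lines onto 135 display lines is an
+	 * integer ratio of 8 and is rejected; onto 136 lines the ratio
+	 * truncates to 7 and passes. */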
+	tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+	tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int check_overlay_src(struct drm_device *dev,
+			     struct drm_intel_overlay_put_image *rec,
+			     struct drm_i915_gem_object *new_bo)
+{
+	int uv_hscale = uv_hsubsampling(rec->flags);
+	int uv_vscale = uv_vsubsampling(rec->flags);
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
+
+	/* check src dimensions */
+	if (IS_845G(dev) || IS_I830(dev)) {
+		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+		    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
+			return -EINVAL;
+	} else {
+		if (rec->src_height > IMAGE_MAX_HEIGHT ||
+		    rec->src_width  > IMAGE_MAX_WIDTH)
+			return -EINVAL;
+	}
+
+	/* better safe than sorry, use 4 as the maximal subsampling ratio */
+	if (rec->src_height < N_VERT_Y_TAPS*4 ||
+	    rec->src_width  < N_HORIZ_Y_TAPS*4)
+		return -EINVAL;
+
+	/* check alignment constraints */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+	case I915_OVERLAY_RGB:
+		/* not implemented */
+		return -EINVAL;
+
+	case I915_OVERLAY_YUV_PACKED:
+		if (uv_vscale != 1)
+			return -EINVAL;
+
+		depth = packed_depth_bytes(rec->flags);
+		if (depth < 0)
+			return depth;
+
+		/* ignore UV planes */
+		rec->stride_UV = 0;
+		rec->offset_U = 0;
+		rec->offset_V = 0;
+		/* check pixel alignment */
+		if (rec->offset_Y % depth)
+			return -EINVAL;
+		break;
+
+	case I915_OVERLAY_YUV_PLANAR:
+		if (uv_vscale < 0 || uv_hscale < 0)
+			return -EINVAL;
+		/* no offset restrictions for planar formats */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (rec->src_width % uv_hscale)
+		return -EINVAL;
+
+	/* stride checking */
+	if (IS_I830(dev) || IS_845G(dev))
+		stride_mask = 255;
+	else
+		stride_mask = 63;
+
+	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+		return -EINVAL;
+	if (IS_GEN4(dev) && rec->stride_Y < 512)
+		return -EINVAL;
+
+	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+		4096 : 8192;
+	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+		return -EINVAL;
+
+	/* check buffer dimensions */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+	case I915_OVERLAY_RGB:
+	case I915_OVERLAY_YUV_PACKED:
+		/* the packed source line (see packed_width_bytes()) must
+		 * fit within the Y stride */
+		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+			return -EINVAL;
+
+		tmp = rec->stride_Y*rec->src_height;
+		if (rec->offset_Y + tmp > new_bo->base.size)
+			return -EINVAL;
+		break;
+
+	case I915_OVERLAY_YUV_PLANAR:
+		if (rec->src_width > rec->stride_Y)
+			return -EINVAL;
+		if (rec->src_width/uv_hscale > rec->stride_UV)
+			return -EINVAL;
+
+		tmp = rec->stride_Y * rec->src_height;
+		if (rec->offset_Y + tmp > new_bo->base.size)
+			return -EINVAL;
+
+		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
+			return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * intel_panel_fitter_pipe - report which pipe the panel fitter is connected to
+ * @dev: drm device
+ *
+ * Returns the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use.
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32  pfit_control;
+
+	/* i830 and pre-965 desktop chips don't have a panel fitter */
+	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+		return -1;
+
+	pfit_control = I915_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+
+	/* 965 can place panel fitter on either pipe */
+	if (IS_GEN4(dev))
+		return (pfit_control >> 29) & 0x3;
+
+	/* older chips can only use pipe 1 */
+	return 1;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_put_image *put_image_rec = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_crtc *drmmode_crtc;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *new_bo;
+	struct put_image_params *params;
+	int ret;
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+		drm_modeset_lock_all(dev);
+		mutex_lock(&dev->struct_mutex);
+
+		ret = intel_overlay_switch_off(overlay);
+
+		mutex_unlock(&dev->struct_mutex);
+		drm_modeset_unlock_all(dev);
+
+		return ret;
+	}
+
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+	if (!drmmode_crtc) {
+		ret = -ENOENT;
+		goto out_free;
+	}
+	crtc = to_intel_crtc(drmmode_crtc);
+
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
+	if (&new_bo->base == NULL) {
+		ret = -ENOENT;
+		goto out_free;
+	}
+
+	drm_modeset_lock_all(dev);
+	mutex_lock(&dev->struct_mutex);
+
+	if (new_bo->tiling_mode) {
+		DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = intel_overlay_recover_from_interrupt(overlay);
+	if (ret != 0)
+		goto out_unlock;
+
+	if (overlay->crtc != crtc) {
+		struct drm_display_mode *mode = &crtc->base.mode;
+		ret = intel_overlay_switch_off(overlay);
+		if (ret != 0)
+			goto out_unlock;
+
+		ret = check_overlay_possible_on_crtc(overlay, crtc);
+		if (ret != 0)
+			goto out_unlock;
+
+		overlay->crtc = crtc;
+		crtc->overlay = overlay;
+
+		/* line too wide, i.e. one-line-mode */
+		if (mode->hdisplay > 1024 &&
+		    intel_panel_fitter_pipe(dev) == crtc->pipe) {
+			overlay->pfit_active = true;
+			update_pfit_vscale_ratio(overlay);
+		} else
+			overlay->pfit_active = false;
+	}
+
+	ret = check_overlay_dst(overlay, put_image_rec);
+	if (ret != 0)
+		goto out_unlock;
+
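+	/* If the panel fitter is scaling vertically, map the requested
+	 * destination coordinates back into pre-fitter space. Illustrative
+	 * example: with a 2x upscale (pfit_vscale_ratio == 2 << 12) a
+	 * requested dst_y of 100 becomes (100 << 12) / (2 << 12) = 50. */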
+	if (overlay->pfit_active) {
+		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+				 overlay->pfit_vscale_ratio);
+		/* shifting right rounds downwards, so add 1 */
+		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+				 overlay->pfit_vscale_ratio) + 1;
+	} else {
+		params->dst_y = put_image_rec->dst_y;
+		params->dst_h = put_image_rec->dst_height;
+	}
+	params->dst_x = put_image_rec->dst_x;
+	params->dst_w = put_image_rec->dst_width;
+
+	params->src_w = put_image_rec->src_width;
+	params->src_h = put_image_rec->src_height;
+	params->src_scan_w = put_image_rec->src_scan_width;
+	params->src_scan_h = put_image_rec->src_scan_height;
+	if (params->src_scan_h > params->src_h ||
+	    params->src_scan_w > params->src_w) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = check_overlay_src(dev, put_image_rec, new_bo);
+	if (ret != 0)
+		goto out_unlock;
+	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+	params->stride_Y = put_image_rec->stride_Y;
+	params->stride_UV = put_image_rec->stride_UV;
+	params->offset_Y = put_image_rec->offset_Y;
+	params->offset_U = put_image_rec->offset_U;
+	params->offset_V = put_image_rec->offset_V;
+
+	/* Check scaling after src size to prevent a divide-by-zero. */
+	ret = check_overlay_scaling(params);
+	if (ret != 0)
+		goto out_unlock;
+
+	ret = intel_overlay_do_put_image(overlay, new_bo, params);
+	if (ret != 0)
+		goto out_unlock;
+
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+
+	kfree(params);
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
+	kfree(params);
+
+	return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+			     struct overlay_registers __iomem *regs)
+{
+	iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+		  &regs->OCLRC0);
+	iowrite32(overlay->saturation, &regs->OCLRC1);
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+	int i;
+
+	if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+		return false;
+
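+	/* Each byte-wide channel must increase strictly from gamma1 to
+	 * gamma2, e.g. 0x202020 -> 0x404040 passes while
+	 * 0x202020 -> 0x402020 does not. */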
+	for (i = 0; i < 3; i++) {
+		if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma5 >> i*8) & 0xff) == 0x80)
+			return false;
+	}
+
+	return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+	if (!check_gamma_bounds(0, attrs->gamma0) ||
+	    !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+	    !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+	    !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+	    !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+	    !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+	    !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+		return -EINVAL;
+
+	if (!check_gamma5_errata(attrs->gamma5))
+		return -EINVAL;
+
+	return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_attrs *attrs = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct overlay_registers __iomem *regs;
+	int ret;
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	drm_modeset_lock_all(dev);
+	mutex_lock(&dev->struct_mutex);
+
+	ret = -EINVAL;
+	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+		attrs->color_key  = overlay->color_key;
+		attrs->brightness = overlay->brightness;
+		attrs->contrast   = overlay->contrast;
+		attrs->saturation = overlay->saturation;
+
+		if (!IS_GEN2(dev)) {
+			attrs->gamma0 = I915_READ(OGAMC0);
+			attrs->gamma1 = I915_READ(OGAMC1);
+			attrs->gamma2 = I915_READ(OGAMC2);
+			attrs->gamma3 = I915_READ(OGAMC3);
+			attrs->gamma4 = I915_READ(OGAMC4);
+			attrs->gamma5 = I915_READ(OGAMC5);
+		}
+	} else {
+		if (attrs->brightness < -128 || attrs->brightness > 127)
+			goto out_unlock;
+		if (attrs->contrast > 255)
+			goto out_unlock;
+		if (attrs->saturation > 1023)
+			goto out_unlock;
+
+		overlay->color_key  = attrs->color_key;
+		overlay->brightness = attrs->brightness;
+		overlay->contrast   = attrs->contrast;
+		overlay->saturation = attrs->saturation;
+
+		regs = intel_overlay_map_regs(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		update_reg_attrs(overlay, regs);
+
+		intel_overlay_unmap_regs(overlay, regs);
+
+		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+			if (IS_GEN2(dev))
+				goto out_unlock;
+
+			if (overlay->active) {
+				ret = -EBUSY;
+				goto out_unlock;
+			}
+
+			ret = check_gamma(attrs);
+			if (ret)
+				goto out_unlock;
+
+			I915_WRITE(OGAMC0, attrs->gamma0);
+			I915_WRITE(OGAMC1, attrs->gamma1);
+			I915_WRITE(OGAMC2, attrs->gamma2);
+			I915_WRITE(OGAMC3, attrs->gamma3);
+			I915_WRITE(OGAMC4, attrs->gamma4);
+			I915_WRITE(OGAMC5, attrs->gamma5);
+		}
+	}
+	overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
+
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_i915_gem_object *reg_bo;
+	struct overlay_registers __iomem *regs;
+	int ret;
+
+	if (!HAS_OVERLAY(dev))
+		return;
+
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+	if (!overlay)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	if (WARN_ON(dev_priv->overlay))
+		goto out_free;
+
+	overlay->dev = dev;
+
+	reg_bo = NULL;
+	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+		reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
+		reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
+		goto out_free;
+	overlay->reg_bo = reg_bo;
+
+	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
+		if (ret) {
+			DRM_ERROR("failed to attach phys overlay regs\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
+	} else {
+		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
+		if (ret) {
+			DRM_ERROR("failed to pin overlay register bo\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+
+		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+		if (ret) {
+			DRM_ERROR("failed to move overlay register bo into the GTT\n");
+			goto out_unpin_bo;
+		}
+	}
+
+	/* init all values */
+	overlay->color_key = 0x0101fe;
+	overlay->color_key_enabled = true;
+	overlay->brightness = -19;
+	overlay->contrast = 75;
+	overlay->saturation = 146;
+
+	regs = intel_overlay_map_regs(overlay);
+	if (!regs)
+		goto out_unpin_bo;
+
+	memset_io(regs, 0, sizeof(struct overlay_registers));
+	update_polyphase_filter(regs);
+	update_reg_attrs(overlay, regs);
+
+	intel_overlay_unmap_regs(overlay, regs);
+
+	dev_priv->overlay = overlay;
+	mutex_unlock(&dev->struct_mutex);
+	DRM_INFO("initialized overlay support\n");
+	return;
+
+out_unpin_bo:
+	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+		i915_gem_object_ggtt_unpin(reg_bo);
+out_free_bo:
+	drm_gem_object_unreference(&reg_bo->base);
+out_free:
+	mutex_unlock(&dev->struct_mutex);
+	kfree(overlay);
+	return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->overlay)
+		return;
+
+	/* The BOs should have been freed by the generic code already.
+	 * Furthermore modesetting teardown happens beforehand, so the
+	 * hardware should be off already. */
+	WARN_ON(dev_priv->overlay->active);
+
+	drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+	kfree(dev_priv->overlay);
+}
+
+struct intel_overlay_error_state {
+	struct overlay_registers regs;
+	unsigned long base;
+	u32 dovsta;
+	u32 isr;
+};
+
+static struct overlay_registers __iomem *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers __iomem *regs;
+
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		/* Cast to make sparse happy, but it's wc memory anyway, so
+		 * equivalent to the wc io mapping on X86. */
+		regs = (struct overlay_registers __iomem *)
+			overlay->reg_bo->phys_handle->vaddr;
+	else
+		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+						i915_gem_obj_ggtt_offset(overlay->reg_bo));
+
+	return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+					struct overlay_registers __iomem *regs)
+{
+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		io_mapping_unmap_atomic(regs);
+}
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay = dev_priv->overlay;
+	struct intel_overlay_error_state *error;
+	struct overlay_registers __iomem *regs;
+
+	if (!overlay || !overlay->active)
+		return NULL;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	error->dovsta = I915_READ(DOVSTA);
+	error->isr = I915_READ(ISR);
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
+	else
+		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs)
+		goto err;
+
+	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+	intel_overlay_unmap_regs_atomic(overlay, regs);
+
+	return error;
+
+err:
+	kfree(error);
+	return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
+				struct intel_overlay_error_state *error)
+{
+	i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+			  error->dovsta, error->isr);
+	i915_error_printf(m, "  Register file at 0x%08lx:\n",
+			  error->base);
+
+#define P(x) i915_error_printf(m, "    " #x ":	0x%08x\n", error->regs.x)
+	P(OBUF_0Y);
+	P(OBUF_1Y);
+	P(OBUF_0U);
+	P(OBUF_0V);
+	P(OBUF_1U);
+	P(OBUF_1V);
+	P(OSTRIDE);
+	P(YRGB_VPH);
+	P(UV_VPH);
+	P(HORZ_PH);
+	P(INIT_PHS);
+	P(DWINPOS);
+	P(DWINSZ);
+	P(SWIDTH);
+	P(SWIDTHSW);
+	P(SHEIGHT);
+	P(YRGBSCALE);
+	P(UVSCALE);
+	P(OCLRC0);
+	P(OCLRC1);
+	P(DCLRKV);
+	P(DCLRKM);
+	P(SCLRKVH);
+	P(SCLRKVL);
+	P(SCLRKEN);
+	P(OCONFIG);
+	P(OCMD);
+	P(OSTART_0Y);
+	P(OSTART_1Y);
+	P(OSTART_0U);
+	P(OSTART_0V);
+	P(OSTART_1U);
+	P(OSTART_1V);
+	P(OTILEOFF_0Y);
+	P(OTILEOFF_1Y);
+	P(OTILEOFF_0U);
+	P(OTILEOFF_0V);
+	P(OTILEOFF_1U);
+	P(OTILEOFF_1V);
+	P(FASTHSCALE);
+	P(UVSCALEV);
+#undef P
+}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 0000000..a24df35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,1841 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ *      Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/pwm.h>
+#include "intel_drv.h"
+
+#define CRC_PMIC_PWM_PERIOD_NS	21333
+
+void
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+		       struct drm_display_mode *adjusted_mode)
+{
+	drm_mode_copy(adjusted_mode, fixed_mode);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS/eDP in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Finds the reduced downclock for LVDS/eDP in EDID. Returns a duplicate of
+ * the downclock mode, or NULL if no downclock is available.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+			struct drm_display_mode *fixed_mode,
+			struct drm_connector *connector)
+{
+	struct drm_display_mode *scan, *tmp_mode;
+	int temp_downclock;
+
+	temp_downclock = fixed_mode->clock;
+	tmp_mode = NULL;
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a probed mode has the same timings as the fixed panel
+		 * mode but a different refresh rate, a reduced downclock has
+		 * been found. In that case we can program different FPx0/1
+		 * values to dynamically select between the low and the high
+		 * frequency.
+		 */
+		if (scan->hdisplay == fixed_mode->hdisplay &&
+		    scan->hsync_start == fixed_mode->hsync_start &&
+		    scan->hsync_end == fixed_mode->hsync_end &&
+		    scan->htotal == fixed_mode->htotal &&
+		    scan->vdisplay == fixed_mode->vdisplay &&
+		    scan->vsync_start == fixed_mode->vsync_start &&
+		    scan->vsync_end == fixed_mode->vsync_end &&
+		    scan->vtotal == fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * A downclock has already been found, but
+				 * keep looking for an even lower one.
+				 */
+				temp_downclock = scan->clock;
+				tmp_mode = scan;
+			}
+		}
+	}
+
+	if (temp_downclock < fixed_mode->clock)
+		return drm_mode_duplicate(dev, tmp_mode);
+	else
+		return NULL;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
+			struct intel_crtc_state *pipe_config,
+			int fitting_mode)
+{
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int x = 0, y = 0, width = 0, height = 0;
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+		goto done;
+
+	switch (fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		width = pipe_config->pipe_src_w;
+		height = pipe_config->pipe_src_h;
+		x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
+		y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
+		break;
+
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		{
+			u32 scaled_width = adjusted_mode->crtc_hdisplay
+				* pipe_config->pipe_src_h;
+			u32 scaled_height = pipe_config->pipe_src_w
+				* adjusted_mode->crtc_vdisplay;
+			if (scaled_width > scaled_height) { /* pillar */
+				width = scaled_height / pipe_config->pipe_src_h;
+				if (width & 1)
+					width++;
+				x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+				y = 0;
+				height = adjusted_mode->crtc_vdisplay;
+			} else if (scaled_width < scaled_height) { /* letter */
+				height = scaled_width / pipe_config->pipe_src_w;
+				if (height & 1)
+					height++;
+				y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+				x = 0;
+				width = adjusted_mode->crtc_hdisplay;
+			} else {
+				x = y = 0;
+				width = adjusted_mode->crtc_hdisplay;
+				height = adjusted_mode->crtc_vdisplay;
+			}
+		}
+		break;
+
+	case DRM_MODE_SCALE_FULLSCREEN:
+		x = y = 0;
+		width = adjusted_mode->crtc_hdisplay;
+		height = adjusted_mode->crtc_vdisplay;
+		break;
+
+	default:
+		WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+		return;
+	}
+
+done:
+	pipe_config->pch_pfit.pos = (x << 16) | y;
+	pipe_config->pch_pfit.size = (width << 16) | height;
+	pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
+}
+
+static void
+centre_horizontally(struct drm_display_mode *adjusted_mode,
+		    int width)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the hsync and hblank widths constant */
+	sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+	blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+	border += border & 1; /* make the border even */
+
+	adjusted_mode->crtc_hdisplay = width;
+	adjusted_mode->crtc_hblank_start = width + border;
+	adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
+
+	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
+	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *adjusted_mode,
+		  int height)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the vsync and vblank widths constant */
+	sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+	blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+
+	adjusted_mode->crtc_vdisplay = height;
+	adjusted_mode->crtc_vblank_start = height + border;
+	adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
+
+	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
+	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+	/*
+	 * Floating point operations are not supported, so FACTOR is
+	 * defined to avoid floating point computation when calculating
+	 * the panel ratio.
+	 */
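+	/* Illustrative example: source = 768, target = 1024 gives
+	 * ratio = 768 * 4096 / 1024 = 3072, i.e. 0.75 in 12-bit fixed
+	 * point. */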
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+	u32 ratio = source * FACTOR / target;
+	return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
+
+static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
+			      u32 *pfit_control)
+{
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	u32 scaled_width = adjusted_mode->crtc_hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->crtc_vdisplay;
+
+	/* 965+ is easy, it does everything in hw */
+	if (scaled_width > scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_PILLAR;
+	else if (scaled_width < scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_LETTER;
+	else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
+		*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
+			      u32 *pfit_control, u32 *pfit_pgm_ratios,
+			      u32 *border)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	u32 scaled_width = adjusted_mode->crtc_hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->crtc_vdisplay;
+	u32 bits;
+
+	/*
+	 * For earlier chips we have to calculate the scaling
+	 * ratio by hand and program it into the
+	 * PFIT_PGM_RATIOS register.
+	 */
+	if (scaled_width > scaled_height) { /* pillar */
+		centre_horizontally(adjusted_mode,
+				    scaled_height /
+				    pipe_config->pipe_src_h);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+						    adjusted_mode->crtc_vdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else if (scaled_width < scaled_height) { /* letter */
+		centre_vertically(adjusted_mode,
+				  scaled_width /
+				  pipe_config->pipe_src_w);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+						    adjusted_mode->crtc_hdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else {
+		/* Aspect ratios match, let the hw scale in both directions */
+		*pfit_control |= (PFIT_ENABLE |
+				  VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+				  VERT_INTERP_BILINEAR |
+				  HORIZ_INTERP_BILINEAR);
+	}
+}
+
+void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
+			      struct intel_crtc_state *pipe_config,
+			      int fitting_mode)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+		goto out;
+
+	switch (fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		/*
+		 * For centered modes, we have to calculate border widths &
+		 * heights and modify the values programmed into the CRTC.
+		 */
+		centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+		centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
+		border = LVDS_BORDER_ENABLE;
+		break;
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		if (INTEL_INFO(dev)->gen >= 4)
+			i965_scale_aspect(pipe_config, &pfit_control);
+		else
+			i9xx_scale_aspect(pipe_config, &pfit_control,
+					  &pfit_pgm_ratios, &border);
+		break;
+	case DRM_MODE_SCALE_FULLSCREEN:
+		/*
+		 * Full scaling, even if it changes the aspect ratio.
+		 * Fortunately this is all done for us in hw.
+		 */
+		if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
+		    pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
+		break;
+	default:
+		WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+		return;
+	}
+
+	/* 965+ wants fuzzy fitting */
+	/* FIXME: handle multiple panels by failing gracefully */
+	if (INTEL_INFO(dev)->gen >= 4)
+		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+				 PFIT_FILTER_FUZZY);
+
+out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
+
+	/* Make sure pre-965 chips set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+	pipe_config->gmch_pfit.control = pfit_control;
+	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+	pipe_config->gmch_pfit.lvds_border_bits = border;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Assume that the BIOS does not lie through the OpRegion... */
+	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+		return *dev_priv->opregion.lid_state & 0x1 ?
+			connector_status_connected :
+			connector_status_disconnected;
+	}
+
+	switch (i915.panel_ignore_lid) {
+	case -2:
+		return connector_status_connected;
+	case -1:
+		return connector_status_disconnected;
+	default:
+		return connector_status_unknown;
+	}
+}
+
+/**
+ * scale - scale values from one range to another
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum of the source range
+ * @source_max: maximum of the source range
+ * @target_min: minimum of the target range
+ * @target_max: maximum of the target range
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+		      uint32_t source_min, uint32_t source_max,
+		      uint32_t target_min, uint32_t target_max)
+{
+	uint64_t target_val;
+
+	WARN_ON(source_min > source_max);
+	WARN_ON(target_min > target_max);
+
+	/* defensive */
+	source_val = clamp(source_val, source_min, source_max);
+
+	/* avoid overflows */
+	target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
+			(target_max - target_min), source_max - source_min);
+	target_val += target_min;
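+	/* e.g. (illustrative) scale(128, 0, 255, 100, 1000) yields
+	 * DIV_ROUND_CLOSEST_ULL(128 * 900, 255) + 100 = 452 + 100 = 552. */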
+
+	return target_val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(user_level, 0, user_max,
+		     panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+	u32 hw_level;
+
+	hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+	hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+	return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+				   u32 hw_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(hw_level, panel->backlight.min, panel->backlight.max,
+		     0, user_max);
+}
+
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+					  u32 val)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	WARN_ON(panel->backlight.max == 0);
+
+	if (i915.invert_brightness < 0)
+		return val;
+
+	if (i915.invert_brightness > 0 ||
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+		return panel->backlight.max - val;
+	}
+
+	return val;
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 val;
+
+	val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	if (INTEL_INFO(dev)->gen < 4)
+		val >>= 1;
+
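+	/* In combination mode the duty cycle read from BLC_PWM_CTL is
+	 * scaled by the legacy backlight brightness byte (LBPC) in PCI
+	 * config space. */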
+	if (panel->backlight.combination_mode) {
+		u8 lbpc;
+
+		pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+		val *= lbpc;
+	}
+
+	return val;
+}
+
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return 0;
+
+	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+
+	return _vlv_get_backlight(dev, pipe);
+}
+
+static u32 bxt_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
+}
+
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns;
+
+	duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
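+	/* Report the level as a percentage of the fixed 21333 ns PWM
+	 * period, e.g. (illustrative) a 10666 ns duty cycle reads back
+	 * as 50. */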
+	return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
+
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 val = 0;
+
+	mutex_lock(&dev_priv->backlight_lock);
+
+	if (panel->backlight.enabled) {
+		val = panel->backlight.get(connector);
+		val = intel_panel_compute_brightness(connector, val);
+	}
+
+	mutex_unlock(&dev_priv->backlight_lock);
+
+	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+	return val;
+}
+
+static void lpt_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 tmp, mask;
+
+	WARN_ON(panel->backlight.max == 0);
+
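+	/* Split the requested level into a coarse LBPC factor written to
+	 * PCI config space and a residual duty cycle, e.g. (illustrative)
+	 * level = 128 with max = 255 gives lbpc = 128 * 0xfe / 255 + 1 =
+	 * 128 and a residual level of 1. */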
+	if (panel->backlight.combination_mode) {
+		u8 lbpc;
+
+		lbpc = level * 0xfe / panel->backlight.max + 1;
+		level /= lbpc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+	}
+
+	if (IS_GEN4(dev)) {
+		mask = BACKLIGHT_DUTY_CYCLE_MASK;
+	} else {
+		level <<= 1;
+		mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+	}
+
+	tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+	I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
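+
+/*
+ * Worked example of the combination-mode split above, with illustrative
+ * values not taken from any particular machine: with
+ * panel->backlight.max = 255 and level = 200, lbpc = 200 * 0xfe / 255 + 1
+ * = 200 and the PWM duty becomes level / lbpc = 1. Reading back,
+ * i9xx_get_backlight() multiplies the duty by LBPC again (1 * 200 = 200),
+ * recovering the requested level.
+ */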
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void bxt_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+}
+
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+	pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+	level = intel_panel_compute_brightness(connector, level);
+	panel->backlight.set(connector, level);
+}
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+				      u32 user_level, u32 user_max)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 hw_level;
+
+	if (!panel->backlight.present)
+		return;
+
+	mutex_lock(&dev_priv->backlight_lock);
+
+	WARN_ON(panel->backlight.max == 0);
+
+	hw_level = scale_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;
+
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, hw_level);
+
+	mutex_unlock(&dev_priv->backlight_lock);
+}
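+
+/*
+ * For example, assuming scale_user_to_hw() performs the linear mapping
+ * described above, user_level = 128 with user_max = 255 on a panel whose
+ * hw range is [10..100] yields hw_level = 10 + 128 * (100 - 10) / 255 = 55.
+ * The hw min/max here are hypothetical; the real ones come from the setup
+ * hooks below.
+ */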
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+				    u32 user_level, u32 user_max)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 hw_level;
+
+	/*
+	 * INVALID_PIPE may occur during driver init because
+	 * connection_mutex isn't held across the entire backlight
+	 * setup + modeset readout, and the BIOS can issue the
+	 * requests at any time.
+	 */
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
+		return;
+
+	mutex_lock(&dev_priv->backlight_lock);
+
+	WARN_ON(panel->backlight.max == 0);
+
+	hw_level = clamp_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;
+
+	if (panel->backlight.device)
+		panel->backlight.device->props.brightness =
+			scale_hw_to_user(connector,
+					 panel->backlight.level,
+					 panel->backlight.device->props.max_brightness);
+
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, hw_level);
+
+	mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	/*
+	 * Although we don't support or enable CPU PWM with LPT/SPT based
+	 * systems, it may have been enabled prior to loading the
+	 * driver. Disable to avoid warnings on LCPLL disable.
+	 *
+	 * This needs rework if we need to add support for CPU PWM on PCH split
+	 * platforms.
+	 */
+	tmp = I915_READ(BLC_PWM_CPU_CTL2);
+	if (tmp & BLM_PWM_ENABLE) {
+		DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
+		I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+	}
+
+	tmp = I915_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+	tmp = I915_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+	intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+}
+
+static void bxt_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 tmp, val;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
+			tmp & ~BXT_BLC_PWM_ENABLE);
+
+	if (panel->backlight.controller == 1) {
+		val = I915_READ(UTIL_PIN_CTL);
+		val &= ~UTIL_PIN_ENABLE;
+		I915_WRITE(UTIL_PIN_CTL, val);
+	}
+}
+
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	/* Disable the backlight */
+	pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+	usleep_range(2000, 3000);
+	pwm_disable(panel->backlight.pwm);
+}
+
+void intel_panel_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	if (!panel->backlight.present)
+		return;
+
+	/*
+	 * Do not disable backlight on the vga_switcheroo path. When switching
+	 * away from i915, the other client may depend on i915 to handle the
+	 * backlight. This will leave the backlight on unnecessarily when
+	 * another client is not activated.
+	 */
+	if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->backlight_lock);
+
+	if (panel->backlight.device)
+		panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+	panel->backlight.enabled = false;
+	panel->backlight.disable(connector);
+
+	mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	}
+
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
+
+	/* After LPT, override is the default. */
+	if (HAS_PCH_LPT(dev_priv))
+		pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void pch_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	enum transcoder cpu_transcoder =
+		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	if (cpu_ctl2 & BLM_PWM_ENABLE) {
+		DRM_DEBUG_KMS("cpu backlight already enabled\n");
+		cpu_ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	}
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	}
+
+	if (cpu_transcoder == TRANSCODER_EDP)
+		cpu_ctl2 = BLM_TRANSCODER_EDP;
+	else
+		cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	POSTING_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, freq;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+	if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+		DRM_DEBUG_KMS("backlight already enabled\n");
+		I915_WRITE(BLC_PWM_CTL, 0);
+	}
+
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
+
+	ctl = freq << 17;
+	if (panel->backlight.combination_mode)
+		ctl |= BLM_LEGACY_MODE;
+	if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+		ctl |= BLM_POLARITY_PNV;
+
+	I915_WRITE(BLC_PWM_CTL, ctl);
+	POSTING_READ(BLC_PWM_CTL);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	/*
+	 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+	 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+	 * that has backlight.
+	 */
+	if (IS_GEN2(dev))
+		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+}
+
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2, freq;
+
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	if (ctl2 & BLM_PWM_ENABLE) {
+		DRM_DEBUG_KMS("backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CTL2, ctl2);
+	}
+
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
+
+	ctl = freq << 16;
+	I915_WRITE(BLC_PWM_CTL, ctl);
+
+	ctl2 = BLM_PIPE(pipe);
+	if (panel->backlight.combination_mode)
+		ctl2 |= BLM_COMBINATION_MODE;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(BLC_PWM_CTL2, ctl2);
+	POSTING_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void vlv_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2;
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	if (ctl2 & BLM_PWM_ENABLE) {
+		DRM_DEBUG_KMS("backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	}
+
+	ctl = panel->backlight.max << 16;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	ctl2 = 0;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+}
+
+static void bxt_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 pwm_ctl, val;
+
+	/*
+	 * To use the 2nd set of backlight registers, the utility pin has to
+	 * be enabled in PWM mode. The field should only be changed when the
+	 * utility pin is disabled.
+	 */
+	if (panel->backlight.controller == 1) {
+		val = I915_READ(UTIL_PIN_CTL);
+		if (val & UTIL_PIN_ENABLE) {
+			DRM_DEBUG_KMS("util pin already enabled\n");
+			val &= ~UTIL_PIN_ENABLE;
+			I915_WRITE(UTIL_PIN_CTL, val);
+		}
+
+		val = 0;
+		if (panel->backlight.util_pin_active_low)
+			val |= UTIL_PIN_POLARITY;
+		I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
+				UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+	}
+
+	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+		DRM_DEBUG_KMS("backlight already enabled\n");
+		pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+		I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
+				pwm_ctl);
+	}
+
+	I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
+			panel->backlight.max);
+
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	pwm_ctl = 0;
+	if (panel->backlight.active_low_pwm)
+		pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+	POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
+			pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	pwm_enable(panel->backlight.pwm);
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+void intel_panel_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+
+	if (!panel->backlight.present)
+		return;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+	mutex_lock(&dev_priv->backlight_lock);
+
+	WARN_ON(panel->backlight.max == 0);
+
+	if (panel->backlight.level <= panel->backlight.min) {
+		panel->backlight.level = panel->backlight.max;
+		if (panel->backlight.device)
+			panel->backlight.device->props.brightness =
+				scale_hw_to_user(connector,
+						 panel->backlight.level,
+						 panel->backlight.device->props.max_brightness);
+	}
+
+	panel->backlight.enable(connector);
+	panel->backlight.enabled = true;
+	if (panel->backlight.device)
+		panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+
+	mutex_unlock(&dev_priv->backlight_lock);
+}
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
+{
+	struct intel_connector *connector = bl_get_data(bd);
+	struct intel_panel *panel = &connector->panel;
+	struct drm_device *dev = connector->base.dev;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+		      bd->props.brightness, bd->props.max_brightness);
+	intel_panel_set_backlight(connector, bd->props.brightness,
+				  bd->props.max_brightness);
+
+	/*
+	 * Allow flipping bl_power as a sub-state of enabled. Sadly the
+	 * backlight class device does not make it easy to differentiate
+	 * between callbacks for brightness and bl_power, so our backlight_power
+	 * callback needs to take this into account.
+	 */
+	if (panel->backlight.enabled) {
+		if (panel->backlight.power) {
+			bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+				bd->props.brightness != 0;
+			panel->backlight.power(connector, enable);
+		}
+	} else {
+		bd->props.power = FB_BLANK_POWERDOWN;
+	}
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	return 0;
+}
+
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
+{
+	struct intel_connector *connector = bl_get_data(bd);
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hw_level;
+	int ret;
+
+	intel_runtime_pm_get(dev_priv);
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	hw_level = intel_panel_get_backlight(connector);
+	ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	intel_runtime_pm_put(dev_priv);
+
+	return ret;
+}
+
+static const struct backlight_ops intel_backlight_device_ops = {
+	.update_status = intel_backlight_device_update_status,
+	.get_brightness = intel_backlight_device_get_brightness,
+};
+
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	struct backlight_properties props;
+
+	if (WARN_ON(panel->backlight.device))
+		return -ENODEV;
+
+	if (!panel->backlight.present)
+		return 0;
+
+	WARN_ON(panel->backlight.max == 0);
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+
+	/*
+	 * Note: Everything should work even if the backlight device max
+	 * presented to the userspace is arbitrarily chosen.
+	 */
+	props.max_brightness = panel->backlight.max;
+	props.brightness = scale_hw_to_user(connector,
+					    panel->backlight.level,
+					    props.max_brightness);
+
+	if (panel->backlight.enabled)
+		props.power = FB_BLANK_UNBLANK;
+	else
+		props.power = FB_BLANK_POWERDOWN;
+
+	/*
+	 * Note: using the same name independent of the connector prevents
+	 * registration of multiple backlight devices in the driver.
+	 */
+	panel->backlight.device =
+		backlight_device_register("intel_backlight",
+					  connector->base.kdev,
+					  connector,
+					  &intel_backlight_device_ops, &props);
+
+	if (IS_ERR(panel->backlight.device)) {
+		DRM_ERROR("Failed to register backlight: %ld\n",
+			  PTR_ERR(panel->backlight.device));
+		panel->backlight.device = NULL;
+		return -ENODEV;
+	}
+
+	DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+		      connector->base.name);
+
+	return 0;
+}
+
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	if (panel->backlight.device) {
+		backlight_device_unregister(panel->backlight.device);
+		panel->backlight.device = NULL;
+	}
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+	return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * SPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 16 (default increment) or 128 (alternate increment selected in
+ * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
+ */
+static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mul, clock;
+
+	if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+		mul = 128;
+	else
+		mul = 16;
+
+	clock = MHz(24);
+
+	return clock / (pwm_freq_hz * mul);
+}
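+
+/*
+ * For example, with the default 16x increment and a hypothetical VBT
+ * frequency of 200 Hz, the value programmed is
+ * 24,000,000 / (200 * 16) = 7500 PWM clock periods per cycle.
+ */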
+
+/*
+ * LPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 128 (default increment) or 16 (alternate increment, selected in
+ * LPT SOUTH_CHICKEN2 register bit 5).
+ */
+static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mul, clock;
+
+	if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+		mul = 16;
+	else
+		mul = 128;
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+		clock = MHz(135); /* LPT:H */
+	else
+		clock = MHz(24); /* LPT:LP */
+
+	return clock / (pwm_freq_hz * mul);
+}
+
+/*
+ * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
+ * display raw clocks multiplied by 128.
+ */
+static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	int clock = MHz(intel_pch_rawclk(dev));
+
+	return clock / (pwm_freq_hz * 128);
+}
+
+/*
+ * Gen2: This field determines the number of time base events (display core
+ * clock frequency/32) in total for a complete cycle of modulated backlight
+ * control.
+ *
+ * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
+ * divided by 32.
+ */
+static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock;
+
+	if (IS_PINEVIEW(dev))
+		clock = intel_hrawclk(dev);
+	else
+		clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+
+	return clock / (pwm_freq_hz * 32);
+}
+
+/*
+ * Gen4: This value represents the period of the PWM stream in display core
+ * clocks multiplied by 128.
+ */
+static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+
+	return clock / (pwm_freq_hz * 128);
+}
+
+/*
+ * VLV: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
+ * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
+ */
+static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock;
+
+	if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+		if (IS_CHERRYVIEW(dev))
+			return KHz(19200) / (pwm_freq_hz * 16);
+		else
+			return MHz(25) / (pwm_freq_hz * 16);
+	} else {
+		clock = intel_hrawclk(dev);
+		return MHz(clock) / (pwm_freq_hz * 128);
+	}
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+	u32 pwm;
+
+	if (!pwm_freq_hz) {
+		DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+		return 0;
+	}
+
+	if (!panel->backlight.hz_to_pwm) {
+		DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
+		return 0;
+	}
+
+	pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+	if (!pwm) {
+		DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+		return 0;
+	}
+
+	DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
+
+	return pwm;
+}
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	int min;
+
+	WARN_ON(panel->backlight.max == 0);
+
+	/*
+	 * XXX: If the vbt value is 255, it makes min equal to max, which leads
+	 * to problems. There are such machines out there. Either our
+	 * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+	 * against this by letting the minimum be at most (arbitrarily chosen)
+	 * 25% of the max.
+	 */
+	min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+	if (min != dev_priv->vbt.backlight.min_brightness) {
+		DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+			      dev_priv->vbt.backlight.min_brightness, min);
+	}
+
+	/* vbt value is a coefficient in range [0..255] */
+	return scale(min, 0, 255, 0, panel->backlight.max);
+}
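+
+/*
+ * For example, a bogus VBT minimum of 255 is clamped to 64, so with a
+ * hypothetical panel->backlight.max of 7500 the returned minimum is
+ * scale(64, 0, 255, 0, 7500) = 1882, i.e. roughly the 25% cap described
+ * above.
+ */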
+
+static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+
+	if (!panel->backlight.max)
+		panel->backlight.max = get_backlight_max_vbt(connector);
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
+	val = lpt_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+
+	if (!panel->backlight.max)
+		panel->backlight.max = get_backlight_max_vbt(connector);
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
+	val = pch_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+		(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, val;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+
+	if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
+		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+	if (IS_PINEVIEW(dev))
+		panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+	panel->backlight.max = ctl >> 17;
+
+	if (!panel->backlight.max) {
+		panel->backlight.max = get_backlight_max_vbt(connector);
+		panel->backlight.max >>= 1;
+	}
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, ctl2, val;
+
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+	panel->backlight.max = ctl >> 16;
+
+	if (!panel->backlight.max)
+		panel->backlight.max = get_backlight_max_vbt(connector);
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, ctl2, val;
+
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return -ENODEV;
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
+	panel->backlight.max = ctl >> 16;
+
+	if (!panel->backlight.max)
+		panel->backlight.max = get_backlight_max_vbt(connector);
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
+	val = _vlv_get_backlight(dev, pipe);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pwm_ctl, val;
+
+	/*
+	 * For BXT, hard code the backlight controller to 0.
+	 * TODO: read the controller value from VBT and generalize.
+	 */
+	panel->backlight.controller = 0;
+
+	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+	/*
+	 * Keep the check for whether controller 1 is to be programmed. This
+	 * will come into effect once VBT parsing is fixed for controller
+	 * selection and controller 1 is used for a particular display
+	 * configuration.
+	 */
+	if (panel->backlight.controller == 1) {
+		val = I915_READ(UTIL_PIN_CTL);
+		panel->backlight.util_pin_active_low =
+					val & UTIL_PIN_POLARITY;
+	}
+
+	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+	panel->backlight.max =
+		I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+	if (!panel->backlight.max)
+		panel->backlight.max = get_backlight_max_vbt(connector);
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = bxt_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int pwm_setup_backlight(struct intel_connector *connector,
+			       enum pipe pipe)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	int retval;
+
+	/* Get the PWM chip for backlight control */
+	panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+	if (IS_ERR(panel->backlight.pwm)) {
+		DRM_ERROR("Failed to own the pwm chip\n");
+		panel->backlight.pwm = NULL;
+		return -ENODEV;
+	}
+
+	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+			    CRC_PMIC_PWM_PERIOD_NS);
+	if (retval < 0) {
+		DRM_ERROR("Failed to configure the pwm chip\n");
+		pwm_put(panel->backlight.pwm);
+		panel->backlight.pwm = NULL;
+		return retval;
+	}
+
+	panel->backlight.min = 0; /* 0% */
+	panel->backlight.max = 100; /* 100% */
+	panel->backlight.level = DIV_ROUND_UP(
+				 pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+				 CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
+	int ret;
+
+	if (!dev_priv->vbt.backlight.present) {
+		if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+			DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+		} else {
+			DRM_DEBUG_KMS("no backlight present per VBT\n");
+			return 0;
+		}
+	}
+
+	/* ensure intel_panel has been initialized first */
+	if (WARN_ON(!panel->backlight.setup))
+		return -ENODEV;
+
+	/* set level and max in panel struct */
+	mutex_lock(&dev_priv->backlight_lock);
+	ret = panel->backlight.setup(intel_connector, pipe);
+	mutex_unlock(&dev_priv->backlight_lock);
+
+	if (ret) {
+		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+			      connector->name);
+		return ret;
+	}
+
+	panel->backlight.present = true;
+
+	DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+		      connector->name,
+		      panel->backlight.enabled ? "enabled" : "disabled",
+		      panel->backlight.level, panel->backlight.max);
+
+	return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
+
+	/* dispose of the pwm */
+	if (panel->backlight.pwm)
+		pwm_put(panel->backlight.pwm);
+
+	panel->backlight.present = false;
+}
+
+/* Set up chip specific backlight functions */
+static void
+intel_panel_init_backlight_funcs(struct intel_panel *panel)
+{
+	struct intel_connector *intel_connector =
+		container_of(panel, struct intel_connector, panel);
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_BROXTON(dev)) {
+		panel->backlight.setup = bxt_setup_backlight;
+		panel->backlight.enable = bxt_enable_backlight;
+		panel->backlight.disable = bxt_disable_backlight;
+		panel->backlight.set = bxt_set_backlight;
+		panel->backlight.get = bxt_get_backlight;
+	} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
+		panel->backlight.setup = lpt_setup_backlight;
+		panel->backlight.enable = lpt_enable_backlight;
+		panel->backlight.disable = lpt_disable_backlight;
+		panel->backlight.set = lpt_set_backlight;
+		panel->backlight.get = lpt_get_backlight;
+		if (HAS_PCH_LPT(dev))
+			panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+		else
+			panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		panel->backlight.setup = pch_setup_backlight;
+		panel->backlight.enable = pch_enable_backlight;
+		panel->backlight.disable = pch_disable_backlight;
+		panel->backlight.set = pch_set_backlight;
+		panel->backlight.get = pch_get_backlight;
+		panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+	} else if (IS_VALLEYVIEW(dev)) {
+		if (dev_priv->vbt.has_mipi) {
+			panel->backlight.setup = pwm_setup_backlight;
+			panel->backlight.enable = pwm_enable_backlight;
+			panel->backlight.disable = pwm_disable_backlight;
+			panel->backlight.set = pwm_set_backlight;
+			panel->backlight.get = pwm_get_backlight;
+		} else {
+			panel->backlight.setup = vlv_setup_backlight;
+			panel->backlight.enable = vlv_enable_backlight;
+			panel->backlight.disable = vlv_disable_backlight;
+			panel->backlight.set = vlv_set_backlight;
+			panel->backlight.get = vlv_get_backlight;
+			panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+		}
+	} else if (IS_GEN4(dev)) {
+		panel->backlight.setup = i965_setup_backlight;
+		panel->backlight.enable = i965_enable_backlight;
+		panel->backlight.disable = i965_disable_backlight;
+		panel->backlight.set = i9xx_set_backlight;
+		panel->backlight.get = i9xx_get_backlight;
+		panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+	} else {
+		panel->backlight.setup = i9xx_setup_backlight;
+		panel->backlight.enable = i9xx_enable_backlight;
+		panel->backlight.disable = i9xx_disable_backlight;
+		panel->backlight.set = i9xx_set_backlight;
+		panel->backlight.get = i9xx_get_backlight;
+		panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+	}
+}
+
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode,
+		     struct drm_display_mode *downclock_mode)
+{
+	intel_panel_init_backlight_funcs(panel);
+
+	panel->fixed_mode = fixed_mode;
+	panel->downclock_mode = downclock_mode;
+
+	return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+	struct intel_connector *intel_connector =
+		container_of(panel, struct intel_connector, panel);
+
+	if (panel->fixed_mode)
+		drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+	if (panel->downclock_mode)
+		drm_mode_destroy(intel_connector->base.dev,
+				panel->downclock_mode);
+}
+
+void intel_backlight_register(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+		intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+		intel_backlight_device_unregister(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 0000000..fd4690e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,7368 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up
+ * automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE			(1<<0)
+#define INTEL_RC6p_ENABLE			(1<<1)
+#define INTEL_RC6pp_ENABLE			(1<<2)
+
+static void bxt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* WaDisableSDEUnitClockGating:bxt */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * FIXME:
+	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
+	 */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	tmp = I915_READ(CLKCFG);
+
+	switch (tmp & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_533:
+		dev_priv->fsb_freq = 533; /* 133*4 */
+		break;
+	case CLKCFG_FSB_800:
+		dev_priv->fsb_freq = 800; /* 200*4 */
+		break;
+	case CLKCFG_FSB_667:
+		dev_priv->fsb_freq =  667; /* 167*4 */
+		break;
+	case CLKCFG_FSB_400:
+		dev_priv->fsb_freq = 400; /* 100*4 */
+		break;
+	}
+
+	switch (tmp & CLKCFG_MEM_MASK) {
+	case CLKCFG_MEM_533:
+		dev_priv->mem_freq = 533;
+		break;
+	case CLKCFG_MEM_667:
+		dev_priv->mem_freq = 667;
+		break;
+	case CLKCFG_MEM_800:
+		dev_priv->mem_freq = 800;
+		break;
+	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->ips.r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->ips.c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->ips.c_m = 1;
+	} else {
+		dev_priv->ips.c_m = 2;
+	}
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
+	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
+	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
+	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
+	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
+
+	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
+	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
+	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
+	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
+	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
+
+	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
+	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
+	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
+	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
+	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
+
+	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
+	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
+	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
+	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
+	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
+
+	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
+	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
+	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
+	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
+	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
+
+	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
+	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
+	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
+	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+							 int is_ddr3,
+							 int fsb,
+							 int mem)
+{
+	const struct cxsr_latency *latency;
+	int i;
+
+	if (fsb == 0 || mem == 0)
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+		latency = &cxsr_latency_table[i];
+		if (is_desktop == latency->is_desktop &&
+		    is_ddr3 == latency->is_ddr3 &&
+		    fsb == latency->fsb_freq && mem == latency->mem_freq)
+			return latency;
+	}
+
+	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+	return NULL;
+}
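+
+/*
+ * For example, intel_get_cxsr_latency(1, 0, 800, 667) walks the table
+ * above and returns the desktop DDR2-667 entry for an 800 MHz FSB (the
+ * {1, 0, 800, 667, ...} row), while an FSB/memory combination not listed
+ * in the table returns NULL and the caller disables CxSR.
+ */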
+
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+	if (enable)
+		val &= ~FORCE_DDR_HIGH_FREQ;
+	else
+		val |= FORCE_DDR_HIGH_FREQ;
+	val &= ~FORCE_DDR_LOW_FREQ;
+	val |= FORCE_DDR_FREQ_REQ_ACK;
+	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	if (enable)
+		val |= DSP_MAXFIFO_PM5_ENABLE;
+	else
+		val &= ~DSP_MAXFIFO_PM5_ENABLE;
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+#define FW_WM(value, plane) \
+	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
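+
+/*
+ * For example, FW_WM(wm, SR) expands to
+ * (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), placing the self-refresh
+ * watermark into its field of the DSPFW register.
+ */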
+
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 val;
+
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+		POSTING_READ(FW_BLC_SELF_VLV);
+		dev_priv->wm.vlv.cxsr = enable;
+	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+		POSTING_READ(FW_BLC_SELF);
+	} else if (IS_PINEVIEW(dev)) {
+		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+		I915_WRITE(DSPFW3, val);
+		POSTING_READ(DSPFW3);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+		I915_WRITE(FW_BLC_SELF, val);
+		POSTING_READ(FW_BLC_SELF);
+	} else if (IS_I915GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, val);
+		POSTING_READ(INSTPM);
+	} else {
+		return;
+	}
+
+	DRM_DEBUG_KMS("memory self-refresh is %s\n",
+		      enable ? "enabled" : "disabled");
+}
+
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ *   - memory configuration (speed, channels)
+ *   - chipset
+ *   - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value.  It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int pessimal_latency_ns = 5000;
+
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
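+
+/*
+ * For example, VLV_FIFO_START(dsparb, dsparb2, 16, 8) builds a 9-bit FIFO
+ * start value from bits 23:16 of DSPARB (the low 8 bits) and bit 8 of
+ * DSPARB2 (bit 8), as used for pipe B's sprite0 below.
+ */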
+
+static int vlv_get_fifo_size(struct drm_device *dev,
+			      enum pipe pipe, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int sprite0_start, sprite1_start, size;
+
+	switch (pipe) {
+		uint32_t dsparb, dsparb2, dsparb3;
+	case PIPE_A:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+		break;
+	case PIPE_B:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+		break;
+	case PIPE_C:
+		dsparb2 = I915_READ(DSPARB2);
+		dsparb3 = I915_READ(DSPARB3);
+		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+		break;
+	default:
+		return 0;
+	}
+
+	switch (plane) {
+	case 0:
+		size = sprite0_start;
+		break;
+	case 1:
+		size = sprite1_start - sprite0_start;
+		break;
+	case 2:
+		size = 512 - 1 - sprite1_start;
+		break;
+	default:
+		return 0;
+	}
+
+	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
+		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
+		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
+		      size);
+
+	return size;
+}
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	if (plane)
+		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A", size);
+
+	return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x1ff;
+	if (plane)
+		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+	size >>= 1; /* Convert to cachelines */
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A", size);
+
+	return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	size >>= 2; /* Convert to cachelines */
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A",
+		      size);
+
+	return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_wm_info = {
+	.fifo_size = G4X_FIFO_SIZE,
+	.max_wm = G4X_MAX_WM,
+	.default_wm = G4X_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+	.fifo_size = VALLEYVIEW_FIFO_SIZE,
+	.max_wm = VALLEYVIEW_MAX_WM,
+	.default_wm = VALLEYVIEW_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+	.fifo_size = I945_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i915_wm_info = {
+	.fifo_size = I915_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i830_a_wm_info = {
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i830_bc_wm_info = {
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM/2,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i845_wm_info = {
+	.fifo_size = I830_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO, in cachelines
+ * @pixel_size: display pixel size, in bytes
+ * @latency_ns: memory latency for the platform, in nanoseconds
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again).  Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size.  When it reaches the watermark level, it'll start
+ * fetching FIFO line sized chunks from memory until the FIFO fills
+ * past the watermark point.  If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+					const struct intel_watermark_params *wm,
+					int fifo_size,
+					int pixel_size,
+					unsigned long latency_ns)
+{
+	long entries_required, wm_size;
+
+	/*
+	 * Note: we need to make sure we don't overflow for various clock &
+	 * latency values.
+	 * Clocks go from a few thousand to several hundred thousand.
+	 * Latency is usually a few thousand.
+	 */
+	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+		1000;
+	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+	wm_size = fifo_size - (entries_required + wm->guard_size);
+
+	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+	/* Don't promote wm_size to unsigned... */
+	if (wm_size > (long)wm->max_wm)
+		wm_size = wm->max_wm;
+	if (wm_size <= 0)
+		wm_size = wm->default_wm;
+
+	/*
+	 * Bspec seems to indicate that the value shouldn't be lower than
+	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+	 * Let's go for 8, which is the burst size, since certain platforms
+	 * already use a hardcoded 8 (which is what the spec says should be
+	 * done).
+	 */
+	if (wm_size <= 8)
+		wm_size = 8;
+
+	return wm_size;
+}
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+	struct drm_crtc *crtc, *enabled = NULL;
+
+	for_each_crtc(dev, crtc) {
+		if (intel_crtc_active(crtc)) {
+			if (enabled)
+				return NULL;
+			enabled = crtc;
+		}
+	}
+
+	return enabled;
+}
+
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
+{
+	struct drm_device *dev = unused_crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	const struct cxsr_latency *latency;
+	u32 reg;
+	unsigned long wm;
+
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+					 dev_priv->fsb_freq, dev_priv->mem_freq);
+	if (!latency) {
+		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+		intel_set_memory_cxsr(dev_priv, false);
+		return;
+	}
+
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		int clock = adjusted_mode->crtc_clock;
+
+		/* Display SR */
+		wm = intel_calculate_wm(clock, &pineview_display_wm,
+					pineview_display_wm.fifo_size,
+					pixel_size, latency->display_sr);
+		reg = I915_READ(DSPFW1);
+		reg &= ~DSPFW_SR_MASK;
+		reg |= FW_WM(wm, SR);
+		I915_WRITE(DSPFW1, reg);
+		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+		/* cursor SR */
+		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+					pineview_display_wm.fifo_size,
+					pixel_size, latency->cursor_sr);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_CURSOR_SR_MASK;
+		reg |= FW_WM(wm, CURSOR_SR);
+		I915_WRITE(DSPFW3, reg);
+
+		/* Display HPLL off SR */
+		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
+					pixel_size, latency->display_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_SR_MASK;
+		reg |= FW_WM(wm, HPLL_SR);
+		I915_WRITE(DSPFW3, reg);
+
+		/* cursor HPLL off SR */
+		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
+					pixel_size, latency->cursor_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_CURSOR_MASK;
+		reg |= FW_WM(wm, HPLL_CURSOR);
+		I915_WRITE(DSPFW3, reg);
+		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+		intel_set_memory_cxsr(dev_priv, true);
+	} else {
+		intel_set_memory_cxsr(dev_priv, false);
+	}
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+			    int plane,
+			    const struct intel_watermark_params *display,
+			    int display_latency_ns,
+			    const struct intel_watermark_params *cursor,
+			    int cursor_latency_ns,
+			    int *plane_wm,
+			    int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	if (!intel_crtc_active(crtc)) {
+		*cursor_wm = cursor->guard_size;
+		*plane_wm = display->guard_size;
+		return false;
+	}
+
+	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->crtc_htotal;
+	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
+	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+
+	/* Use the small buffer method to calculate plane watermark */
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*plane_wm = entries + display->guard_size;
+	if (*plane_wm > (int)display->max_wm)
+		*plane_wm = display->max_wm;
+
+	/* Use the large buffer method to calculate cursor watermark */
+	line_time_us = max(htotal * 1000 / clock, 1);
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
+	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+	if (*cursor_wm > (int)cursor->max_wm)
+		*cursor_wm = (int)cursor->max_wm;
+
+	return true;
+}
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+			   int display_wm, int cursor_wm,
+			   const struct intel_watermark_params *display,
+			   const struct intel_watermark_params *cursor)
+{
+	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+		      display_wm, cursor_wm);
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+			      display_wm, display->max_wm);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+			      cursor_wm, cursor->max_wm);
+		return false;
+	}
+
+	if (!(display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+		return false;
+	}
+
+	return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+			     int plane,
+			     int latency_ns,
+			     const struct intel_watermark_params *display,
+			     const struct intel_watermark_params *cursor,
+			     int *display_wm, int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
+	int hdisplay, htotal, pixel_size, clock;
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->crtc_htotal;
+	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
+	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+
+	line_time_us = max(htotal * 1000 / clock, 1);
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return g4x_check_srwm(dev,
+			      *display_wm, *cursor_wm,
+			      display, cursor);
+}
+
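+/* Like FW_WM, but using the VLV-specific field masks */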
+#define FW_WM_VLV(value, plane) \
+	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
+
+static void vlv_write_wm_values(struct intel_crtc *crtc,
+				const struct vlv_wm_values *wm)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	I915_WRITE(VLV_DDL(pipe),
+		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
+		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
+		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
+		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
+
+	I915_WRITE(DSPFW1,
+		   FW_WM(wm->sr.plane, SR) |
+		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
+		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
+		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+	I915_WRITE(DSPFW2,
+		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
+		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
+		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+	I915_WRITE(DSPFW3,
+		   FW_WM(wm->sr.cursor, CURSOR_SR));
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		I915_WRITE(DSPFW7_CHV,
+			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+		I915_WRITE(DSPFW8_CHV,
+			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
+			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+		I915_WRITE(DSPFW9_CHV,
+			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
+			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
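+		/* DSPHOWM holds the high-order bits of the wide wm values */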
+		I915_WRITE(DSPHOWM,
+			   FW_WM(wm->sr.plane >> 9, SR_HI) |
+			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
+			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
+			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
+			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+	} else {
+		I915_WRITE(DSPFW7,
+			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+		I915_WRITE(DSPHOWM,
+			   FW_WM(wm->sr.plane >> 9, SR_HI) |
+			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+	}
+
+	/* zero (unused) WM1 watermarks */
+	I915_WRITE(DSPFW4, 0);
+	I915_WRITE(DSPFW5, 0);
+	I915_WRITE(DSPFW6, 0);
+	I915_WRITE(DSPHOWM1, 0);
+
+	POSTING_READ(DSPFW1);
+}
+
+#undef FW_WM_VLV
+
+enum vlv_wm_level {
+	VLV_WM_LEVEL_PM2,
+	VLV_WM_LEVEL_PM5,
+	VLV_WM_LEVEL_DDR_DVFS,
+};
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+				   unsigned int pipe_htotal,
+				   unsigned int horiz_pixels,
+				   unsigned int bytes_per_pixel,
+				   unsigned int latency)
+{
+	unsigned int ret;
+
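+	/*
+	 * latency is in 0.1us units and pixel_rate in kHz, so
+	 * latency * pixel_rate / (htotal * 10000) is the number of whole
+	 * scanlines elapsing during the latency window; one extra line
+	 * covers the partial line, and the byte total is then rounded up
+	 * to 64-byte FIFO blocks.
+	 */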
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64);
+
+	return ret;
+}
+
+static void vlv_setup_wm_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* all latencies in usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+	}
+}
+
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+				     struct intel_crtc *crtc,
+				     const struct intel_plane_state *state,
+				     int level)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int clock, htotal, pixel_size, width, wm;
+
+	if (dev_priv->wm.pri_latency[level] == 0)
+		return USHRT_MAX;
+
+	if (!state->visible)
+		return 0;
+
+	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+	clock = crtc->config->base.adjusted_mode.crtc_clock;
+	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+	width = crtc->config->pipe_src_w;
+	if (WARN_ON(htotal == 0))
+		htotal = 1;
+
+	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+		/*
+		 * FIXME the formula gives values that are
+		 * too big for the cursor FIFO, and hence we
+		 * would never be able to use cursors. For
+		 * now just hardcode the watermark.
+		 */
+		wm = 63;
+	} else {
+		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+				    dev_priv->wm.pri_latency[level] * 10);
+	}
+
+	return min_t(int, wm, USHRT_MAX);
+}
+
+static void vlv_compute_fifo(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	unsigned int total_rate = 0;
+	const int fifo_size = 512 - 1;
+	int fifo_extra, fifo_left = fifo_size;
+
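+	/*
+	 * First pass: count the visible non-cursor planes and sum their
+	 * bytes-per-pixel so the 511-entry FIFO can be split among them
+	 * proportionally in the second pass.
+	 */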
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		if (state->visible) {
+			wm_state->num_active_planes++;
+			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+		unsigned int rate;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			plane->wm.fifo_size = 63;
+			continue;
+		}
+
+		if (!state->visible) {
+			plane->wm.fifo_size = 0;
+			continue;
+		}
+
+		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		plane->wm.fifo_size = fifo_size * rate / total_rate;
+		fifo_left -= plane->wm.fifo_size;
+	}
+
+	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
+
+	/* spread the remainder evenly */
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		int plane_extra;
+
+		if (fifo_left == 0)
+			break;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		/* give it all to the first plane if none are active */
+		if (plane->wm.fifo_size == 0 &&
+		    wm_state->num_active_planes)
+			continue;
+
+		plane_extra = min(fifo_extra, fifo_left);
+		plane->wm.fifo_size += plane_extra;
+		fifo_left -= plane_extra;
+	}
+
+	WARN_ON(fifo_left != 0);
+}
+
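+/*
+ * The wm values in crtc->wm_state are "space required" values; the
+ * registers are programmed with fifo_size minus that, so flip each value
+ * against the corresponding FIFO size here.
+ */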
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	int level;
+
+	for (level = 0; level < wm_state->num_levels; level++) {
+		struct drm_device *dev = crtc->base.dev;
+		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+		struct intel_plane *plane;
+
+		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+		for_each_intel_plane_on_crtc(dev, crtc, plane) {
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = plane->wm.fifo_size -
+					wm_state->wm[level].cursor;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = plane->wm.fifo_size -
+					wm_state->wm[level].primary;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+					wm_state->wm[level].sprite[sprite];
+				break;
+			}
+		}
+	}
+}
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+	int level;
+
+	memset(wm_state, 0, sizeof(*wm_state));
+
+	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+
+	wm_state->num_active_planes = 0;
+
+	vlv_compute_fifo(crtc);
+
+	if (wm_state->num_active_planes != 1)
+		wm_state->cxsr = false;
+
+	if (wm_state->cxsr) {
+		for (level = 0; level < wm_state->num_levels; level++) {
+			wm_state->sr[level].plane = sr_fifo_size;
+			wm_state->sr[level].cursor = 63;
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
+		if (!state->visible)
+			continue;
+
+		/* normal watermarks */
+		for (level = 0; level < wm_state->num_levels; level++) {
+			int wm = vlv_compute_wm_level(plane, crtc, state, level);
+			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+			/* hack */
+			if (WARN_ON(level == 0 && wm > max_wm))
+				wm = max_wm;
+
+			if (wm > plane->wm.fifo_size)
+				break;
+
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = wm;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = wm;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = wm;
+				break;
+			}
+		}
+
+		wm_state->num_levels = level;
+
+		if (!wm_state->cxsr)
+			continue;
+
+		/* maxfifo watermarks */
+		switch (plane->base.type) {
+			int sprite, level;
+		case DRM_PLANE_TYPE_CURSOR:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].cursor =
+					wm_state->wm[level].cursor;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].primary);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].sprite[sprite]);
+			break;
+		}
+	}
+
+	/* clear any (partially) filled invalid levels */
+	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+	}
+
+	vlv_invert_wms(crtc);
+}
+
+#define VLV_FIFO(plane, value) \
+	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_plane *plane;
+	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			WARN_ON(plane->wm.fifo_size != 63);
+			continue;
+		}
+
+		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			sprite0_start = plane->wm.fifo_size;
+		else if (plane->plane == 0)
+			sprite1_start = sprite0_start + plane->wm.fifo_size;
+		else
+			fifo_size = sprite1_start + plane->wm.fifo_size;
+	}
+
+	WARN_ON(fifo_size != 512 - 1);
+
+	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+		      pipe_name(crtc->pipe), sprite0_start,
+		      sprite1_start, fifo_size);
+
+	switch (crtc->pipe) {
+		uint32_t dsparb, dsparb2, dsparb3;
+	case PIPE_A:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+			    VLV_FIFO(SPRITEB, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+			   VLV_FIFO(SPRITEB, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+			     VLV_FIFO(SPRITEB_HI, 0x1));
+		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_B:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+			    VLV_FIFO(SPRITED, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+			   VLV_FIFO(SPRITED, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+			     VLV_FIFO(SPRITED_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_C:
+		dsparb3 = I915_READ(DSPARB3);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+			     VLV_FIFO(SPRITEF, 0xff));
+		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+			    VLV_FIFO(SPRITEF, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+			     VLV_FIFO(SPRITEF_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB3, dsparb3);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	default:
+		break;
+	}
+}
+
+#undef VLV_FIFO
+
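+/*
+ * Fold the per-crtc wm state into a single device-wide set of values: the
+ * usable level is the minimum across all active crtcs, and cxsr can only
+ * stay enabled when exactly one crtc is active.
+ */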
+static void vlv_merge_wm(struct drm_device *dev,
+			 struct vlv_wm_values *wm)
+{
+	struct intel_crtc *crtc;
+	int num_active_crtcs = 0;
+
+	wm->level = to_i915(dev)->wm.max_level;
+	wm->cxsr = true;
+
+	for_each_intel_crtc(dev, crtc) {
+		const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+		if (!crtc->active)
+			continue;
+
+		if (!wm_state->cxsr)
+			wm->cxsr = false;
+
+		num_active_crtcs++;
+		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+	}
+
+	if (num_active_crtcs != 1)
+		wm->cxsr = false;
+
+	if (num_active_crtcs > 1)
+		wm->level = VLV_WM_LEVEL_PM2;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct vlv_wm_state *wm_state = &crtc->wm_state;
+		enum pipe pipe = crtc->pipe;
+
+		if (!crtc->active)
+			continue;
+
+		wm->pipe[pipe] = wm_state->wm[wm->level];
+		if (wm->cxsr)
+			wm->sr = wm_state->sr[wm->level];
+
+		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+	}
+}
+
+static void vlv_update_wm(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	struct vlv_wm_values wm = {};
+
+	vlv_compute_wm(intel_crtc);
+	vlv_merge_wm(dev, &wm);
+
+	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+		/* FIXME should be part of crtc atomic commit */
+		vlv_pipe_set_fifo_size(intel_crtc);
+		return;
+	}
+
+	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, false);
+
+	if (wm.level < VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, false);
+
+	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
+		intel_set_memory_cxsr(dev_priv, false);
+
+	/* FIXME should be part of crtc atomic commit */
+	vlv_pipe_set_fifo_size(intel_crtc);
+
+	vlv_write_wm_values(intel_crtc, &wm);
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
+		intel_set_memory_cxsr(dev_priv, true);
+
+	if (wm.level >= VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, true);
+
+	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, true);
+
+	dev_priv->wm.vlv = wm;
+}
+
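+/* true iff exactly one plane bit is set in the mask */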
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
+static void g4x_update_wm(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	static const int sr_latency_ns = 12000;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int plane_sr, cursor_sr;
+	unsigned int enabled = 0;
+	bool cxsr_enabled;
+
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &g4x_wm_info, pessimal_latency_ns,
+			    &g4x_cursor_wm_info, pessimal_latency_ns,
+			    &planea_wm, &cursora_wm))
+		enabled |= 1 << PIPE_A;
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &g4x_wm_info, pessimal_latency_ns,
+			    &g4x_cursor_wm_info, pessimal_latency_ns,
+			    &planeb_wm, &cursorb_wm))
+		enabled |= 1 << PIPE_B;
+
+	if (single_plane_enabled(enabled) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     sr_latency_ns,
+			     &g4x_wm_info,
+			     &g4x_cursor_wm_info,
+			     &plane_sr, &cursor_sr)) {
+		cxsr_enabled = true;
+	} else {
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
+		plane_sr = cursor_sr = 0;
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
+		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+		      planea_wm, cursora_wm,
+		      planeb_wm, cursorb_wm,
+		      plane_sr, cursor_sr);
+
+	I915_WRITE(DSPFW1,
+		   FW_WM(plane_sr, SR) |
+		   FW_WM(cursorb_wm, CURSORB) |
+		   FW_WM(planeb_wm, PLANEB) |
+		   FW_WM(planea_wm, PLANEA));
+	I915_WRITE(DSPFW2,
+		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+		   FW_WM(cursora_wm, CURSORA));
+	/* HPLL off in SR has some issues on G4x... disable it */
+	I915_WRITE(DSPFW3,
+		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
+		   FW_WM(cursor_sr, CURSOR_SR));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void i965_update_wm(struct drm_crtc *unused_crtc)
+{
+	struct drm_device *dev = unused_crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	int srwm = 1;
+	int cursor_sr = 16;
+	bool cxsr_enabled;
+
+	/* Calc sr entries for one plane configs */
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
+		/* self-refresh has much higher latency */
+		static const int sr_latency_ns = 12000;
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->crtc_htotal;
+		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
+		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
+
+		line_time_us = max(htotal * 1000 / clock, 1);
+
+		/* Use ns/us then divide to preserve precision */
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+		srwm = I965_FIFO_SIZE - entries;
+		if (srwm < 0)
+			srwm = 1;
+		srwm &= 0x1ff;
+		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+			      entries, srwm);
+
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * crtc->cursor->state->crtc_w;
+		entries = DIV_ROUND_UP(entries,
+					  i965_cursor_wm_info.cacheline_size);
+		cursor_sr = i965_cursor_wm_info.fifo_size -
+			(entries + i965_cursor_wm_info.guard_size);
+
+		if (cursor_sr > i965_cursor_wm_info.max_wm)
+			cursor_sr = i965_cursor_wm_info.max_wm;
+
+		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+			      "cursor %d\n", srwm, cursor_sr);
+
+		cxsr_enabled = true;
+	} else {
+		cxsr_enabled = false;
+		/* Turn off self refresh if both pipes are enabled */
+		intel_set_memory_cxsr(dev_priv, false);
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+		      srwm);
+
+	/* 965 has limitations... */
+	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+		   FW_WM(8, CURSORB) |
+		   FW_WM(8, PLANEB) |
+		   FW_WM(8, PLANEA));
+	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+		   FW_WM(8, PLANEC_OLD));
+	/* update cursor SR watermark */
+	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
+}
+
+#undef FW_WM
+
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+{
+	struct drm_device *dev = unused_crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_watermark_params *wm_info;
+	uint32_t fwater_lo;
+	uint32_t fwater_hi;
+	int cwm, srwm = 1;
+	int fifo_size;
+	int planea_wm, planeb_wm;
+	struct drm_crtc *crtc, *enabled = NULL;
+
+	if (IS_I945GM(dev))
+		wm_info = &i945_wm_info;
+	else if (!IS_GEN2(dev))
+		wm_info = &i915_wm_info;
+	else
+		wm_info = &i830_a_wm_info;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = intel_get_crtc_for_plane(dev, 0);
+	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
+		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+					       wm_info, fifo_size, cpp,
+					       pessimal_latency_ns);
+		enabled = crtc;
+	} else {
+		planea_wm = fifo_size - wm_info->guard_size;
+		if (planea_wm > (long)wm_info->max_wm)
+			planea_wm = wm_info->max_wm;
+	}
+
+	if (IS_GEN2(dev))
+		wm_info = &i830_bc_wm_info;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+	crtc = intel_get_crtc_for_plane(dev, 1);
+	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
+		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+					       wm_info, fifo_size, cpp,
+					       pessimal_latency_ns);
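+		/* a second active pipe leaves no single crtc for self-refresh */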
+		if (enabled == NULL)
+			enabled = crtc;
+		else
+			enabled = NULL;
+	} else {
+		planeb_wm = fifo_size - wm_info->guard_size;
+		if (planeb_wm > (long)wm_info->max_wm)
+			planeb_wm = wm_info->max_wm;
+	}
+
+	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+	if (IS_I915GM(dev) && enabled) {
+		struct drm_i915_gem_object *obj;
+
+		obj = intel_fb_obj(enabled->primary->state->fb);
+
+		/* self-refresh seems busted with untiled */
+		if (obj->tiling_mode == I915_TILING_NONE)
+			enabled = NULL;
+	}
+
+	/*
+	 * Overlay gets an aggressive default since video jitter is bad.
+	 */
+	cwm = 2;
+
+	/* Play safe and disable self-refresh before adjusting watermarks. */
+	intel_set_memory_cxsr(dev_priv, false);
+
+	/* Calc sr entries for one plane configs */
+	if (HAS_FW_BLC(dev) && enabled) {
+		/* self-refresh has much higher latency */
+		static const int sr_latency_ns = 6000;
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->crtc_htotal;
+		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
+		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
+
+		line_time_us = max(htotal * 1000 / clock, 1);
+
+		/* Use ns/us then divide to preserve precision */
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+		srwm = wm_info->fifo_size - entries;
+		if (srwm < 0)
+			srwm = 1;
+
+		if (IS_I945G(dev) || IS_I945GM(dev))
+			I915_WRITE(FW_BLC_SELF,
+				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+		else if (IS_I915GM(dev))
+			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+		      planea_wm, planeb_wm, cwm, srwm);
+
+	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+	fwater_hi = (cwm & 0x1f);
+
+	/* Set request length to 8 cachelines per fetch */
+	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+	fwater_hi = fwater_hi | (1 << 8);
+
+	I915_WRITE(FW_BLC, fwater_lo);
+	I915_WRITE(FW_BLC2, fwater_hi);
+
+	if (enabled)
+		intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void i845_update_wm(struct drm_crtc *unused_crtc)
+{
+	struct drm_device *dev = unused_crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
+	uint32_t fwater_lo;
+	int planea_wm;
+
+	crtc = single_enabled_crtc(dev);
+	if (crtc == NULL)
+		return;
+
+	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+				       &i845_wm_info,
+				       dev_priv->display.get_fifo_size(dev, 0),
+				       4, pessimal_latency_ns);
+	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+	fwater_lo |= (3 << 8) | planea_wm;
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+	I915_WRITE(FW_BLC, fwater_lo);
+}
+
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+{
+	uint32_t pixel_rate;
+
+	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+
+	/*
+	 * We only use IF-ID interlacing. If we ever use PF-ID we'll need to
+	 * adjust the pixel_rate here.
+	 */
+
+	if (pipe_config->pch_pfit.enabled) {
+		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+		uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+		pipe_w = pipe_config->pipe_src_w;
+		pipe_h = pipe_config->pipe_src_h;
+
+		pfit_w = (pfit_size >> 16) & 0xFFFF;
+		pfit_h = pfit_size & 0xFFFF;
+		if (pipe_w < pfit_w)
+			pipe_w = pfit_w;
+		if (pipe_h < pfit_h)
+			pipe_h = pfit_h;
+
+		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+				     pfit_w * pfit_h);
+	}
+
+	return pixel_rate;
+}
+
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+			       uint32_t latency)
+{
+	uint64_t ret;
+
+	if (WARN(latency == 0, "Latency value missing\n"))
+		return UINT_MAX;
+
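+	/*
+	 * pixel_rate is in kHz and latency in 0.1us units, so the product
+	 * divided by 10000 is the number of bytes fetched during the
+	 * latency window; round up to 64-byte blocks, plus the same +2
+	 * both methods apply.
+	 */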
+	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
+
+	return ret;
+}
+
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t latency)
+{
+	uint32_t ret;
+
+	if (WARN(latency == 0, "Latency value missing\n"))
+		return UINT_MAX;
+
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64) + 2;
+	return ret;
+}
+
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+			   uint8_t bytes_per_pixel)
+{
+	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+}
+
+struct skl_pipe_wm_parameters {
+	bool active;
+	uint32_t pipe_htotal;
+	uint32_t pixel_rate; /* in KHz */
+	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
+};
+
+struct ilk_wm_maximums {
+	uint16_t pri;
+	uint16_t spr;
+	uint16_t cur;
+	uint16_t fbc;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+	unsigned int num_pipes_active;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
+				   uint32_t mem_value,
+				   bool is_lp)
+{
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	uint32_t method1, method2;
+
+	if (!cstate->base.active || !pstate->visible)
+		return 0;
+
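+	/*
+	 * method1 charges for the bytes fetched during the latency window;
+	 * method2 charges per whole scanline. For LP watermarks both are
+	 * computed and the smaller result is used.
+	 */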
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+
+	if (!is_lp)
+		return method1;
+
+	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+				 cstate->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&pstate->dst),
+				 bpp,
+				 mem_value);
+
+	return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
+				   uint32_t mem_value)
+{
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	uint32_t method1, method2;
+
+	if (!cstate->base.active || !pstate->visible)
+		return 0;
+
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+				 cstate->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&pstate->dst),
+				 bpp,
+				 mem_value);
+	return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
+				   uint32_t mem_value)
+{
+	/*
+	 * We treat the cursor plane as always-on for the purposes of watermark
+	 * calculation.  Until we have two-stage watermark programming merged,
+	 * this is necessary to avoid flickering.
+	 */
+	int cpp = 4;
+	int width = pstate->visible ? pstate->base.crtc_w : 64;
+
+	if (!cstate->base.active)
+		return 0;
+
+	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+			      cstate->base.adjusted_mode.crtc_htotal,
+			      width, cpp, mem_value);
+}
+
+/* Only for WM_LP. */
+static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
+				   uint32_t pri_val)
+{
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+
+	if (!cstate->base.active || !pstate->visible)
+		return 0;
+
+	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
+}
+
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 3072;
+	else if (INTEL_INFO(dev)->gen >= 7)
+		return 768;
+	else
+		return 512;
+}
+
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+					 int level, bool is_sprite)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		/* BDW primary/sprite plane watermarks */
+		return level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
+		/* IVB/HSW primary/sprite plane watermarks */
+		return level == 0 ? 127 : 1023;
+	else if (!is_sprite)
+		/* ILK/SNB primary plane watermarks */
+		return level == 0 ? 127 : 511;
+	else
+		/* ILK/SNB sprite plane watermarks */
+		return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+					  int level)
+{
+	if (INTEL_INFO(dev)->gen >= 7)
+		return level == 0 ? 63 : 255;
+	else
+		return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
+}
+
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+				     int level,
+				     const struct intel_wm_config *config,
+				     enum intel_ddb_partitioning ddb_partitioning,
+				     bool is_sprite)
+{
+	unsigned int fifo_size = ilk_display_fifo_size(dev);
+
+	/* if sprites aren't enabled, sprites get nothing */
+	if (is_sprite && !config->sprites_enabled)
+		return 0;
+
+	/* HSW allows LP1+ watermarks even with multiple pipes */
+	if (level == 0 || config->num_pipes_active > 1) {
+		fifo_size /= INTEL_INFO(dev)->num_pipes;
+
+		/*
+		 * For some reason the non self refresh
+		 * FIFO size is only half of the self
+		 * refresh FIFO size on ILK/SNB.
+		 */
+		if (INTEL_INFO(dev)->gen <= 6)
+			fifo_size /= 2;
+	}
+
+	if (config->sprites_enabled) {
+		/* level 0 is always calculated with 1:1 split */
+		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+			if (is_sprite)
+				fifo_size *= 5;
+			fifo_size /= 6;
+		} else {
+			fifo_size /= 2;
+		}
+	}
+
+	/* clamp to max that the registers can hold */
+	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+				      int level,
+				      const struct intel_wm_config *config)
+{
+	/* HSW LP1+ watermarks w/ multiple pipes */
+	if (level > 0 && config->num_pipes_active > 1)
+		return 64;
+
+	/* otherwise just report max that registers can hold */
+	return ilk_cursor_wm_reg_max(dev, level);
+}
+
+static void ilk_compute_wm_maximums(const struct drm_device *dev,
+				    int level,
+				    const struct intel_wm_config *config,
+				    enum intel_ddb_partitioning ddb_partitioning,
+				    struct ilk_wm_maximums *max)
+{
+	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+	max->cur = ilk_cursor_wm_max(dev, level, config);
+	max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
+static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+					int level,
+					struct ilk_wm_maximums *max)
+{
+	max->pri = ilk_plane_wm_reg_max(dev, level, false);
+	max->spr = ilk_plane_wm_reg_max(dev, level, true);
+	max->cur = ilk_cursor_wm_reg_max(dev, level);
+	max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
+static bool ilk_validate_wm_level(int level,
+				  const struct ilk_wm_maximums *max,
+				  struct intel_wm_level *result)
+{
+	bool ret;
+
+	/* already determined to be invalid? */
+	if (!result->enable)
+		return false;
+
+	result->enable = result->pri_val <= max->pri &&
+			 result->spr_val <= max->spr &&
+			 result->cur_val <= max->cur;
+
+	ret = result->enable;
+
+	/*
+	 * HACK until we can pre-compute everything,
+	 * and thus fail gracefully if LP0 watermarks
+	 * are exceeded...
+	 */
+	if (level == 0 && !result->enable) {
+		if (result->pri_val > max->pri)
+			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+				      level, result->pri_val, max->pri);
+		if (result->spr_val > max->spr)
+			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+				      level, result->spr_val, max->spr);
+		if (result->cur_val > max->cur)
+			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+				      level, result->cur_val, max->cur);
+
+		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+		result->enable = true;
+	}
+
+	return ret;
+}
+
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+				 const struct intel_crtc *intel_crtc,
+				 int level,
+				 struct intel_crtc_state *cstate,
+				 struct intel_wm_level *result)
+{
+	struct intel_plane *intel_plane;
+	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+	/* WM1+ latency values stored in 0.5us units */
+	if (level > 0) {
+		pri_latency *= 5;
+		spr_latency *= 5;
+		cur_latency *= 5;
+	}
+
+	for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
+		struct intel_plane_state *pstate =
+			to_intel_plane_state(intel_plane->base.state);
+
+		switch (intel_plane->base.type) {
+		case DRM_PLANE_TYPE_PRIMARY:
+			result->pri_val = ilk_compute_pri_wm(cstate, pstate,
+							     pri_latency,
+							     level);
+			result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
+							     result->pri_val);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			result->spr_val = ilk_compute_spr_wm(cstate, pstate,
+							     spr_latency);
+			break;
+		case DRM_PLANE_TYPE_CURSOR:
+			result->cur_val = ilk_compute_cur_wm(cstate, pstate,
+							     cur_latency);
+			break;
+		}
+	}
+
+	result->enable = true;
+}
+
+static uint32_t
+hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	u32 linetime, ips_linetime;
+
+	if (!intel_crtc->active)
+		return 0;
+
+	/*
+	 * The watermarks are computed based on how long it takes to fill a
+	 * single row at the given clock rate, multiplied by 8.
+	 */
+	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+				     adjusted_mode->crtc_clock);
+	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+					 dev_priv->cdclk_freq);
+
+	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
+	       PIPE_WM_LINETIME_TIME(linetime);
+}
+
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_GEN9(dev)) {
+		uint32_t val;
+		int ret, i;
+		int level, max_level = ilk_wm_max_level(dev);
+
+		/* read the first set of memory latencies[0:3] */
+		val = 0; /* data0 to be programmed to 0 for first set */
+		mutex_lock(&dev_priv->rps.hw_lock);
+		ret = sandybridge_pcode_read(dev_priv,
+					     GEN9_PCODE_READ_MEM_LATENCY,
+					     &val);
+		mutex_unlock(&dev_priv->rps.hw_lock);
+
+		if (ret) {
+			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+			return;
+		}
+
+		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+
+		/* read the second set of memory latencies[4:7] */
+		val = 1; /* data0 to be programmed to 1 for second set */
+		mutex_lock(&dev_priv->rps.hw_lock);
+		ret = sandybridge_pcode_read(dev_priv,
+					     GEN9_PCODE_READ_MEM_LATENCY,
+					     &val);
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		if (ret) {
+			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+			return;
+		}
+
+		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+				GEN9_MEM_LATENCY_LEVEL_MASK;
+
+		/*
+		 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+		 * need to be disabled. We make sure to sanitize the values out
+		 * of the punit to satisfy this requirement.
+		 */
+		for (level = 1; level <= max_level; level++) {
+			if (wm[level] == 0) {
+				for (i = level + 1; i <= max_level; i++)
+					wm[i] = 0;
+				break;
+			}
+		}
+
+		/*
+		 * WaWmMemoryReadLatency:skl
+		 *
+		 * punit doesn't take into account the read latency so we need
+		 * to add 2us to the various latency levels we retrieve from the
+		 * punit when level 0 response data is 0us.
+		 */
+		if (wm[0] == 0) {
+			wm[0] += 2;
+			for (level = 1; level <= max_level; level++) {
+				if (wm[level] == 0)
+					break;
+				wm[level] += 2;
+			}
+		}
+
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+		wm[0] = (sskpd >> 56) & 0xFF;
+		if (wm[0] == 0)
+			wm[0] = sskpd & 0xF;
+		wm[1] = (sskpd >> 4) & 0xFF;
+		wm[2] = (sskpd >> 12) & 0xFF;
+		wm[3] = (sskpd >> 20) & 0x1FF;
+		wm[4] = (sskpd >> 32) & 0x1FF;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+	} else if (INTEL_INFO(dev)->gen >= 5) {
+		uint32_t mltr = I915_READ(MLTR_ILK);
+
+		/* ILK primary LP0 latency is 700 ns */
+		wm[0] = 7;
+		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+	}
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+	/* ILK sprite LP0 latency is 1300 ns */
+	if (INTEL_INFO(dev)->gen == 5)
+		wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+	/* ILK cursor LP0 latency is 1300 ns */
+	if (INTEL_INFO(dev)->gen == 5)
+		wm[0] = 13;
+
+	/* WaDoubleCursorLP3Latency:ivb */
+	if (IS_IVYBRIDGE(dev))
+		wm[3] *= 2;
+}
+
+int ilk_wm_max_level(const struct drm_device *dev)
+{
+	/* how many WM levels are we expecting */
+	if (INTEL_INFO(dev)->gen >= 9)
+		return 7;
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		return 4;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return 3;
+	else
+		return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+				   const char *name,
+				   const uint16_t wm[8])
+{
+	int level, max_level = ilk_wm_max_level(dev);
+
+	for (level = 0; level <= max_level; level++) {
+		unsigned int latency = wm[level];
+
+		if (latency == 0) {
+			DRM_ERROR("%s WM%d latency not provided\n",
+				  name, level);
+			continue;
+		}
+
+		/*
+		 * - latencies are in us on gen9.
+		 * - before then, WM1+ latency values are in 0.5us units
+		 */
+		if (IS_GEN9(dev))
+			latency *= 10;
+		else if (level > 0)
+			latency *= 5;
+
+		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+			      name, level, wm[level],
+			      latency / 10, latency % 10);
+	}
+}
+
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+				    uint16_t wm[5], uint16_t min)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+	if (wm[0] >= min)
+		return false;
+
+	wm[0] = max(wm[0], min);
+	for (level = 1; level <= max_level; level++)
+		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+	return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool changed;
+
+	/*
+	 * The BIOS provided WM memory latency values are often
+	 * inadequate for high resolution displays. Adjust them.
+	 */
+	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+	if (!changed)
+		return;
+
+	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
+static void ilk_setup_wm_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+
+	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+	if (IS_GEN6(dev))
+		snb_wm_latency_quirk(dev);
+}
+
+static void skl_setup_wm_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+}
+
+static void ilk_compute_wm_config(struct drm_device *dev,
+				  struct intel_wm_config *config)
+{
+	struct intel_crtc *intel_crtc;
+
+	/* Compute the currently _active_ config */
+	for_each_intel_crtc(dev, intel_crtc) {
+		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
+
+		if (!wm->pipe_enabled)
+			continue;
+
+		config->sprites_enabled |= wm->sprites_enabled;
+		config->sprites_scaled |= wm->sprites_scaled;
+		config->num_pipes_active++;
+	}
+}
+
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
+				  struct intel_pipe_wm *pipe_wm)
+{
+	struct drm_crtc *crtc = cstate->base.crtc;
+	struct drm_device *dev = crtc->dev;
+	const struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *sprstate = NULL;
+	int level, max_level = ilk_wm_max_level(dev);
+	/* LP0 watermark maximums depend on this pipe alone */
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+	};
+	struct ilk_wm_maximums max;
+
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
+			sprstate = to_intel_plane_state(intel_plane->base.state);
+			break;
+		}
+	}
+
+	config.sprites_enabled = sprstate->visible;
+	config.sprites_scaled = sprstate->visible &&
+		(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
+		drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+
+	pipe_wm->pipe_enabled = cstate->base.active;
+	pipe_wm->sprites_enabled = sprstate->visible;
+	pipe_wm->sprites_scaled = config.sprites_scaled;
+
+	/* ILK/SNB: LP2+ watermarks only w/o sprites */
+	if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
+		max_level = 1;
+
+	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+	if (config.sprites_scaled)
+		max_level = 0;
+
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+	/* LP0 watermarks always use 1/2 DDB partitioning */
+	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+	/* At least LP0 must be valid */
+	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+		return false;
+
+	ilk_compute_wm_reg_maximums(dev, 1, &max);
+
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level wm = {};
+
+		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
+
+		/*
+		 * Disable any watermark level that exceeds the
+		 * register maximums since such watermarks are
+		 * always invalid.
+		 */
+		if (!ilk_validate_wm_level(level, &max, &wm))
+			break;
+
+		pipe_wm->wm[level] = wm;
+	}
+
+	return true;
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+			       int level,
+			       struct intel_wm_level *ret_wm)
+{
+	const struct intel_crtc *intel_crtc;
+
+	ret_wm->enable = true;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
+		const struct intel_wm_level *wm = &active->wm[level];
+
+		if (!active->pipe_enabled)
+			continue;
+
+		/*
+		 * The watermark values may have been used in the past,
+		 * so we must maintain them in the registers for some
+		 * time even if the level is now disabled.
+		 */
+		if (!wm->enable)
+			ret_wm->enable = false;
+
+		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+	}
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+			 const struct intel_wm_config *config,
+			 const struct ilk_wm_maximums *max,
+			 struct intel_pipe_wm *merged)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int level, max_level = ilk_wm_max_level(dev);
+	int last_enabled_level = max_level;
+
+	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+	    config->num_pipes_active > 1)
+		return;
+
+	/* ILK: FBC WM must be disabled always */
+	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+
+	/* merge each WM1+ level */
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level *wm = &merged->wm[level];
+
+		ilk_merge_wm_level(dev, level, wm);
+
+		if (level > last_enabled_level)
+			wm->enable = false;
+		else if (!ilk_validate_wm_level(level, max, wm))
+			/* make sure all following levels get disabled */
+			last_enabled_level = level - 1;
+
+		/*
+		 * The spec says it is preferred to disable
+		 * FBC WMs instead of disabling a WM level.
+		 */
+		if (wm->fbc_val > max->fbc) {
+			if (wm->enable)
+				merged->fbc_wm_enabled = false;
+			wm->fbc_val = 0;
+		}
+	}
+
+	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+	/*
+	 * FIXME this is racy. FBC might get enabled later.
+	 * What we should check here is whether FBC can be
+	 * enabled sometime later.
+	 */
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+	    intel_fbc_enabled(dev_priv)) {
+		for (level = 2; level <= max_level; level++) {
+			struct intel_wm_level *wm = &merged->wm[level];
+
+			wm->enable = false;
+		}
+	}
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+	/*
+	 * LP1,LP2,LP3 map to levels 1,2,3, or to 1,3,4 when wm level 4 is
+	 * enabled (only HSW/BDW have five levels), skipping level 2.
+	 */
+	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		return 2 * level;
+	else
+		return dev_priv->wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_device *dev,
+				   const struct intel_pipe_wm *merged,
+				   enum intel_ddb_partitioning partitioning,
+				   struct ilk_wm_values *results)
+{
+	struct intel_crtc *intel_crtc;
+	int level, wm_lp;
+
+	results->enable_fbc_wm = merged->fbc_wm_enabled;
+	results->partitioning = partitioning;
+
+	/* LP1+ register values */
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		const struct intel_wm_level *r;
+
+		level = ilk_wm_lp_to_level(wm_lp, merged);
+
+		r = &merged->wm[level];
+
+		/*
+		 * Maintain the watermark values even if the level is
+		 * disabled. Doing otherwise could cause underruns.
+		 */
+		results->wm_lp[wm_lp - 1] =
+			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+			(r->pri_val << WM1_LP_SR_SHIFT) |
+			r->cur_val;
+
+		if (r->enable)
+			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+
+		if (INTEL_INFO(dev)->gen >= 8)
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+		else
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT;
+
+		/*
+		 * Always set WM1S_LP_EN when spr_val != 0, even if the
+		 * level is disabled. Doing otherwise could cause underruns.
+		 */
+		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+			WARN_ON(wm_lp != 1);
+			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+		} else
+			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+	}
+
+	/* LP0 register values */
+	for_each_intel_crtc(dev, intel_crtc) {
+		enum pipe pipe = intel_crtc->pipe;
+		const struct intel_wm_level *r =
+			&intel_crtc->wm.active.wm[0];
+
+		if (WARN_ON(!r->enable))
+			continue;
+
+		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+		results->wm_pipe[pipe] =
+			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+			r->cur_val;
+	}
+}
+
+/* Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same. */
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
+						  struct intel_pipe_wm *r1,
+						  struct intel_pipe_wm *r2)
+{
+	int level, max_level = ilk_wm_max_level(dev);
+	int level1 = 0, level2 = 0;
+
+	for (level = 1; level <= max_level; level++) {
+		if (r1->wm[level].enable)
+			level1 = level;
+		if (r2->wm[level].enable)
+			level2 = level;
+	}
+
+	if (level1 == level2) {
+		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
+			return r2;
+		else
+			return r1;
+	} else if (level1 > level2) {
+		return r1;
+	} else {
+		return r2;
+	}
+}
+
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
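+
+/*
+ * Resulting layout of the dirty mask (derived from the macros above):
+ * bits 0-2 pipe WMs, bits 8-10 linetime, bits 16-18 LP1-LP3, bit 24 FBC
+ * and bit 25 DDB partitioning.
+ */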
+
+static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+					 const struct ilk_wm_values *old,
+					 const struct ilk_wm_values *new)
+{
+	unsigned int dirty = 0;
+	enum pipe pipe;
+	int wm_lp;
+
+	for_each_pipe(dev_priv, pipe) {
+		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+			dirty |= WM_DIRTY_LINETIME(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+
+		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+			dirty |= WM_DIRTY_PIPE(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+	}
+
+	if (old->enable_fbc_wm != new->enable_fbc_wm) {
+		dirty |= WM_DIRTY_FBC;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	if (old->partitioning != new->partitioning) {
+		dirty |= WM_DIRTY_DDB;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	/* LP1+ watermarks already deemed dirty, no need to continue */
+	if (dirty & WM_DIRTY_LP_ALL)
+		return dirty;
+
+	/* Find the lowest numbered LP1+ watermark in need of an update... */
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+			break;
+	}
+
+	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+	for (; wm_lp <= 3; wm_lp++)
+		dirty |= WM_DIRTY_LP(wm_lp);
+
+	return dirty;
+}
+
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+			       unsigned int dirty)
+{
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	bool changed = false;
+
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+		changed = true;
+	}
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+
+	return changed;
+}
+
+/*
+ * The spec says we shouldn't write when we don't need, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+				struct ilk_wm_values *results)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	unsigned int dirty;
+	uint32_t val;
+
+	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+	if (!dirty)
+		return;
+
+	_ilk_disable_lp_wm(dev_priv, dirty);
+
+	if (dirty & WM_DIRTY_PIPE(PIPE_A))
+		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
+	if (dirty & WM_DIRTY_PIPE(PIPE_B))
+		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
+	if (dirty & WM_DIRTY_PIPE(PIPE_C))
+		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
+
+	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
+	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
+	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
+
+	if (dirty & WM_DIRTY_DDB) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+			val = I915_READ(WM_MISC);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~WM_MISC_DATA_PARTITION_5_6;
+			else
+				val |= WM_MISC_DATA_PARTITION_5_6;
+			I915_WRITE(WM_MISC, val);
+		} else {
+			val = I915_READ(DISP_ARB_CTL2);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~DISP_DATA_PARTITION_5_6;
+			else
+				val |= DISP_DATA_PARTITION_5_6;
+			I915_WRITE(DISP_ARB_CTL2, val);
+		}
+	}
+
+	if (dirty & WM_DIRTY_FBC) {
+		val = I915_READ(DISP_ARB_CTL);
+		if (results->enable_fbc_wm)
+			val &= ~DISP_FBC_WM_DIS;
+		else
+			val |= DISP_FBC_WM_DIS;
+		I915_WRITE(DISP_ARB_CTL, val);
+	}
+
+	if (dirty & WM_DIRTY_LP(1) &&
+	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+	}
+
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
+		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
+		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
+		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+	dev_priv->wm.hw = *results;
+}
+
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+/*
+ * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
+ * different active planes.
+ */
+
+#define SKL_DDB_SIZE		896	/* in blocks */
+#define BXT_DDB_SIZE		512
+
+static void
+skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+				   struct drm_crtc *for_crtc,
+				   const struct intel_wm_config *config,
+				   const struct skl_pipe_wm_parameters *params,
+				   struct skl_ddb_entry *alloc /* out */)
+{
+	struct drm_crtc *crtc;
+	unsigned int pipe_size, ddb_size;
+	int nth_active_pipe;
+
+	if (!params->active) {
+		alloc->start = 0;
+		alloc->end = 0;
+		return;
+	}
+
+	if (IS_BROXTON(dev))
+		ddb_size = BXT_DDB_SIZE;
+	else
+		ddb_size = SKL_DDB_SIZE;
+
+	ddb_size -= 4; /* 4 blocks for bypass path allocation */
+
+	nth_active_pipe = 0;
+	for_each_crtc(dev, crtc) {
+		if (!to_intel_crtc(crtc)->active)
+			continue;
+
+		if (crtc == for_crtc)
+			break;
+
+		nth_active_pipe++;
+	}
+
+	pipe_size = ddb_size / config->num_pipes_active;
+	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+	alloc->end = alloc->start + pipe_size;
+}
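+
+/*
+ * Worked example (illustrative numbers): on SKL with two active pipes,
+ * ddb_size = 896 - 4 = 892 blocks and pipe_size = 892 / 2 = 446, so the
+ * first active pipe is assigned [0, 446) and the second [446, 892).
+ */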
+
+static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+{
+	if (config->num_pipes_active == 1)
+		return 32;
+
+	return 8;
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+	entry->start = reg & 0x3ff;
+	entry->end = (reg >> 16) & 0x3ff;
+	if (entry->end)
+		entry->end += 1;
+}
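+
+/*
+ * The register packs the start in bits 9:0 and an inclusive end in bits
+ * 25:16; the +1 above converts to the exclusive 'end' convention used by
+ * struct skl_ddb_entry (skl_ddb_entry_write below does the reverse).
+ * E.g. a raw value of 0x01bd0000 decodes to start = 0, end = 446.
+ */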
+
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+			  struct skl_ddb_allocation *ddb /* out */)
+{
+	enum pipe pipe;
+	int plane;
+	u32 val;
+
+	memset(ddb, 0, sizeof(*ddb));
+
+	for_each_pipe(dev_priv, pipe) {
+		if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+			continue;
+
+		for_each_plane(dev_priv, pipe, plane) {
+			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
+			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
+						   val);
+		}
+
+		val = I915_READ(CUR_BUF_CFG(pipe));
+		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
+					   val);
+	}
+}
+
+static unsigned int
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
+{
+	/* for planar format */
+	if (p->y_bytes_per_pixel) {
+		if (y)  /* y-plane data rate */
+			return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
+		else    /* uv-plane data rate */
+			return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
+	}
+
+	/* for packed formats */
+	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+}
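+
+/*
+ * Worked example (illustrative): a 1920x1080 NV12 plane has
+ * y_bytes_per_pixel = 1 and bytes_per_pixel = 2 (uv), so the y-plane rate
+ * is 1920 * 1080 * 1 and the uv-plane rate is 960 * 540 * 2, i.e. the uv
+ * plane contributes half as much data as the y plane.
+ */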
+
+/*
+ * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
+ * an 8192x4096@32bpp framebuffer:
+ *   3 * 4096 * 8192 * 4 < 2^32
+ */
+static unsigned int
+skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
+				 const struct skl_pipe_wm_parameters *params)
+{
+	unsigned int total_data_rate = 0;
+	int plane;
+
+	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+		const struct intel_plane_wm_parameters *p;
+
+		p = &params->plane[plane];
+		if (!p->enabled)
+			continue;
+
+		total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
+		if (p->y_bytes_per_pixel) {
+			total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
+		}
+	}
+
+	return total_data_rate;
+}
+
+static void
+skl_allocate_pipe_ddb(struct drm_crtc *crtc,
+		      const struct intel_wm_config *config,
+		      const struct skl_pipe_wm_parameters *params,
+		      struct skl_ddb_allocation *ddb /* out */)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+	uint16_t alloc_size, start, cursor_blocks;
+	uint16_t minimum[I915_MAX_PLANES];
+	uint16_t y_minimum[I915_MAX_PLANES];
+	unsigned int total_data_rate;
+	int plane;
+
+	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+	alloc_size = skl_ddb_entry_size(alloc);
+	if (alloc_size == 0) {
+		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+		memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
+		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
+		return;
+	}
+
+	cursor_blocks = skl_cursor_allocation(config);
+	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
+	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
+	alloc_size -= cursor_blocks;
+	alloc->end -= cursor_blocks;
+
+	/* 1. Allocate the minimum required blocks for each active plane */
+	for_each_plane(dev_priv, pipe, plane) {
+		const struct intel_plane_wm_parameters *p;
+
+		p = &params->plane[plane];
+		if (!p->enabled)
+			continue;
+
+		minimum[plane] = 8;
+		alloc_size -= minimum[plane];
+		y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
+		alloc_size -= y_minimum[plane];
+	}
+
+	/*
+	 * 2. Distribute the remaining space in proportion to the amount of
+	 * data each plane needs to fetch from memory.
+	 *
+	 * FIXME: we may not allocate every single block here.
+	 */
+	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
+
+	start = alloc->start;
+	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+		const struct intel_plane_wm_parameters *p;
+		unsigned int data_rate, y_data_rate;
+		uint16_t plane_blocks, y_plane_blocks = 0;
+
+		p = &params->plane[plane];
+		if (!p->enabled)
+			continue;
+
+		data_rate = skl_plane_relative_data_rate(p, 0);
+
+		/*
+		 * Allocation for packed formats, or the uv-plane part of a
+		 * planar format: promote the expression to 64 bits to avoid
+		 * overflow; the result is less than the available space since
+		 * data_rate / total_data_rate < 1.
+		 */
+		plane_blocks = minimum[plane];
+		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
+					total_data_rate);
+
+		ddb->plane[pipe][plane].start = start;
+		ddb->plane[pipe][plane].end = start + plane_blocks;
+
+		start += plane_blocks;
+
+		/* allocation for the y-plane part of a planar format */
+		if (p->y_bytes_per_pixel) {
+			y_data_rate = skl_plane_relative_data_rate(p, 1);
+			y_plane_blocks = y_minimum[plane];
+			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+						total_data_rate);
+
+			ddb->y_plane[pipe][plane].start = start;
+			ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
+
+			start += y_plane_blocks;
+		}
+	}
+}
+
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
+{
+	/* TODO: Take into account the scalers once we support them */
+	return config->base.adjusted_mode.crtc_clock;
+}
+
+/*
+ * The max latency should be 257 (the max the punit can encode is 255 and we
+ * add 2us for the read latency), and bytes_per_pixel should always be <= 8,
+ * which allows a pixel_rate of up to ~2 GHz. That is sufficient since the
+ * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+			       uint32_t latency)
+{
+	uint32_t wm_intermediate_val, ret;
+
+	if (latency == 0)
+		return UINT_MAX;
+
+	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
+	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+
+	return ret;
+}
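+
+/*
+ * Worked example (illustrative timings): with pixel_rate = 148500 (kHz,
+ * 1080p@60), bytes_per_pixel = 4 and latency = 5 us:
+ *   wm_intermediate_val = 5 * 148500 * 4 / 512 = 5800
+ *   ret = DIV_ROUND_UP(5800, 1000) = 6
+ * i.e. roughly the number of 512-byte blocks fetched during the latency.
+ */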
+
+static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint64_t tiling, uint32_t latency)
+{
+	uint32_t ret;
+	uint32_t plane_bytes_per_line, plane_blocks_per_line;
+	uint32_t wm_intermediate_val;
+
+	if (latency == 0)
+		return UINT_MAX;
+
+	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+
+	if (tiling == I915_FORMAT_MOD_Y_TILED ||
+	    tiling == I915_FORMAT_MOD_Yf_TILED) {
+		plane_bytes_per_line *= 4;
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+		plane_blocks_per_line /= 4;
+	} else {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+	}
+
+	wm_intermediate_val = latency * pixel_rate;
+	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
+				plane_blocks_per_line;
+
+	return ret;
+}
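+
+/*
+ * Worked example (illustrative): 1920 pixels at 4 Bpp gives
+ * plane_bytes_per_line = 7680, so a linear surface has
+ * plane_blocks_per_line = DIV_ROUND_UP(7680, 512) = 15. With latency =
+ * 5 us, pixel_rate = 148500 and pipe_htotal = 2200:
+ *   DIV_ROUND_UP(5 * 148500, 2200 * 1000) * 15 = 1 * 15 = 15
+ * i.e. blocks per line times the lines spanned by the latency. The
+ * *4 then /4 sequence for Y-tiled surfaces evaluates blocks per line
+ * over a full 4-line tile row before dividing back down.
+ */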
+
+static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
+				       const struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	enum pipe pipe = intel_crtc->pipe;
+
+	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
+		   sizeof(new_ddb->plane[pipe])))
+		return true;
+
+	if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
+		    sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
+		return true;
+
+	return false;
+}
+
+static void skl_compute_wm_global_parameters(struct drm_device *dev,
+					     struct intel_wm_config *config)
+{
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		config->num_pipes_active += to_intel_crtc(crtc)->active;
+
+	/* FIXME: I don't think we need those two global parameters on SKL */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		config->sprites_enabled |= intel_plane->wm.enabled;
+		config->sprites_scaled |= intel_plane->wm.scaled;
+	}
+}
+
+static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
+					   struct skl_pipe_wm_parameters *p)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	struct drm_plane *plane;
+	struct drm_framebuffer *fb;
+	int i = 1; /* sprite plane indices start at 1 */
+
+	p->active = intel_crtc->active;
+	if (p->active) {
+		p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
+		p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
+
+		fb = crtc->primary->state->fb;
+		/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
+		if (fb) {
+			p->plane[0].enabled = true;
+			p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
+				drm_format_plane_cpp(fb->pixel_format, 1) :
+				drm_format_plane_cpp(fb->pixel_format, 0);
+			p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
+				drm_format_plane_cpp(fb->pixel_format, 0) : 0;
+			p->plane[0].tiling = fb->modifier[0];
+		} else {
+			p->plane[0].enabled = false;
+			p->plane[0].bytes_per_pixel = 0;
+			p->plane[0].y_bytes_per_pixel = 0;
+			p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
+		}
+		p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
+		p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
+		p->plane[0].rotation = crtc->primary->state->rotation;
+
+		fb = crtc->cursor->state->fb;
+		p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
+		if (fb) {
+			p->plane[PLANE_CURSOR].enabled = true;
+			p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
+			p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
+			p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
+		} else {
+			p->plane[PLANE_CURSOR].enabled = false;
+			p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
+			p->plane[PLANE_CURSOR].horiz_pixels = 64;
+			p->plane[PLANE_CURSOR].vert_pixels = 64;
+		}
+	}
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		if (intel_plane->pipe == pipe &&
+			plane->type == DRM_PLANE_TYPE_OVERLAY)
+			p->plane[i++] = intel_plane->wm;
+	}
+}
+
+static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+				 struct skl_pipe_wm_parameters *p,
+				 struct intel_plane_wm_parameters *p_params,
+				 uint16_t ddb_allocation,
+				 int level,
+				 uint16_t *out_blocks, /* out */
+				 uint8_t *out_lines /* out */)
+{
+	uint32_t latency = dev_priv->wm.skl_latency[level];
+	uint32_t method1, method2;
+	uint32_t plane_bytes_per_line, plane_blocks_per_line;
+	uint32_t res_blocks, res_lines;
+	uint32_t selected_result;
+	uint8_t bytes_per_pixel;
+
+	if (latency == 0 || !p->active || !p_params->enabled)
+		return false;
+
+	bytes_per_pixel = p_params->y_bytes_per_pixel ?
+		p_params->y_bytes_per_pixel :
+		p_params->bytes_per_pixel;
+	method1 = skl_wm_method1(p->pixel_rate,
+				 bytes_per_pixel,
+				 latency);
+	method2 = skl_wm_method2(p->pixel_rate,
+				 p->pipe_htotal,
+				 p_params->horiz_pixels,
+				 bytes_per_pixel,
+				 p_params->tiling,
+				 latency);
+
+	plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
+	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+
+	if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+	    p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
+		uint32_t min_scanlines = 4;
+		uint32_t y_tile_minimum;
+		if (intel_rotation_90_or_270(p_params->rotation)) {
+			switch (p_params->bytes_per_pixel) {
+			case 1:
+				min_scanlines = 16;
+				break;
+			case 2:
+				min_scanlines = 8;
+				break;
+			case 8:
+				WARN(1, "Unsupported pixel depth for rotation");
+			}
+		}
+		y_tile_minimum = plane_blocks_per_line * min_scanlines;
+		selected_result = max(method2, y_tile_minimum);
+	} else {
+		if ((ddb_allocation / plane_blocks_per_line) >= 1)
+			selected_result = min(method1, method2);
+		else
+			selected_result = method1;
+	}
+
+	res_blocks = selected_result + 1;
+	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
+
+	if (level >= 1 && level <= 7) {
+		if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+		    p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
+			res_lines += 4;
+		else
+			res_blocks++;
+	}
+
+	if (res_blocks >= ddb_allocation || res_lines > 31)
+		return false;
+
+	*out_blocks = res_blocks;
+	*out_lines = res_lines;
+
+	return true;
+}
+
+static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+				 struct skl_ddb_allocation *ddb,
+				 struct skl_pipe_wm_parameters *p,
+				 enum pipe pipe,
+				 int level,
+				 int num_planes,
+				 struct skl_wm_level *result)
+{
+	uint16_t ddb_blocks;
+	int i;
+
+	for (i = 0; i < num_planes; i++) {
+		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+
+		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
+						p, &p->plane[i],
+						ddb_blocks,
+						level,
+						&result->plane_res_b[i],
+						&result->plane_res_l[i]);
+	}
+
+	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
+	result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
+						 &p->plane[PLANE_CURSOR],
+						 ddb_blocks, level,
+						 &result->plane_res_b[PLANE_CURSOR],
+						 &result->plane_res_l[PLANE_CURSOR]);
+}
+
+static uint32_t
+skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+{
+	if (!to_intel_crtc(crtc)->active)
+		return 0;
+
+	if (WARN_ON(p->pixel_rate == 0))
+		return 0;
+
+	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+}
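+
+/*
+ * Worked example (illustrative): pipe_htotal = 2200 and pixel_rate =
+ * 148500 (kHz) give DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. a
+ * line time of ~14.8 us expressed in 1/8 us units.
+ */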
+
+static void skl_compute_transition_wm(struct drm_crtc *crtc,
+				      struct skl_pipe_wm_parameters *params,
+				      struct skl_wm_level *trans_wm /* out */)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int i;
+
+	if (!params->active)
+		return;
+
+	/* Until we know more, just disable transition WMs */
+	for (i = 0; i < intel_num_planes(intel_crtc); i++)
+		trans_wm->plane_en[i] = false;
+	trans_wm->plane_en[PLANE_CURSOR] = false;
+}
+
+static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+				struct skl_ddb_allocation *ddb,
+				struct skl_pipe_wm_parameters *params,
+				struct skl_pipe_wm *pipe_wm)
+{
+	struct drm_device *dev = crtc->dev;
+	const struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int level, max_level = ilk_wm_max_level(dev);
+
+	for (level = 0; level <= max_level; level++) {
+		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
+				     level, intel_num_planes(intel_crtc),
+				     &pipe_wm->wm[level]);
+	}
+	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
+
+	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+}
+
+static void skl_compute_wm_results(struct drm_device *dev,
+				   struct skl_pipe_wm_parameters *p,
+				   struct skl_pipe_wm *p_wm,
+				   struct skl_wm_values *r,
+				   struct intel_crtc *intel_crtc)
+{
+	int level, max_level = ilk_wm_max_level(dev);
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t temp;
+	int i;
+
+	for (level = 0; level <= max_level; level++) {
+		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+			temp = 0;
+
+			temp |= p_wm->wm[level].plane_res_l[i] <<
+					PLANE_WM_LINES_SHIFT;
+			temp |= p_wm->wm[level].plane_res_b[i];
+			if (p_wm->wm[level].plane_en[i])
+				temp |= PLANE_WM_EN;
+
+			r->plane[pipe][i][level] = temp;
+		}
+
+		temp = 0;
+
+		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
+		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
+
+		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
+			temp |= PLANE_WM_EN;
+
+		r->plane[pipe][PLANE_CURSOR][level] = temp;
+	}
+
+	/* transition WMs */
+	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+		temp = 0;
+		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
+		temp |= p_wm->trans_wm.plane_res_b[i];
+		if (p_wm->trans_wm.plane_en[i])
+			temp |= PLANE_WM_EN;
+
+		r->plane_trans[pipe][i] = temp;
+	}
+
+	temp = 0;
+	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
+	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
+	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
+		temp |= PLANE_WM_EN;
+
+	r->plane_trans[pipe][PLANE_CURSOR] = temp;
+
+	r->wm_linetime[pipe] = p_wm->linetime;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+				const struct skl_ddb_entry *entry)
+{
+	if (entry->end)
+		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+	else
+		I915_WRITE(reg, 0);
+}
+
+static void skl_write_wm_values(struct drm_i915_private *dev_priv,
+				const struct skl_wm_values *new)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		int i, level, max_level = ilk_wm_max_level(dev);
+		enum pipe pipe = crtc->pipe;
+
+		if (!new->dirty[pipe])
+			continue;
+
+		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
+
+		for (level = 0; level <= max_level; level++) {
+			for (i = 0; i < intel_num_planes(crtc); i++)
+				I915_WRITE(PLANE_WM(pipe, i, level),
+					   new->plane[pipe][i][level]);
+			I915_WRITE(CUR_WM(pipe, level),
+				   new->plane[pipe][PLANE_CURSOR][level]);
+		}
+		for (i = 0; i < intel_num_planes(crtc); i++)
+			I915_WRITE(PLANE_WM_TRANS(pipe, i),
+				   new->plane_trans[pipe][i]);
+		I915_WRITE(CUR_WM_TRANS(pipe),
+			   new->plane_trans[pipe][PLANE_CURSOR]);
+
+		for (i = 0; i < intel_num_planes(crtc); i++) {
+			skl_ddb_entry_write(dev_priv,
+					    PLANE_BUF_CFG(pipe, i),
+					    &new->ddb.plane[pipe][i]);
+			skl_ddb_entry_write(dev_priv,
+					    PLANE_NV12_BUF_CFG(pipe, i),
+					    &new->ddb.y_plane[pipe][i]);
+		}
+
+		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+				    &new->ddb.plane[pipe][PLANE_CURSOR]);
+	}
+}
+
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ *  1. re-allocate the pipe(s) with the allocation being reduced and not
+ *     overlapping with a previously lit-up pipe (another way to put it is:
+ *     pipes with their new allocation strictly included in their old ones).
+ *  2. re-allocate the other pipes that get their allocation reduced
+ *  3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ *     |   B    |   C    |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ *   allocation
+ *     |  A  |  B  |  C  |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
+ */
+
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+{
+	int plane;
+
+	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+
+	for_each_plane(dev_priv, pipe, plane) {
+		I915_WRITE(PLANE_SURF(pipe, plane),
+			   I915_READ(PLANE_SURF(pipe, plane)));
+	}
+	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+}
+
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+			    const struct skl_ddb_allocation *new,
+			    enum pipe pipe)
+{
+	uint16_t old_size, new_size;
+
+	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+	new_size = skl_ddb_entry_size(&new->pipe[pipe]);
+
+	return old_size != new_size &&
+	       new->pipe[pipe].start >= old->pipe[pipe].start &&
+	       new->pipe[pipe].end <= old->pipe[pipe].end;
+}
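+
+/*
+ * Example (illustrative): old pipe allocation [0, 448) and new allocation
+ * [100, 300): the sizes differ and the new range lies inside the old one,
+ * so this returns true and the pipe can be flushed in the first pass.
+ */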
+
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+				struct skl_wm_values *new_values)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct skl_ddb_allocation *cur_ddb, *new_ddb;
+	bool reallocated[I915_MAX_PIPES] = {};
+	struct intel_crtc *crtc;
+	enum pipe pipe;
+
+	new_ddb = &new_values->ddb;
+	cur_ddb = &dev_priv->wm.skl_hw.ddb;
+
+	/*
+	 * First pass: flush the pipes with the new allocation contained into
+	 * the old space.
+	 *
+	 * We'll wait for the vblank on those pipes to ensure we can safely
+	 * re-allocate the freed space without this pipe fetching from it.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+			continue;
+
+		skl_wm_flush_pipe(dev_priv, pipe, 1);
+		intel_wait_for_vblank(dev, pipe);
+
+		reallocated[pipe] = true;
+	}
+
+	/*
+	 * Second pass: flush the pipes that are having their allocation
+	 * reduced, but overlapping with a previous allocation.
+	 *
+	 * Here as well we need to wait for the vblank to make sure the freed
+	 * space is not used anymore.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		if (reallocated[pipe])
+			continue;
+
+		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+			skl_wm_flush_pipe(dev_priv, pipe, 2);
+			intel_wait_for_vblank(dev, pipe);
+			reallocated[pipe] = true;
+		}
+	}
+
+	/*
+	 * Third pass: flush the pipes that got more space allocated.
+	 *
+	 * We don't need to actively wait for the update here, next vblank
+	 * will just get more DDB space with the correct WM values.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		/*
+		 * At this point, only the pipes that got more space than
+		 * before are left to re-allocate.
+		 */
+		if (reallocated[pipe])
+			continue;
+
+		skl_wm_flush_pipe(dev_priv, pipe, 3);
+	}
+}
+
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+			       struct skl_pipe_wm_parameters *params,
+			       struct intel_wm_config *config,
+			       struct skl_ddb_allocation *ddb, /* out */
+			       struct skl_pipe_wm *pipe_wm /* out */)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	skl_compute_wm_pipe_parameters(crtc, params);
+	skl_allocate_pipe_ddb(crtc, config, params, ddb);
+	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+		return false;
+
+	intel_crtc->wm.skl_active = *pipe_wm;
+
+	return true;
+}
+
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+				     struct drm_crtc *crtc,
+				     struct intel_wm_config *config,
+				     struct skl_wm_values *r)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+
+	/*
+	 * If the WM update hasn't changed the allocation for this_crtc (the
+	 * crtc we are currently computing the new WM values for), other
+	 * enabled crtcs will keep the same allocation and we don't need to
+	 * recompute anything for them.
+	 */
+	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
+		return;
+
+	/*
+	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
+	 * other active pipes need new DDB allocation and WM values.
+	 */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+				base.head) {
+		struct skl_pipe_wm_parameters params = {};
+		struct skl_pipe_wm pipe_wm = {};
+		bool wm_changed;
+
+		if (this_crtc->pipe == intel_crtc->pipe)
+			continue;
+
+		if (!intel_crtc->active)
+			continue;
+
+		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+						&params, config,
+						&r->ddb, &pipe_wm);
+
+		/*
+		 * If we end up re-computing the other pipe WM values, it's
+		 * because it was really needed, so we expect the WM values to
+		 * be different.
+		 */
+		WARN_ON(!wm_changed);
+
+		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+		r->dirty[intel_crtc->pipe] = true;
+	}
+}
+
+static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+{
+	watermarks->wm_linetime[pipe] = 0;
+	memset(watermarks->plane[pipe], 0,
+	       sizeof(uint32_t) * 8 * I915_MAX_PLANES);
+	memset(watermarks->plane_trans[pipe],
+	       0, sizeof(uint32_t) * I915_MAX_PLANES);
+	watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+
+	/* Clear ddb entries for pipe */
+	memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
+	memset(&watermarks->ddb.plane[pipe], 0,
+	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+	memset(&watermarks->ddb.y_plane[pipe], 0,
+	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+	memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
+	       sizeof(struct skl_ddb_entry));
+}
+
+static void skl_update_wm(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_pipe_wm_parameters params = {};
+	struct skl_wm_values *results = &dev_priv->wm.skl_results;
+	struct skl_pipe_wm pipe_wm = {};
+	struct intel_wm_config config = {};
+
+	/* Clear all dirty flags */
+	memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
+
+	skl_clear_wm(results, intel_crtc->pipe);
+
+	skl_compute_wm_global_parameters(dev, &config);
+
+	if (!skl_update_pipe_wm(crtc, &params, &config,
+				&results->ddb, &pipe_wm))
+		return;
+
+	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+	results->dirty[intel_crtc->pipe] = true;
+
+	skl_update_other_pipe_wm(dev, crtc, &config, results);
+	skl_write_wm_values(dev_priv, results);
+	skl_flush_wm_values(dev_priv, results);
+
+	/* store the new configuration */
+	dev_priv->wm.skl_hw = *results;
+}
+
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_framebuffer *fb = plane->state->fb;
+
+	intel_plane->wm.enabled = enabled;
+	intel_plane->wm.scaled = scaled;
+	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_height;
+	intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+
+	/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
+	intel_plane->wm.bytes_per_pixel =
+		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
+		drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
+	intel_plane->wm.y_bytes_per_pixel =
+		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
+		drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
+
+	/*
+	 * Framebuffer can be NULL on plane disable, but it does not
+	 * matter for watermarks if we assume no tiling in that case.
+	 */
+	if (fb)
+		intel_plane->wm.tiling = fb->modifier[0];
+	intel_plane->wm.rotation = plane->state->rotation;
+
+	skl_update_wm(crtc);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct ilk_wm_maximums max;
+	struct ilk_wm_values results = {};
+	enum intel_ddb_partitioning partitioning;
+	struct intel_pipe_wm pipe_wm = {};
+	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+	struct intel_wm_config config = {};
+
+	WARN_ON(cstate->base.active != intel_crtc->active);
+
+	intel_compute_pipe_wm(cstate, &pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+		return;
+
+	intel_crtc->wm.active = pipe_wm;
+
+	ilk_compute_wm_config(dev, &config);
+
+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+
+	/* 5/6 split only in single pipe config on IVB+ */
+	if (INTEL_INFO(dev)->gen >= 7 &&
+	    config.num_pipes_active == 1 && config.sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+
+		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+	} else {
+		best_lp_wm = &lp_wm_1_2;
+	}
+
+	partitioning = (best_lp_wm == &lp_wm_1_2) ?
+		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+	ilk_write_wm_values(dev_priv, &results);
+}
+
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+		     struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+
+	/*
+	 * IVB workaround: must disable low power watermarks for at least
+	 * one frame before enabling scaling.  LP watermarks can be re-enabled
+	 * when scaling is disabled.
+	 *
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 */
+	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+		intel_wait_for_vblank(dev, intel_plane->pipe);
+
+	ilk_update_wm(crtc);
+}
+
+static void skl_pipe_wm_active_state(uint32_t val,
+				     struct skl_pipe_wm *active,
+				     bool is_transwm,
+				     bool is_cursor,
+				     int i,
+				     int level)
+{
+	bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+	if (!is_transwm) {
+		if (!is_cursor) {
+			active->wm[level].plane_en[i] = is_enabled;
+			active->wm[level].plane_res_b[i] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->wm[level].plane_res_l[i] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		} else {
+			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
+			active->wm[level].plane_res_b[PLANE_CURSOR] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->wm[level].plane_res_l[PLANE_CURSOR] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		}
+	} else {
+		if (!is_cursor) {
+			active->trans_wm.plane_en[i] = is_enabled;
+			active->trans_wm.plane_res_b[i] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->trans_wm.plane_res_l[i] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		} else {
+			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
+			active->trans_wm.plane_res_b[PLANE_CURSOR] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->trans_wm.plane_res_l[PLANE_CURSOR] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		}
+	}
+}
+
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+	enum pipe pipe = intel_crtc->pipe;
+	int level, i, max_level;
+	uint32_t temp;
+
+	max_level = ilk_wm_max_level(dev);
+
+	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	for (level = 0; level <= max_level; level++) {
+		for (i = 0; i < intel_num_planes(intel_crtc); i++)
+			hw->plane[pipe][i][level] =
+					I915_READ(PLANE_WM(pipe, i, level));
+		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
+	}
+
+	for (i = 0; i < intel_num_planes(intel_crtc); i++)
+		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
+
+	if (!intel_crtc->active)
+		return;
+
+	hw->dirty[pipe] = true;
+
+	active->linetime = hw->wm_linetime[pipe];
+
+	for (level = 0; level <= max_level; level++) {
+		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+			temp = hw->plane[pipe][i][level];
+			skl_pipe_wm_active_state(temp, active, false,
+						false, i, level);
+		}
+		temp = hw->plane[pipe][PLANE_CURSOR][level];
+		skl_pipe_wm_active_state(temp, active, false, true, i, level);
+	}
+
+	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+		temp = hw->plane_trans[pipe][i];
+		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+	}
+
+	temp = hw->plane_trans[pipe][PLANE_CURSOR];
+	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+}
+
+void skl_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+	struct drm_crtc *crtc;
+
+	skl_ddb_get_hw_state(dev_priv, ddb);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		skl_pipe_wm_get_hw_state(crtc);
+}
+
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct ilk_wm_values *hw = &dev_priv->wm.hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_pipe_wm *active = &intel_crtc->wm.active;
+	enum pipe pipe = intel_crtc->pipe;
+	static const unsigned int wm0_pipe_reg[] = {
+		[PIPE_A] = WM0_PIPEA_ILK,
+		[PIPE_B] = WM0_PIPEB_ILK,
+		[PIPE_C] = WM0_PIPEC_IVB,
+	};
+
+	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	memset(active, 0, sizeof(*active));
+
+	active->pipe_enabled = intel_crtc->active;
+
+	if (active->pipe_enabled) {
+		u32 tmp = hw->wm_pipe[pipe];
+
+		/*
+		 * For active pipes the LP0 watermark is marked as
+		 * enabled, and the LP1+ watermarks as disabled, since
+		 * we can't really reverse compute them when
+		 * multiple pipes are active.
+		 */
+		active->wm[0].enable = true;
+		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+		active->linetime = hw->wm_linetime[pipe];
+	} else {
+		int level, max_level = ilk_wm_max_level(dev);
+
+		/*
+		 * For inactive pipes, all watermark levels
+		 * should be marked as enabled but zeroed,
+		 * which is what we'd compute them to.
+		 */
+		for (level = 0; level <= max_level; level++)
+			active->wm[level].enable = true;
+	}
+}
+
+#define _FW_WM(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
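+
+/*
+ * Example expansion: _FW_WM(tmp, CURSORB) becomes
+ * ((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT; the _VLV variant
+ * differs only in using the wider DSPFW_*_MASK_VLV masks.
+ */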
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+			       struct vlv_wm_values *wm)
+{
+	enum pipe pipe;
+	uint32_t tmp;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp = I915_READ(VLV_DDL(pipe));
+
+		wm->ddl[pipe].primary =
+			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].cursor =
+			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[0] =
+			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[1] =
+			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+	}
+
+	tmp = I915_READ(DSPFW1);
+	wm->sr.plane = _FW_WM(tmp, SR);
+	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+	tmp = I915_READ(DSPFW2);
+	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+	tmp = I915_READ(DSPFW3);
+	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		tmp = I915_READ(DSPFW7_CHV);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPFW8_CHV);
+		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+		tmp = I915_READ(DSPFW9_CHV);
+		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	} else {
+		tmp = I915_READ(DSPFW7);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	}
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	struct intel_plane *plane;
+	enum pipe pipe;
+	u32 val;
+
+	vlv_read_wm_values(dev_priv, wm);
+
+	for_each_intel_plane(dev, plane) {
+		switch (plane->base.type) {
+			int sprite;
+		case DRM_PLANE_TYPE_CURSOR:
+			plane->wm.fifo_size = 63;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+			break;
+		}
+	}
+
+	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->level = VLV_WM_LEVEL_PM2;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+		if (val & DSP_MAXFIFO_PM5_ENABLE)
+			wm->level = VLV_WM_LEVEL_PM5;
+
+		/*
+		 * If DDR DVFS is disabled in the BIOS, Punit
+		 * will never ack the request. So if that happens
+		 * assume we don't have to enable/disable DDR DVFS
+		 * dynamically. To test that just set the REQ_ACK
+		 * bit to poke the Punit, but don't change the
+		 * HIGH/LOW bits so that we don't actually change
+		 * the current state.
+		 */
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+		val |= FORCE_DDR_FREQ_REQ_ACK;
+		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+				      "assuming DDR DVFS is disabled\n");
+			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+		} else {
+			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+				wm->level = VLV_WM_LEVEL_DDR_DVFS;
+		}
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+
+	for_each_pipe(dev_priv, pipe)
+		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct ilk_wm_values *hw = &dev_priv->wm.hw;
+	struct drm_crtc *crtc;
+
+	for_each_crtc(dev, crtc)
+		ilk_pipe_wm_get_hw_state(crtc);
+
+	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	if (INTEL_INFO(dev)->gen >= 7) {
+		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	}
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+	else if (IS_IVYBRIDGE(dev))
+		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+	hw->enable_fbc_wm =
+		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ *   - normal (i.e. non-self-refresh)
+ *   - self-refresh (SR) mode
+ *   - lines are large relative to FIFO size (buffer can hold up to 2)
+ *   - lines are small relative to FIFO size (buffer can hold more than 2
+ *     lines), so need to account for TLB latency
+ *
+ *   The normal calculation is:
+ *     watermark = dotclock * bytes per pixel * latency
+ *   where latency is platform & configuration dependent (we assume pessimal
+ *   values here).
+ *
+ *   The SR calculation is:
+ *     watermark = (trunc(latency/line time)+1) * surface width *
+ *       bytes per pixel
+ *   where
+ *     line time = htotal / dotclock
+ *     surface width = hdisplay for normal plane and 64 for cursor
+ *   and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that.  And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+	if (dev_priv->display.update_wm)
+		dev_priv->display.update_wm(crtc);
+}
+
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+				    struct drm_crtc *crtc,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
+				    bool enabled, bool scaled)
+{
+	struct drm_i915_private *dev_priv = plane->dev->dev_private;
+
+	if (dev_priv->display.update_sprite_wm)
+		dev_priv->display.update_sprite_wm(plane, crtc,
+						   sprite_width, sprite_height,
+						   pixel_size, enabled, scaled);
+}
+
+/**
+ * Lock protecting IPS-related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	assert_spin_locked(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+	POSTING_READ16(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+
+	return true;
+}
+
+static void ironlake_enable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u8 fmax, fmin, fstart, vstart;
+
+	spin_lock_irq(&mchdev_lock);
+
+	/* Enable temp reporting */
+	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+	/* 100ms RC evaluation intervals */
+	I915_WRITE(RCUPEI, 100000);
+	I915_WRITE(RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	I915_WRITE(RCBMAXAVG, 90000);
+	I915_WRITE(RCBMINAVG, 80000);
+
+	I915_WRITE(MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+
+	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
+		PXVFREQ_PX_SHIFT;
+
+	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+	dev_priv->ips.fstart = fstart;
+
+	dev_priv->ips.max_delay = fstart;
+	dev_priv->ips.min_delay = fmin;
+	dev_priv->ips.cur_delay = fstart;
+
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
+
+	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	/*
+	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 */
+
+	I915_WRITE(VIDSTART, vstart);
+	POSTING_READ(VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	I915_WRITE(MEMMODECTL, rgvmodectl);
+
+	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+		DRM_ERROR("stuck trying to change perf mode\n");
+	mdelay(1);
+
+	ironlake_set_drps(dev, fstart);
+
+	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
+		I915_READ(DDREC) + I915_READ(CSIEC);
+	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->ips.last_count2 = I915_READ(GFXEC);
+	dev_priv->ips.last_time2 = ktime_get_raw_ns();
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static void ironlake_disable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+
+	/* Ack interrupts, disable EFC interrupt */
+	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+	I915_WRITE(DEIIR, DE_PCU_EVENT);
+	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	ironlake_set_drps(dev, dev_priv->ips.fstart);
+	mdelay(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	mdelay(1);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us clearing
+ * all limits and the gpu getting stuck at its current frequency).
+ */
+static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+{
+	u32 limits;
+
+	/* Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: There's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency, if the down threshold expires in that window we will not
+	 * receive a down interrupt. */
+	if (IS_GEN9(dev_priv->dev)) {
+		limits = (dev_priv->rps.max_freq_softlimit) << 23;
+		if (val <= dev_priv->rps.min_freq_softlimit)
+			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+	} else {
+		limits = dev_priv->rps.max_freq_softlimit << 24;
+		if (val <= dev_priv->rps.min_freq_softlimit)
+			limits |= dev_priv->rps.min_freq_softlimit << 16;
+	}
+
+	return limits;
+}
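+
+/*
+ * Worked example (illustrative values): with max_freq_softlimit = 0x1a
+ * and val at the floor min_freq_softlimit = 0x0b, gen6-8 would program
+ * 0x1a << 24 | 0x0b << 16, while gen9 uses shifts of 23 and 14 instead.
+ */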
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+	int new_power;
+	u32 threshold_up = 0, threshold_down = 0; /* in % */
+	u32 ei_up = 0, ei_down = 0;
+
+	new_power = dev_priv->rps.power;
+	switch (dev_priv->rps.power) {
+	case LOW_POWER:
+		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+			new_power = LOW_POWER;
+		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val <= dev_priv->rps.min_freq_softlimit)
+		new_power = LOW_POWER;
+	if (val >= dev_priv->rps.max_freq_softlimit)
+		new_power = HIGH_POWER;
+	if (new_power == dev_priv->rps.power)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		ei_up = 16000;
+		threshold_up = 95;
+
+		/* Downclock if less than 85% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 85;
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		ei_up = 13000;
+		threshold_up = 90;
+
+		/* Downclock if less than 75% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 75;
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		ei_up = 10000;
+		threshold_up = 85;
+
+		/* Downclock if less than 60% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 60;
+		break;
+	}
+
+	/* Once BYT can survive dynamic sw freq adjustments without
+	 * hanging the system, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(dev_priv))
+		goto skip_hw_write;
+
+	I915_WRITE(GEN6_RP_UP_EI,
+		GT_INTERVAL_FROM_US(dev_priv, ei_up));
+	I915_WRITE(GEN6_RP_UP_THRESHOLD,
+		GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+
+	I915_WRITE(GEN6_RP_DOWN_EI,
+		GT_INTERVAL_FROM_US(dev_priv, ei_down));
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
+		GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
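+	/* Worked example (BETWEEN mode): ei_up = 13000 and
+	 * threshold_up = 90, so the up threshold programmed above covers
+	 * 13000 * 90 / 100 = 11700 us of busy time.
+	 */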
+
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+	dev_priv->rps.power = new_power;
+	dev_priv->rps.up_threshold = threshold_up;
+	dev_priv->rps.down_threshold = threshold_down;
+	dev_priv->rps.last_adj = 0;
+}
+
+static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+{
+	u32 mask = 0;
+
+	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+	if (val > dev_priv->rps.min_freq_softlimit)
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+	if (val < dev_priv->rps.max_freq_softlimit)
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+	mask &= dev_priv->pm_rps_events;
+
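+	/* GEN6_PMINTRMSK holds the *masked* (disabled) interrupts, hence
+	 * the inversion before sanitizing.
+	 */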
+	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
+}
+
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_delay and max_delay) is modified so that we can
+ * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
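+/* Encoding sketch: val is in the platform's ratio units (50 MHz per step on
+ * gen6-8, 16.66 MHz per step on gen9 -- see gen6_init_rps_frequencies()), so
+ * a hypothetical val of 16 requests 16 * 50 = 800 MHz on a gen6-8 part.
+ */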
+static void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
+		return;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_freq);
+	WARN_ON(val < dev_priv->rps.min_freq);
+
+	/* min/max delay may still have been modified so be sure to
+	 * write the limits value.
+	 */
+	if (val != dev_priv->rps.cur_freq) {
+		gen6_set_rps_thresholds(dev_priv, val);
+
+		if (IS_GEN9(dev))
+			I915_WRITE(GEN6_RPNSWREQ,
+				   GEN9_FREQUENCY(val));
+		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			I915_WRITE(GEN6_RPNSWREQ,
+				   HSW_FREQUENCY(val));
+		else
+			I915_WRITE(GEN6_RPNSWREQ,
+				   GEN6_FREQUENCY(val) |
+				   GEN6_OFFSET(0) |
+				   GEN6_AGGRESSIVE_TURBO);
+	}
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+	POSTING_READ(GEN6_RPNSWREQ);
+
+	dev_priv->rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_freq);
+	WARN_ON(val < dev_priv->rps.min_freq);
+
+	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+		      "Odd GPU freq value\n"))
+		val &= ~1;
+
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+	if (val != dev_priv->rps.cur_freq) {
+		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+		if (!IS_CHERRYVIEW(dev_priv))
+			gen6_set_rps_thresholds(dev_priv, val);
+	}
+
+	dev_priv->rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
+/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
+ *
+ * If Gfx is idle, then:
+ * 1. Forcewake the media well.
+ * 2. Request the idle frequency.
+ * 3. Release forcewake of the media well.
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+	u32 val = dev_priv->rps.idle_freq;
+
+	if (dev_priv->rps.cur_freq <= val)
+		return;
+
+	/* Wake up the media well, as that takes a lot less
+	 * power than the Render well. */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+	valleyview_set_rps(dev_priv->dev, val);
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+}
+
+void gen6_rps_busy(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+			gen6_rps_reset_ei(dev_priv);
+		I915_WRITE(GEN6_PMINTRMSK,
+			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (IS_VALLEYVIEW(dev))
+			vlv_set_rps_idle(dev_priv);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+		dev_priv->rps.last_adj = 0;
+		I915_WRITE(GEN6_PMINTRMSK,
+			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	spin_lock(&dev_priv->rps.client_lock);
+	while (!list_empty(&dev_priv->rps.clients))
+		list_del_init(dev_priv->rps.clients.next);
+	spin_unlock(&dev_priv->rps.client_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+		    struct intel_rps_client *rps,
+		    unsigned long submitted)
+{
+	/* This is intentionally racy! We peek at the state here, then
+	 * validate inside the RPS worker.
+	 */
+	if (!(dev_priv->mm.busy &&
+	      dev_priv->rps.enabled &&
+	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
+		return;
+
+	/* Force an RPS boost (and don't count it against the client) if
+	 * the GPU is severely congested.
+	 */
+	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
+		rps = NULL;
+
+	spin_lock(&dev_priv->rps.client_lock);
+	if (rps == NULL || list_empty(&rps->link)) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		if (dev_priv->rps.interrupts_enabled) {
+			dev_priv->rps.client_boost = true;
+			queue_work(dev_priv->wq, &dev_priv->rps.work);
+		}
+		spin_unlock_irq(&dev_priv->irq_lock);
+
+		if (rps != NULL) {
+			list_add(&rps->link, &dev_priv->rps.clients);
+			rps->boosts++;
+		} else
+			dev_priv->rps.boosts++;
+	}
+	spin_unlock(&dev_priv->rps.client_lock);
+}
+
+void intel_set_rps(struct drm_device *dev, u8 val)
+{
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
+}
+
+static void gen9_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+	I915_WRITE(GEN9_PG_ENABLE, 0);
+}
+
+static void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+}
+
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* We do forcewake before disabling RC6;
+	 * this is what the BIOS expects when going into suspend */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+	if (IS_VALLEYVIEW(dev)) {
+		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+			mode = GEN6_RC_CTL_RC6_ENABLE;
+		else
+			mode = 0;
+	}
+	if (HAS_RC6p(dev))
+		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+	else
+		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+}
+
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+{
+	/* No RC6 before Ironlake and code is gone for ilk. */
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	/* Respect the kernel parameter if it is set */
+	if (enable_rc6 >= 0) {
+		int mask;
+
+		if (HAS_RC6p(dev))
+			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+			       INTEL_RC6pp_ENABLE;
+		else
+			mask = INTEL_RC6_ENABLE;
+
+		if ((enable_rc6 & mask) != enable_rc6)
+			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+				      enable_rc6 & mask, enable_rc6, mask);
+
+		return enable_rc6 & mask;
+	}
+
+	if (IS_IVYBRIDGE(dev))
+		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+
+	return INTEL_RC6_ENABLE;
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+	return i915.enable_rc6;
+}
+
+static void gen6_init_rps_frequencies(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t rp_state_cap;
+	u32 ddcc_status = 0;
+	int ret;
+
+	/* All of these values are in units of 50 MHz */
+	dev_priv->rps.cur_freq		= 0;
+	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
+	if (IS_BROXTON(dev)) {
+		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+	} else {
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+	}
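+	/* Decode sketch: a hypothetical gen6-8 rp_state_cap of 0x00080b16
+	 * yields rp0 = 0x16 (22 * 50 = 1100 MHz), rp1 = 0x0b (550 MHz)
+	 * and min = 0x08 (400 MHz).
+	 */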
+
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
+
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
+		ret = sandybridge_pcode_read(dev_priv,
+					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					&ddcc_status);
+		if (ret == 0)
+			dev_priv->rps.efficient_freq =
+				clamp_t(u8,
+					((ddcc_status >> 8) & 0xff),
+					dev_priv->rps.min_freq,
+					dev_priv->rps.max_freq);
+	}
+
+	if (IS_SKYLAKE(dev)) {
+		/* Store the frequency values in 16.66 MHz units, which is
+		   the natural hardware unit for SKL */
+		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+	}
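+	/* e.g. with GEN9_FREQ_SCALER = 3 (50 MHz / 16.66 MHz), an RP0 of
+	 * 22 in 50 MHz units becomes 66 in SKL's native units, still
+	 * ~1100 MHz.
+	 */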
+
+	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			dev_priv->rps.min_freq_softlimit =
+				max_t(int, dev_priv->rps.efficient_freq,
+				      intel_freq_opcode(dev_priv, 450));
+		else
+			dev_priv->rps.min_freq_softlimit =
+				dev_priv->rps.min_freq;
+	}
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static void gen9_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	gen6_init_rps_frequencies(dev);
+
+	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		return;
+	}
+
+	/* Program defaults and thresholds for RPS */
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+
+	/* 1 second timeout */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
+		GT_INTERVAL_FROM_US(dev_priv, 1000000));
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+	/* Leaning on the below call to gen6_set_rps to program/setup the
+	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
+	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen9_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	uint32_t rc6_mask = 0;
+	int unused;
+
+	/* 1a: Software RC state - RC0 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	/* 2b: Program RC6 thresholds. */
+
+	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
+	if (IS_SKYLAKE(dev))
+		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+	else
+		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_ring(ring, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	if (HAS_GUC_UCODE(dev))
+		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+
+	/* 2c: Program Coarse Power Gating Policies. */
+	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
+	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+
+	/* 3a: Enable RC6 */
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+			"on" : "off");
+	/* WaRsUseTimeoutMode */
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			   GEN7_RC_CTL_TO_MODE |
+			   rc6_mask);
+	} else {
+		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			   GEN6_RC_CTL_EI_MODE(1) |
+			   rc6_mask);
+	}
+
+	/*
+	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
+	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
+	 */
+	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+		I915_WRITE(GEN9_PG_ENABLE, 0);
+	else
+		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen8_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	uint32_t rc6_mask = 0;
+	int unused;
+
+	/* 1a: Software RC state - RC0 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1c & 1d: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	/* Initialize rps frequencies */
+	gen6_init_rps_frequencies(dev);
+
+	/* 2b: Program RC6 thresholds. */
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_ring(ring, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/* 3: Enable RC6 */
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	intel_print_rc6_info(dev, rc6_mask);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN7_RC_CTL_TO_MODE |
+				rc6_mask);
+	else
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN6_RC_CTL_EI_MODE(1) |
+				rc6_mask);
+
+	/* 4: Program defaults and thresholds for RPS */
+	I915_WRITE(GEN6_RPNSWREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+	/* Docs recommend 900 MHz and 300 MHz respectively */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later) */
+
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen6_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+	u32 gtfifodbg;
+	int rc6_mode;
+	int i, ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * Perhaps there might be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* Clear the DBG now so we don't confuse earlier errors with new ones */
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* Initialize rps frequencies */
+	gen6_init_rps_frequencies(dev);
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	if (IS_IVYBRIDGE(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	/* Check if we are enabling RC6 */
+	rc6_mode = intel_enable_rc6(dev_priv->dev);
+	if (rc6_mode & INTEL_RC6_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+	/* We don't use those on Haswell */
+	if (!IS_HASWELL(dev)) {
+		if (rc6_mode & INTEL_RC6p_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+		if (rc6_mode & INTEL_RC6pp_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+	}
+
+	intel_print_rc6_info(dev, rc6_mask);
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   rc6_mask |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	/* Power down if completely idle for over 50ms */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+	if (ret)
+		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+
+	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
+				 (pcu_mbox & 0xff) * 50);
+		dev_priv->rps.max_freq = pcu_mbox & 0xff;
+	}
+
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+
+	rc6vids = 0;
+	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	if (IS_GEN6(dev) && ret) {
+		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+		rc6vids &= 0xffff00;
+		rc6vids |= GEN6_ENCODE_RC6_VID(450);
+		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		if (ret)
+			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+	}
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void __gen6_update_ring_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int min_freq = 15;
+	unsigned int gpu_freq;
+	unsigned int max_ia_freq, min_ring_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
+	int scaling_factor = 180;
+	struct cpufreq_policy *policy;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		max_ia_freq = policy->cpuinfo.max_freq;
+		cpufreq_cpu_put(policy);
+	} else {
+		/*
+		 * Default to measured freq if none found, PCU will ensure we
+		 * don't go over
+		 */
+		max_ia_freq = tsc_khz;
+	}
+
+	/* Convert from kHz to MHz */
+	max_ia_freq /= 1000;
+
+	min_ring_freq = I915_READ(DCLK) & 0xf;
+	/* convert DDR frequency from units of 266.6MHz to bandwidth */
+	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq;
+		max_gpu_freq = dev_priv->rps.max_freq;
+	}
+
+	/*
+	 * For each potential GPU frequency, load a ring frequency we'd like
+	 * to use for memory access.  We do this by specifying the IA frequency
+	 * the PCU should use as a reference to determine the ring frequency.
+	 */
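+	/* Worked example (pre-HSW, hypothetical numbers): with
+	 * max_gpu_freq = 22 and max_ia_freq = 3000 MHz, gpu_freq = 16
+	 * gives diff = 6, so ia_freq = 3000 - (6 * 180) / 2 = 2460 MHz,
+	 * encoded as DIV_ROUND_CLOSEST(2460, 100) = 25.
+	 */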
+	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+		int diff = max_gpu_freq - gpu_freq;
+		unsigned int ia_freq = 0, ring_freq = 0;
+
+		if (IS_SKYLAKE(dev)) {
+			/*
+			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
+			 * No floor required for ring frequency on SKL.
+			 */
+			ring_freq = gpu_freq;
+		} else if (INTEL_INFO(dev)->gen >= 8) {
+			/* max(2 * GT, DDR). NB: GT is 50MHz units */
+			ring_freq = max(min_ring_freq, gpu_freq);
+		} else if (IS_HASWELL(dev)) {
+			ring_freq = mult_frac(gpu_freq, 5, 4);
+			ring_freq = max(min_ring_freq, ring_freq);
+			/* leave ia_freq as the default, chosen by cpufreq */
+		} else {
+			/* On older processors, there is no separate ring
+			 * clock domain, so in order to boost the bandwidth
+			 * of the ring, we need to upclock the CPU (ia_freq).
+			 *
+			 * For GPU frequencies less than 750MHz,
+			 * just use the lowest ring freq.
+			 */
+			if (gpu_freq < min_freq)
+				ia_freq = 800;
+			else
+				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+		}
+
+		sandybridge_pcode_write(dev_priv,
+					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+					gpu_freq);
+	}
+}
+
+void gen6_update_ring_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_CORE_RING_FREQ(dev))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	__gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 val, rp0;
+
+	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+	switch (INTEL_INFO(dev)->eu_total) {
+	case 8:
+		/* (2 * 4) config */
+		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
+		break;
+	case 12:
+		/* (2 * 6) config */
+		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
+		break;
+	case 16:
+		/* (2 * 8) config */
+	default:
+		/* Setting (2 * 8) Min RP0 for any other combination */
+		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
+		break;
+	}
+
+	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
+
+	return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpe;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+	return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
+
+	return rp1;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+	return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp0;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+	/* Clamp to max */
+	rp0 = min_t(u32, rp0, 0xea);
+
+	return rp0;
+}
+
+static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpe;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
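+	/* The RPe code is split across two fuses: bits 4:0 come from the
+	 * LO fuse and the remaining bits from the HI fuse, spliced in at
+	 * bit 5 above.
+	 */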
+
+	return rpe;
+}
+
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+}
+
+/* Check that the pctx buffer wasn't moved under us. */
+static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+{
+	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+			     dev_priv->vlv_pctx->stolen->start);
+}
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long pctx_paddr, paddr;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	u32 pcbr;
+	int pctx_size = 32*1024;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	pcbr = I915_READ(VLV_PCBR);
+	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+		paddr = (dev_priv->mm.stolen_base +
+			 (gtt->stolen_size - pctx_size));
+
+		pctx_paddr = (paddr & (~4095));
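+		/* e.g. with a hypothetical stolen base of 0x7b000000 and
+		 * 64 MB of stolen memory, paddr = 0x7b000000 + 0x4000000
+		 * - 0x8000 = 0x7eff8000, which the & ~4095 mask leaves
+		 * unchanged since it is already 4 KiB aligned.
+		 */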
+		I915_WRITE(VLV_PCBR, pctx_paddr);
+	}
+
+	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
+}
+
+static void valleyview_setup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *pctx;
+	unsigned long pctx_paddr;
+	u32 pcbr;
+	int pctx_size = 24*1024;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	pcbr = I915_READ(VLV_PCBR);
+	if (pcbr) {
+		/* BIOS set it up already, grab the pre-alloc'd space */
+		int pcbr_offset;
+
+		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+								      pcbr_offset,
+								      I915_GTT_OFFSET_NONE,
+								      pctx_size);
+		goto out;
+	}
+
+	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+	/*
+	 * From the Gunit register HAS:
+	 * The Gfx driver is expected to program this register and ensure
+	 * proper allocation within Gfx stolen memory.  For example, this
+	 * register should be programmed such that the PCBR range does not
+	 * overlap with other ranges, such as the frame buffer, protected
+	 * memory, or any other relevant ranges.
+	 */
+	pctx = i915_gem_object_create_stolen(dev, pctx_size);
+	if (!pctx) {
+		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+		return;
+	}
+
+	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+	I915_WRITE(VLV_PCBR, pctx_paddr);
+
+out:
+	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
+	dev_priv->vlv_pctx = pctx;
+}
+
+static void valleyview_cleanup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (WARN_ON(!dev_priv->vlv_pctx))
+		return;
+
+	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+	dev_priv->vlv_pctx = NULL;
+}
+
+static void valleyview_init_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	valleyview_setup_pctx(dev);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	switch ((val >> 6) & 3) {
+	case 0:
+	case 1:
+		dev_priv->mem_freq = 800;
+		break;
+	case 2:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 3:
+		dev_priv->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+
+	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+			 dev_priv->rps.max_freq);
+
+	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
+	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+			 dev_priv->rps.min_freq);
+
+	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	cherryview_setup_pctx(dev);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	mutex_lock(&dev_priv->sb_lock);
+	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	switch ((val >> 2) & 0x7) {
+	case 3:
+		dev_priv->mem_freq = 2000;
+		break;
+	default:
+		dev_priv->mem_freq = 1600;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+
+	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+			 dev_priv->rps.max_freq);
+
+	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
+	/* PUnit validated range is only [RPe, RP0] */
+	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+			 dev_priv->rps.min_freq);
+
+	WARN_ONCE((dev_priv->rps.max_freq |
+		   dev_priv->rps.efficient_freq |
+		   dev_priv->rps.rp1_freq |
+		   dev_priv->rps.min_freq) & 1,
+		  "Odd GPU freq values\n");
+
+	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+{
+	valleyview_cleanup_pctx(dev);
+}
+
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	gtfifodbg = I915_READ(GTFIFODBG);
+	if (gtfifodbg) {
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	cherryview_check_pctx(dev_priv);
+
+	/* 1a & 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	/* 2a: Program RC6 thresholds. */
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+
+	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+
+	/* For now we assume BIOS is allocating and populating the PCBR */
+	pcbr = I915_READ(VLV_PCBR);
+
+	/* 3: Enable RC6 */
+	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+						(pcbr >> VLV_PCBR_ADDR_SHIFT))
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+	/* 4: Program defaults and thresholds for RPS */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	/* Setting Fixed Bias */
+	val = VLV_OVERRIDE_EN |
+		  VLV_SOC_TDP_EN |
+		  CHV_BIAS_CPU_50_SOC_50;
+	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
+
+	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void valleyview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 gtfifodbg, val, rc6_mode = 0;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	valleyview_check_pctx(dev_priv);
+
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	/* On VLV, forcewake all wells; else re-direct to the regular path */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
+
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+				      VLV_RENDER_RC0_COUNT_EN |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
+
+	intel_print_rc6_info(dev, rc6_mode);
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+	/* Setting Fixed Bias */
+	val = VLV_OVERRIDE_EN |
+		  VLV_SOC_TDP_EN |
+		  VLV_BIAS_CPU_125_SOC_875;
+	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
+
+	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+	unsigned long freq;
+	int div = (vidfreq & 0x3f0000) >> 16;
+	int post = (vidfreq & 0x3000) >> 12;
+	int pre = (vidfreq & 0x7);
+
+	if (!pre)
+		return 0;
+
+	freq = ((div * 133333) / ((1<<post) * pre));
+
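+	/* e.g. a hypothetical vidfreq of 0x00121001 decodes to div = 18,
+	 * post = 1, pre = 1, giving (18 * 133333) / (2 * 1) = 1199997.
+	 */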
+	return freq;
+}
+
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	assert_spin_locked(&mchdev_lock);
+
+	diff1 = now - dev_priv->ips.last_time1;
+
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->ips.chipset_power;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->ips.last_count1) {
+		diff = ~0UL - dev_priv->ips.last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->ips.last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->ips.c_m &&
+		    cparams[i].t == dev_priv->ips.r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	diff = div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	ret = div_u64(ret, 10);
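+	/* e.g. with the { 1, 1333 } cparams row (m = 301, c = 28664) and
+	 * a hypothetical diff of 100 counts/ms, ret =
+	 * (301 * 100 + 28664) / 10 = 5876.
+	 */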
+
+	dev_priv->ips.last_count1 = total_count;
+	dev_priv->ips.last_time1 = now;
+
+	dev_priv->ips.chipset_power = ret;
+
+	return ret;
+}
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long val;
+
+	if (INTEL_INFO(dev)->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_chipset_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
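+	/* e.g. hypothetical m = 40, x = 100, b = 10 gives
+	 * (40 * 100) / 127 - 10 = 21 with integer division.
+	 */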
+	return ((m * x) / 127) - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+	if (pxvid == 0)
+		return 0;
+
+	if (pxvid >= 8 && pxvid < 31)
+		pxvid = 31;
+
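+	/* e.g. a pxvid of 16 falls into the 8..30 clamp above and is
+	 * treated as 31, giving (31 + 2) * 125 = 4125; pvid_to_extvid()
+	 * then subtracts 1125 on mobile parts, yielding 3000.
+	 */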
+	return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	struct drm_device *dev = dev_priv->dev;
+	const int vd = _pxvid_to_vd(pxvid);
+	const int vm = vd - 1125;
+
+	if (INTEL_INFO(dev)->is_mobile)
+		return vm > 0 ? vm : 0;
+
+	return vd;
+}
+
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	u64 now, diff, diffms;
+	u32 count;
+
+	assert_spin_locked(&mchdev_lock);
+
+	now = ktime_get_raw_ns();
+	diffms = now - dev_priv->ips.last_time2;
+	do_div(diffms, NSEC_PER_MSEC);
+
+	/* Don't divide by 0 */
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->ips.last_count2) {
+		diff = ~0UL - dev_priv->ips.last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->ips.last_count2;
+	}
+
+	dev_priv->ips.last_count2 = count;
+	dev_priv->ips.last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	diff = div_u64(diff, diffms * 10);
+	dev_priv->ips.gfx_power = diff;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (INTEL_INFO(dev)->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->ips.corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	__i915_update_gfx_val(dev_priv);
+
+	return dev_priv->ips.gfx_power + state2;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long val;
+
+	if (INTEL_INFO(dev)->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = __i915_chipset_val(dev_priv);
+	graphics_val = __i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+		dev_priv->ips.max_delay--;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+		dev_priv->ips.max_delay++;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	struct intel_engine_cs *ring;
+	bool ret = false;
+	int i;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->ips.max_delay = dev_priv->ips.fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * ips_ping_for_i915_load - notify intel_ips that i915 is loaded
+ *
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	spin_unlock_irq(&mchdev_lock);
+
+	ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+	spin_lock_irq(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static void intel_init_emon(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lcfuse;
+	u8 pxw[16];
+	int i;
+
+	/* Disable to program */
+	I915_WRITE(ECR, 0);
+	POSTING_READ(ECR);
+
+	/* Program energy weights for various events */
+	I915_WRITE(SDEW, 0x15040d00);
+	I915_WRITE(CSIEW0, 0x007f0000);
+	I915_WRITE(CSIEW1, 0x1e220004);
+	I915_WRITE(CSIEW2, 0x04000004);
+
+	for (i = 0; i < 5; i++)
+		I915_WRITE(PEW(i), 0);
+	for (i = 0; i < 3; i++)
+		I915_WRITE(DEW(i), 0);
+
+	/* Program P-state weights to account for frequency power adjustment */
+	for (i = 0; i < 16; i++) {
+		u32 pxvidfreq = I915_READ(PXVFREQ(i));
+		unsigned long freq = intel_pxfreq(pxvidfreq);
+		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+			PXVFREQ_PX_SHIFT;
+		unsigned long val;
+
+		val = vid * vid;
+		val *= (freq / 1000);
+		val *= 255;
+		val /= (127*127*900);
+		if (val > 0xff)
+			DRM_ERROR("bad pxval: %ld\n", val);
+		pxw[i] = val;
+	}
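+	/* e.g. hypothetical vid = 100 and freq = 400000 give
+	 * val = 100 * 100 * 400 * 255 / (127 * 127 * 900) ~= 70,
+	 * comfortably inside the 0xff limit checked above.
+	 */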
+	/* Render standby states get 0 weight */
+	pxw[14] = 0;
+	pxw[15] = 0;
+
+	for (i = 0; i < 4; i++) {
+		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+		I915_WRITE(PXW(i), val);
+	}
+
+	/* Adjust magic regs to magic values (more experimental results) */
+	I915_WRITE(OGW0, 0);
+	I915_WRITE(OGW1, 0);
+	I915_WRITE(EG0, 0x00007f00);
+	I915_WRITE(EG1, 0x0000000e);
+	I915_WRITE(EG2, 0x000e0000);
+	I915_WRITE(EG3, 0x68000300);
+	I915_WRITE(EG4, 0x42000000);
+	I915_WRITE(EG5, 0x00140031);
+	I915_WRITE(EG6, 0);
+	I915_WRITE(EG7, 0);
+
+	for (i = 0; i < 8; i++)
+		I915_WRITE(PXWL(i), 0);
+
+	/* Enable PMON + select events */
+	I915_WRITE(ECR, 0x80000019);
+
+	lcfuse = I915_READ(LCFUSE02);
+
+	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+void intel_init_gt_powersave(struct drm_device *dev)
+{
+	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
+	if (IS_CHERRYVIEW(dev))
+		cherryview_init_gt_powersave(dev);
+	else if (IS_VALLEYVIEW(dev))
+		valleyview_init_gt_powersave(dev);
+}
+
+void intel_cleanup_gt_powersave(struct drm_device *dev)
+{
+	if (IS_CHERRYVIEW(dev))
+		return;
+	else if (IS_VALLEYVIEW(dev))
+		valleyview_cleanup_gt_powersave(dev);
+}
+
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	gen6_disable_rps_interrupts(dev);
+}
+
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	gen6_suspend_rps(dev);
+
+	/* Force GPU to min freq during suspend */
+	gen6_rps_idle(dev_priv);
+}
+
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_disable_drps(dev);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		intel_suspend_gt_powersave(dev);
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		if (INTEL_INFO(dev)->gen >= 9)
+			gen9_disable_rps(dev);
+		else if (IS_CHERRYVIEW(dev))
+			cherryview_disable_rps(dev);
+		else if (IS_VALLEYVIEW(dev))
+			valleyview_disable_rps(dev);
+		else
+			gen6_disable_rps(dev);
+
+		dev_priv->rps.enabled = false;
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+}
+
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     rps.delayed_resume_work.work);
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	gen6_reset_rps_interrupts(dev);
+
+	if (IS_CHERRYVIEW(dev)) {
+		cherryview_enable_rps(dev);
+	} else if (IS_VALLEYVIEW(dev)) {
+		valleyview_enable_rps(dev);
+	} else if (INTEL_INFO(dev)->gen >= 9) {
+		gen9_enable_rc6(dev);
+		gen9_enable_rps(dev);
+		if (IS_SKYLAKE(dev))
+			__gen6_update_ring_freq(dev);
+	} else if (IS_BROADWELL(dev)) {
+		gen8_enable_rps(dev);
+		__gen6_update_ring_freq(dev);
+	} else {
+		gen6_enable_rps(dev);
+		__gen6_update_ring_freq(dev);
+	}
+
+	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+
+	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+
+	dev_priv->rps.enabled = true;
+
+	gen6_enable_rps_interrupts(dev);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Powersaving is controlled by the host when inside a VM */
+	if (intel_vgpu_active(dev))
+		return;
+
+	if (IS_IRONLAKE_M(dev)) {
+		mutex_lock(&dev->struct_mutex);
+		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+		mutex_unlock(&dev->struct_mutex);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		/*
+		 * PCU communication is slow and this doesn't need to be
+		 * done at any specific time, so do this out of our fast path
+		 * to make resume and init faster.
+		 *
+		 * We depend on the HW RC6 power context save/restore
+		 * mechanism when entering D3 through runtime PM suspend. So
+		 * disable RPM until RPS/RC6 is properly setup. We can only
+		 * get here via the driver load/system resume/runtime resume
+		 * paths, so the _noresume version is enough (and in case of
+		 * runtime resume it's necessary).
+		 */
+		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+					   round_jiffies_up_relative(HZ)))
+			intel_runtime_pm_get_noresume(dev_priv);
+	}
+}
+
+void intel_reset_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	gen6_suspend_rps(dev);
+	dev_priv->rps.enabled = false;
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void g4x_disable_trickle_feed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+
+		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
+		POSTING_READ(DSPSURF(pipe));
+	}
+}
+
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+	/*
+	 * Required for FBC
+	 * WaFbcDisableDpfcClockGating:ilk
+	 */
+	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+
+	I915_WRITE(PCH_3DCGDIS0,
+		   MARIUNIT_CLOCK_GATE_DISABLE |
+		   SVSMUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(PCH_3DCGDIS1,
+		   VFMUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * According to the spec the following bits should be set in
+	 * order to enable memory self-refresh
+	 * The bit 22/21 of 0x42004
+	 * The bit 5 of 0x42020
+	 * The bit 15 of 0x45000
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
+		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+	I915_WRITE(DISP_ARB_CTL,
+		   (I915_READ(DISP_ARB_CTL) |
+		    DISP_FBC_WM_DIS));
+
+	ilk_init_lp_watermarks(dev);
+
+	/*
+	 * Based on the document from hardware guys the following bits
+	 * should be set unconditionally in order to enable FBC.
+	 * The bit 22 of 0x42000
+	 * The bit 22 of 0x42004
+	 * The bit 7,8,9 of 0x42020.
+	 */
+	if (IS_IRONLAKE_M(dev)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_DPARB_GATE);
+	}
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+	I915_WRITE(_3D_CHICKEN2,
+		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+		   _3D_CHICKEN2_WM_READ_PIPELINED);
+
+	/* WaDisableRenderCachePipelinedFlush:ilk */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:ilk */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	g4x_disable_trickle_feed(dev);
+
+	ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t val;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+		   DPLS_EDP_PPS_FIX_DIS);
+	/* The below fixes display corruption (a few pixels shifted
+	 * downward) seen only on the LVDS panel of some HP laptops
+	 * with Ivy Bridge.
+	 */
+	for_each_pipe(dev_priv, pipe) {
+		val = I915_READ(TRANS_CHICKEN2(pipe));
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		if (dev_priv->vbt.fdi_rx_polarity_inverted)
+			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+		I915_WRITE(TRANS_CHICKEN2(pipe), val);
+	}
+	/* WADP0ClockGatingDisable */
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+	}
+}
+
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(MCH_SSKPD);
+	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+			      tmp);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+
+	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+	I915_WRITE(_3D_CHICKEN,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	I915_WRITE(GEN6_GT_MODE,
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+	ilk_init_lp_watermarks(dev);
+
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+	I915_WRITE(GEN6_UCGCTL1,
+		   I915_READ(GEN6_UCGCTL1) |
+		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 *
+	 * WaDisableRCCUnitClockGating:snb
+	 * WaDisableRCPBUnitClockGating:snb
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaStripsFansDisableFastClipPerformanceFix:snb */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+
+	/*
+	 * BSpec says:
+	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+	 * 3DSTATE_SF number of SF output attributes is more than 16."
+	 */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
+
+	/*
+	 * According to the spec, the following bits should be
+	 * set in order to enable memory self-refresh and FBC:
+	 * bits 21 and 22 of 0x42000,
+	 * bits 21 and 22 of 0x42004,
+	 * bits 5 and 7 of 0x42020,
+	 * bit 14 of 0x70180,
+	 * bit 14 of 0x71180.
+	 *
+	 * WaFbcAsynchFlipDisableFbcQueue:snb
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN1,
+		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+	I915_WRITE(ILK_DSPCLK_GATE_D,
+		   I915_READ(ILK_DSPCLK_GATE_D) |
+		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+	g4x_disable_trickle_feed(dev);
+
+	cpt_init_clock_gating(dev);
+
+	gen6_check_mch_setup(dev);
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+	/*
+	 * WaVSThreadDispatchOverride:ivb,vlv
+	 *
+	 * This actually overrides the dispatch
+	 * mode for all thread types.
+	 */
+	reg &= ~GEN7_FF_SCHED_MASK;
+	reg |= GEN7_FF_TS_SCHED_HW;
+	reg |= GEN7_FF_VS_SCHED_HW;
+	reg |= GEN7_FF_DS_SCHED_HW;
+
+	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * TODO: this bit should only be enabled when really needed, then
+	 * disabled when not needed anymore in order to save power.
+	 */
+	if (HAS_PCH_LPT_LP(dev))
+		I915_WRITE(SOUTH_DSPCLK_GATE_D,
+			   I915_READ(SOUTH_DSPCLK_GATE_D) |
+			   PCH_LP_PARTITION_LEVEL_DISABLE);
+
+	/* WADPOClockGatingDisable:hsw */
+	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
+		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+}
+
+static void lpt_suspend_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_LPT_LP(dev)) {
+		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+
+		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+}
+
+static void broadwell_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	uint32_t misccpctl;
+
+	ilk_init_lp_watermarks(dev);
+
+	/* WaSwitchSolVfFArbitrationPriority:bdw */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+	/* WaPsrDPAMaskVBlankInSRD:bdw */
+	I915_WRITE(CHICKEN_PAR1_1,
+		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+
+	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(CHICKEN_PIPESL_1(pipe),
+			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
+			   BDW_DPRS_MASK_VBLANK_SRD);
+	}
+
+	/* WaVSRefCountFullforceMissDisable:bdw */
+	/* WaDSRefCountFullforceMissDisable:bdw */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) &
+		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+	/* WaDisableSDEUnitClockGating:bdw */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * WaProgramL3SqcReg1Default:bdw
+	 * WaTempDisableDOPClkGating:bdw
+	 */
+	misccpctl = I915_READ(GEN7_MISCCPCTL);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+	/*
+	 * Wait at least 100 clocks before re-enabling clock gating. See
+	 * the definition of L3SQCREG1 in BSpec.
+	 */
+	POSTING_READ(GEN8_L3SQCREG1);
+	udelay(1);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+	/*
+	 * WaGttCachingOffByDefault:bdw
+	 * GTT cache may not work with big pages, so if those
+	 * are ever enabled GTT cache may need to be disabled.
+	 */
+	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+
+	lpt_init_clock_gating(dev);
+}
+
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ilk_init_lp_watermarks(dev);
+
+	/* L3 caching of data atomics doesn't work -- disable it. */
+	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+	I915_WRITE(HSW_ROW_CHICKEN3,
+		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
+	/* This is required by WaCatErrorRejectionIssue:hsw */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	/* WaVSRefCountFullforceMissDisable:hsw */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+
+	/* WaDisable_RenderCache_OperationalFlush:hsw */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	/* enable HiZ Raw Stall Optimization */
+	I915_WRITE(CACHE_MODE_0_GEN7,
+		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+
+	/* WaDisable4x2SubspanOptimization:hsw */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	I915_WRITE(GEN7_GT_MODE,
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+	/* WaSampleCChickenBitEnable:hsw */
+	I915_WRITE(HALF_SLICE_CHICKEN3,
+		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+
+	/* WaSwitchSolVfFArbitrationPriority:hsw */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+	/* WaRsPkgCStateDisplayPMReq:hsw */
+	I915_WRITE(CHICKEN_PAR1_1,
+		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+
+	lpt_init_clock_gating(dev);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t snpcr;
+
+	ilk_init_lp_watermarks(dev);
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableEarlyCull:ivb */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix:ivb */
+	I915_WRITE(IVB_CHICKEN3,
+		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+	/* WaDisablePSDDualDispatchEnable:ivb */
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:ivb */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode:ivb */
+	I915_WRITE(GEN7_L3CNTLREG1,
+			GEN7_WA_FOR_GEN7_L3_CONTROL);
+	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+		   GEN7_WA_L3_CHICKEN_MODE);
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_ROW_CHICKEN2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	else {
+		/* must write both registers */
+		I915_WRITE(GEN7_ROW_CHICKEN2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	}
+
+	/* WaForceL3Serialization:ivb */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/*
+	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+	/* This is required by WaCatErrorRejectionIssue:ivb */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	g4x_disable_trickle_feed(dev);
+
+	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	if (0) { /* causes HiZ corruption on ivb:gt1 */
+		/* enable HiZ Raw Stall Optimization */
+		I915_WRITE(CACHE_MODE_0_GEN7,
+			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+	}
+
+	/* WaDisable4x2SubspanOptimization:ivb */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	I915_WRITE(GEN7_GT_MODE,
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr &= ~GEN6_MBC_SNPCR_MASK;
+	snpcr |= GEN6_MBC_SNPCR_MED;
+	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	if (!HAS_PCH_NOP(dev))
+		cpt_init_clock_gating(dev);
+
+	gen6_check_mch_setup(dev);
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/*
+	 * On driver load, a pipe may be active and driving a DSI display.
+	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+	 * (and never recovering) in this case. intel_dsi_post_disable() will
+	 * clear it when we turn off the display.
+	 */
+	val = I915_READ(DSPCLK_GATE_D);
+	val &= DPOUNIT_CLOCK_GATE_DISABLE;
+	val |= VRHUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
+
+	/*
+	 * Disable trickle feed and enable pnd deadline calculation
+	 */
+	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+	I915_WRITE(CBR1_VLV, 0);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	vlv_init_display_clock_gating(dev_priv);
+
+	/* WaDisableEarlyCull:vlv */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix:vlv */
+	I915_WRITE(IVB_CHICKEN3,
+		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+	/* WaPsdDispatchEnable:vlv */
+	/* WaDisablePSDDualDispatchEnable:vlv */
+	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:vlv */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	/* WaForceL3Serialization:vlv */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/* WaDisableDopClockGating:vlv */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+	/* This is required by WaCatErrorRejectionIssue:vlv */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	/*
+	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * WaDisableL3Bank2xClockGate:vlv
+	 * Disabling L3 clock gating - MMIO 940c[25] = 1
+	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING.
+	 */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
+	/*
+	 * BSpec says this must be set, even though
+	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
+	 */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	I915_WRITE(GEN7_GT_MODE,
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+	/*
+	 * WaIncreaseL3CreditsForVLVB0:vlv
+	 * This is actually the hardware default.
+	 */
+	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+
+	/*
+	 * WaDisableVLVClockGating_VBIIssue:vlv
+	 * Disable clock gating on the GCFG unit to prevent a delay
+	 * in the reporting of vblank events.
+	 */
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+}
+
+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	vlv_init_display_clock_gating(dev_priv);
+
+	/* WaVSRefCountFullforceMissDisable:chv */
+	/* WaDSRefCountFullforceMissDisable:chv */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) &
+		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+	/* WaDisableSemaphoreAndSyncFlipWait:chv */
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+	/* WaDisableCSUnitClockGating:chv */
+	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSDEUnitClockGating:chv */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * GTT cache may not work with big pages, so if those
+	 * are ever enabled GTT cache may need to be disabled.
+	 */
+	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate;
+
+	I915_WRITE(RENCLK_GATE_D1, 0);
+	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+		   GS_UNIT_CLOCK_GATE_DISABLE |
+		   CL_UNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+		OVRUNIT_CLOCK_GATE_DISABLE |
+		OVCUNIT_CLOCK_GATE_DISABLE;
+	if (IS_GM45(dev))
+		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+	/* WaDisableRenderCachePipelinedFlush */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:g4x */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+	g4x_disable_trickle_feed(dev);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+	I915_WRITE(DSPCLK_GATE_D, 0);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	I915_WRITE16(DEUC, 0);
+	I915_WRITE(MI_ARB_STATE,
+		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+		   I965_RCC_CLOCK_GATE_DISABLE |
+		   I965_RCPB_CLOCK_GATE_DISABLE |
+		   I965_ISC_CLOCK_GATE_DISABLE |
+		   I965_FBC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+	I915_WRITE(MI_ARB_STATE,
+		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dstate = I915_READ(D_STATE);
+
+	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+		DSTATE_DOT_CLOCK_GATING;
+	I915_WRITE(D_STATE, dstate);
+
+	if (IS_PINEVIEW(dev))
+		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+	/* IIR "flip pending" means done if this bit is set */
+	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+	/* interrupts should cause a wake up from C3 */
+	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+
+	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+
+	I915_WRITE(MI_ARB_STATE,
+		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+
+	/* interrupts should cause a wake up from C3 */
+	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+
+	I915_WRITE(MEM_MODE,
+		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(MEM_MODE,
+		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
+		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.init_clock_gating)
+		dev_priv->display.init_clock_gating(dev);
+}
+
+void intel_suspend_hw(struct drm_device *dev)
+{
+	if (HAS_PCH_LPT(dev))
+		lpt_suspend_hw(dev);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_fbc_init(dev_priv);
+
+	/* For cxsr */
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_GEN5(dev))
+		i915_ironlake_get_mem_freq(dev);
+
+	/* For FIFO watermark updates */
+	if (INTEL_INFO(dev)->gen >= 9) {
+		skl_setup_wm_latency(dev);
+
+		if (IS_BROXTON(dev))
+			dev_priv->display.init_clock_gating =
+				bxt_init_clock_gating;
+		dev_priv->display.update_wm = skl_update_wm;
+		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		ilk_setup_wm_latency(dev);
+
+		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+			dev_priv->display.update_wm = ilk_update_wm;
+			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+		} else {
+			DRM_DEBUG_KMS("Failed to read display plane latency. "
+				      "Disable CxSR\n");
+		}
+
+		if (IS_GEN5(dev))
+			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+		else if (IS_GEN6(dev))
+			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+		else if (IS_IVYBRIDGE(dev))
+			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+		else if (IS_HASWELL(dev))
+			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+		else if (INTEL_INFO(dev)->gen == 8)
+			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+	} else if (IS_CHERRYVIEW(dev)) {
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
+		dev_priv->display.init_clock_gating =
+			cherryview_init_clock_gating;
+	} else if (IS_VALLEYVIEW(dev)) {
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
+		dev_priv->display.init_clock_gating =
+			valleyview_init_clock_gating;
+	} else if (IS_PINEVIEW(dev)) {
+		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+					    dev_priv->is_ddr3,
+					    dev_priv->fsb_freq,
+					    dev_priv->mem_freq)) {
+			DRM_INFO("failed to find known CxSR latency "
+				 "(found ddr%s fsb freq %d, mem freq %d), "
+				 "disabling CxSR\n",
+				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
+				 dev_priv->fsb_freq, dev_priv->mem_freq);
+			/* Disable CxSR and never update its watermark again */
+			intel_set_memory_cxsr(dev_priv, false);
+			dev_priv->display.update_wm = NULL;
+		} else
+			dev_priv->display.update_wm = pineview_update_wm;
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_G4X(dev)) {
+		dev_priv->display.update_wm = g4x_update_wm;
+		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+	} else if (IS_GEN4(dev)) {
+		dev_priv->display.update_wm = i965_update_wm;
+		if (IS_CRESTLINE(dev))
+			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+		else if (IS_BROADWATER(dev))
+			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+	} else if (IS_GEN3(dev)) {
+		dev_priv->display.update_wm = i9xx_update_wm;
+		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_GEN2(dev)) {
+		if (INTEL_INFO(dev)->num_pipes == 1) {
+			dev_priv->display.update_wm = i845_update_wm;
+			dev_priv->display.get_fifo_size = i845_get_fifo_size;
+		} else {
+			dev_priv->display.update_wm = i9xx_update_wm;
+			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+		}
+
+		if (IS_I85X(dev) || IS_I865G(dev))
+			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+		else
+			dev_priv->display.init_clock_gating = i830_init_clock_gating;
+	} else {
+		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+	}
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, *val);
+	I915_WRITE(GEN6_PCODE_DATA1, 0);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	*val = I915_READ(GEN6_PCODE_DATA);
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, val);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
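+
+/*
+ * Usage sketch (not verbatim from this file): pcode accesses must be
+ * serialized by rps.hw_lock, e.g.
+ *
+ *	mutex_lock(&dev_priv->rps.hw_lock);
+ *	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+ *	mutex_unlock(&dev_priv->rps.hw_lock);
+ *
+ * -EAGAIN means the mailbox was already busy; -ETIMEDOUT means pcode never
+ * acked the request within 500 ms.
+ */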
+
+static int vlv_gpu_freq_div(unsigned int czclk_freq)
+{
+	switch (czclk_freq) {
+	case 200:
+		return 10;
+	case 267:
+		return 12;
+	case 320:
+	case 333:
+		return 16;
+	case 400:
+		return 20;
+	default:
+		return -1;
+	}
+}
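+
+/*
+ * The divider above is derived from the CZ clock frequency in MHz; the -1
+ * returned for an unrecognized frequency is propagated as an error by the
+ * conversion helpers below.
+ */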
+
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
+
+	div = vlv_gpu_freq_div(czclk_freq);
+	if (div < 0)
+		return div;
+
+	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
+}
+
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
+
+	mul = vlv_gpu_freq_div(czclk_freq);
+	if (mul < 0)
+		return mul;
+
+	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
+}
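+
+/*
+ * Worked example (a sketch, assuming a 320 MHz CZ clock, i.e. div/mul = 16):
+ * byt_gpu_freq(0xe8) = 320 * (0xe8 + 6 - 0xbd) / 16 = 980 MHz, and
+ * byt_freq_opcode(980) maps back to 0xe8, so the two are inverses.
+ */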
+
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
+
+	div = vlv_gpu_freq_div(czclk_freq) / 2;
+	if (div < 0)
+		return div;
+
+	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
+
+	mul = vlv_gpu_freq_div(czclk_freq) / 2;
+	if (mul < 0)
+		return mul;
+
+	/* CHV needs even values */
+	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
+}
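+
+/*
+ * Likewise for CHV (a sketch, assuming a 400 MHz CZ clock, i.e. mul = 10):
+ * chv_freq_opcode(600) = DIV_ROUND_CLOSEST(600 * 2 * 10, 400) * 2 = 60 and
+ * chv_gpu_freq(60) = DIV_ROUND_CLOSEST(400 * 60, 2 * 10) / 2 = 600 MHz;
+ * the trailing "* 2" is what keeps CHV opcodes even.
+ */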
+
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	if (IS_GEN9(dev_priv->dev))
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
+	else if (IS_CHERRYVIEW(dev_priv->dev))
+		return chv_gpu_freq(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		return byt_gpu_freq(dev_priv, val);
+	else
+		return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	if (IS_GEN9(dev_priv->dev))
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
+	else if (IS_CHERRYVIEW(dev_priv->dev))
+		return chv_freq_opcode(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		return byt_freq_opcode(dev_priv, val);
+	else
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
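+
+/*
+ * intel_gpu_freq() and intel_freq_opcode() are intended to be inverses.
+ * Assuming the usual definitions GT_FREQUENCY_MULTIPLIER = 50 and
+ * GEN9_FREQ_SCALER = 3, one opcode step is 50 MHz on gen6-8 and
+ * 50/3 MHz (~16.67 MHz) on gen9.
+ */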
+
+struct request_boost {
+	struct work_struct work;
+	struct drm_i915_gem_request *req;
+};
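+
+/*
+ * A worker is used so a boost can be requested from atomic context (hence
+ * the GFP_ATOMIC allocation in intel_queue_rps_boost_for_request()); the
+ * actual frequency bump then happens later in process context.
+ */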
+
+static void __intel_rps_boost_work(struct work_struct *work)
+{
+	struct request_boost *boost = container_of(work, struct request_boost, work);
+	struct drm_i915_gem_request *req = boost->req;
+
+	if (!i915_gem_request_completed(req, true))
+		gen6_rps_boost(to_i915(req->ring->dev), NULL,
+			       req->emitted_jiffies);
+
+	i915_gem_request_unreference__unlocked(req);
+	kfree(boost);
+}
+
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+				       struct drm_i915_gem_request *req)
+{
+	struct request_boost *boost;
+
+	if (req == NULL || INTEL_INFO(dev)->gen < 6)
+		return;
+
+	if (i915_gem_request_completed(req, true))
+		return;
+
+	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
+	if (boost == NULL)
+		return;
+
+	i915_gem_request_reference(req);
+	boost->req = req;
+
+	INIT_WORK(&boost->work, __intel_rps_boost_work);
+	queue_work(to_i915(dev)->wq, &boost->work);
+}
+
+void intel_pm_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_init(&dev_priv->rps.hw_lock);
+	spin_lock_init(&dev_priv->rps.client_lock);
+
+	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+			  intel_gen6_powersave_work);
+	INIT_LIST_HEAD(&dev_priv->rps.clients);
+	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
+	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
+
+	dev_priv->pm.suspended = false;
+}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 0000000..213581c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell the display controller supports Panel Self-Refresh on
+ * display panels which have a remote frame buffer (RFB) implemented
+ * according to the PSR spec in eDP 1.3. The PSR feature allows the display
+ * to go to lower standby states when the system is idle but the display is
+ * on, as it completely eliminates display refresh requests to DDR memory as
+ * long as the frame buffer for that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, hence why the i915 PSR support uses the
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
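+
+/*
+ * A rough sketch of the flow implemented below: a frontbuffer write leads
+ * to intel_psr_invalidate(), which records the dirty planes in
+ * busy_frontbuffer_bits and exits self-refresh via intel_psr_exit(); once
+ * the rendering reaches memory, intel_psr_flush() clears those bits and
+ * schedules psr.work, which re-arms PSR through intel_psr_activate().
+ */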
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	val = I915_READ(VLV_PSRSTAT(pipe)) &
+	      VLV_EDP_PSR_CURR_STATE_MASK;
+	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+				const struct edp_vsc_psr *vsc_psr)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+	uint32_t *data = (uint32_t *) vsc_psr;
+	unsigned int i;
+
+	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
+	   the video DIP before programming the video DIP data buffer
+	   registers for the DIP being updated. */
+	I915_WRITE(ctl_reg, 0);
+	POSTING_READ(ctl_reg);
+
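+	/* Write the VSC SDP payload one dword at a time, then zero out the
+	 * remainder of the DIP VSC data buffer before re-enabling the DIP.
+	 */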
+	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
+		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
+						   i >> 2), *data);
+		data++;
+	}
+	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
+		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
+						   i >> 2), 0);
+
+	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+	POSTING_READ(ctl_reg);
+}
+
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	uint32_t val;
+
+	/* VLV auto-generates the VSC packet as per the EDP 1.3 spec, Table 3.10 */
+	val  = I915_READ(VLV_VSCSDP(pipe));
+	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+	I915_WRITE(VLV_VSCSDP(pipe), val);
+}
+
+static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
+{
+	struct edp_vsc_psr psr_vsc;
+
+	/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x3;
+	psr_vsc.sdp_header.HB3 = 0xb;
+	intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+	struct edp_vsc_psr psr_vsc;
+
+	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
+	intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
+{
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+}
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t aux_clock_divider;
+	uint32_t aux_data_reg, aux_ctl_reg;
+	int precharge = 0x3;
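+	/*
+	 * aux_msg is a pre-formatted native AUX write of DP_SET_POWER_D0 to
+	 * DPCD address 600h: a 4-byte header (command, 20-bit address,
+	 * length - 1) followed by one data byte. The hardware can send this
+	 * message autonomously, e.g. to wake the sink on PSR exit.
+	 */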
+	static const uint8_t aux_msg[] = {
+		[0] = DP_AUX_NATIVE_WRITE << 4,
+		[1] = DP_SET_POWER >> 8,
+		[2] = DP_SET_POWER & 0xff,
+		[3] = 1 - 1,
+		[4] = DP_SET_POWER_D0,
+	};
+	int i;
+
+	BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+
+	/* Enable AUX frame sync at sink */
+	if (dev_priv->psr.aux_frame_sync)
+		drm_dp_dpcd_writeb(&intel_dp->aux,
+				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+				DP_AUX_FRAME_SYNC_ENABLE);
+
+	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
+				DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
+	aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
+				DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
+
+	/* Setup AUX registers */
+	for (i = 0; i < sizeof(aux_msg); i += 4)
+		I915_WRITE(aux_data_reg + i,
+			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+	if (INTEL_INFO(dev)->gen >= 9) {
+		uint32_t val;
+
+		val = I915_READ(aux_ctl_reg);
+		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
+		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
+		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
+		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+		/* Use hardcoded data values for PSR, frame sync and GTC */
+		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
+		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
+		I915_WRITE(aux_ctl_reg, val);
+	} else {
+		I915_WRITE(aux_ctl_reg,
+		   DP_AUX_CH_CTL_TIME_OUT_400us |
+		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+	}
+
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
+}
+
+static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
+	I915_WRITE(VLV_PSRCTL(pipe),
+		   VLV_EDP_PSR_MODE_SW_TIMER |
+		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
+		   VLV_EDP_PSR_ENABLE);
+}
+
+static void vlv_psr_activate(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+	/* Let's do the transition from PSR_state 1 to PSR_state 2, that is,
+	 * the PSR transition to active - static frame transmission.
+	 * The hardware is then responsible for the transition to PSR_state 3,
+	 * that is, PSR active - no Remote Frame Buffer (RFB) update.
+	 */
+	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
+		   VLV_EDP_PSR_ACTIVE_ENTRY);
+}
+
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	uint32_t max_sleep_time = 0x1f;
+	/* It was recently identified that, depending on the panel, the idle
+	 * frame count calculated by the HW can be off by 1. So let's use
+	 * what came from the VBT + 1.
+	 * There are also other cases where the panel demands at least 4
+	 * idle frames but the VBT is not set. To cover these 2 cases let's
+	 * use at least 5 when the VBT isn't set, to be on the safe side.
+	 */
+	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
+			       dev_priv->vbt.psr.idle_frames + 1 : 5;
+	uint32_t val = 0x0;
+	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+		/* It doesn't mean we shouldn't send TPS patterns, so let's
+		   send the minimal TP1 possible and skip TP2. */
+		val |= EDP_PSR_TP1_TIME_100us;
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
+		val |= EDP_PSR_SKIP_AUX_EXIT;
+		/* Sink should be able to train with the 5 or 6 idle patterns */
+		idle_frames += 4;
+	}
+
+	I915_WRITE(EDP_PSR_CTL(dev), val |
+		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+		   EDP_PSR_ENABLE);
+
+	if (dev_priv->psr.psr2_support)
+		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
+				EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	lockdep_assert_held(&dev_priv->psr.lock);
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	dev_priv->psr.source_ok = false;
+
+	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+		return false;
+	}
+
+	if (!i915.enable_psr) {
+		DRM_DEBUG_KMS("PSR disable by flag\n");
+		return false;
+	}
+
+	if (IS_HASWELL(dev) &&
+	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+		      S3D_ENABLE) {
+		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+		return false;
+	}
+
+	if (IS_HASWELL(dev) &&
+	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+		return false;
+	}
+
+	if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
+				    (dig_port->port != PORT_A))) {
+		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
+		return false;
+	}
+
+	dev_priv->psr.source_ok = true;
+	return true;
+}
+
+static void intel_psr_activate(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(dev_priv->psr.active);
+	lockdep_assert_held(&dev_priv->psr.lock);
+
+	/* Enable/Re-enable PSR on the host */
+	if (HAS_DDI(dev))
+		/* On HSW+, after we enable PSR on the source it will activate
+		 * as soon as the configured idle_frame count is matched. So
+		 * we just actually enable it here at activation time.
+		 */
+		hsw_psr_enable_source(intel_dp);
+	else
+		vlv_psr_activate(intel_dp);
+
+	dev_priv->psr.active = true;
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+
+	if (!HAS_PSR(dev)) {
+		DRM_DEBUG_KMS("PSR not supported on this platform\n");
+		return;
+	}
+
+	if (!is_edp_psr(intel_dp)) {
+		DRM_DEBUG_KMS("PSR not supported by this panel\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (dev_priv->psr.enabled) {
+		DRM_DEBUG_KMS("PSR already in use\n");
+		goto unlock;
+	}
+
+	if (!intel_psr_match_conditions(intel_dp))
+		goto unlock;
+
+	dev_priv->psr.busy_frontbuffer_bits = 0;
+
+	if (HAS_DDI(dev)) {
+		hsw_psr_setup_vsc(intel_dp);
+
+		if (dev_priv->psr.psr2_support) {
+			/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
+			if (crtc->config->pipe_src_w > 3200 ||
+				crtc->config->pipe_src_h > 2000)
+				dev_priv->psr.psr2_support = false;
+			else
+				skl_psr_setup_su_vsc(intel_dp);
+		}
+
+		/* Avoid continuous PSR exit by masking memup and hpd */
+		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+			   EDP_PSR_DEBUG_MASK_HPD);
+
+		/* Enable PSR on the panel */
+		hsw_psr_enable_sink(intel_dp);
+
+		if (INTEL_INFO(dev)->gen >= 9)
+			intel_psr_activate(intel_dp);
+	} else {
+		vlv_psr_setup_vsc(intel_dp);
+
+		/* Enable PSR on the panel */
+		vlv_psr_enable_sink(intel_dp);
+
+		/* On HSW+, enable_source also means going to the PSR
+		 * entry/active state as soon as the idle_frame count is
+		 * achieved, which would be too soon here. On VLV, however,
+		 * enable_source just enables PSR but leaves it in the
+		 * inactive state. So we may do this prior to the active
+		 * transition, i.e. here.
+		 */
+		vlv_psr_enable_source(intel_dp);
+	}
+
+	dev_priv->psr.enabled = intel_dp;
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void vlv_psr_disable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(intel_dig_port->base.base.crtc);
+	uint32_t val;
+
+	if (dev_priv->psr.active) {
+		/* Put VLV PSR back to PSR_state 0, that is, PSR disabled. */
+		if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
+			      VLV_EDP_PSR_IN_TRANS) == 0, 1))
+			WARN(1, "PSR transition took longer than expected\n");
+
+		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+		val &= ~VLV_EDP_PSR_ENABLE;
+		val &= ~VLV_EDP_PSR_MODE_MASK;
+		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+
+		dev_priv->psr.active = false;
+	} else {
+		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
+	}
+}
+
+static void hsw_psr_disable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->psr.active) {
+		I915_WRITE(EDP_PSR_CTL(dev),
+			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+		/* Wait till PSR is idle */
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+			DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+		dev_priv->psr.active = false;
+	} else {
+		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	}
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling the pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	if (HAS_DDI(dev))
+		hsw_psr_disable(intel_dp);
+	else
+		vlv_psr_disable(intel_dp);
+
+	dev_priv->psr.enabled = NULL;
+	mutex_unlock(&dev_priv->psr.lock);
+
+	cancel_delayed_work_sync(&dev_priv->psr.work);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), psr.work.work);
+	struct intel_dp *intel_dp = dev_priv->psr.enabled;
+	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+	/* We have to make sure PSR is ready for re-enable,
+	 * otherwise it stays disabled until the next full enable/disable
+	 * cycle. PSR might take some time to get fully disabled
+	 * and be ready for re-enable.
+	 */
+	if (HAS_DDI(dev_priv->dev)) {
+		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+			return;
+		}
+	} else {
+		if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
+			      VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+			return;
+		}
+	}
+	mutex_lock(&dev_priv->psr.lock);
+	intel_dp = dev_priv->psr.enabled;
+
+	if (!intel_dp)
+		goto unlock;
+
+	/*
+	 * The delayed work can race with an invalidate hence we need to
+	 * recheck. Since psr_flush first clears this and then reschedules we
+	 * won't ever miss a flush when bailing out here.
+	 */
+	if (dev_priv->psr.busy_frontbuffer_bits)
+		goto unlock;
+
+	intel_psr_activate(intel_dp);
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dp *intel_dp = dev_priv->psr.enabled;
+	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	u32 val;
+
+	if (!dev_priv->psr.active)
+		return;
+
+	if (HAS_DDI(dev)) {
+		val = I915_READ(EDP_PSR_CTL(dev));
+
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+
+		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+	} else {
+		val = I915_READ(VLV_PSRCTL(pipe));
+
+		/* Here we do the transition from PSR_state 3 directly to
+		 * PSR_state 5, since PSR_state 4 (active with single frame
+		 * update) can be skipped. After PSR_state 5 (PSR exit) the
+		 * hardware is responsible for transitioning back to
+		 * PSR_state 1 (PSR inactive), the same state as after
+		 * vlv_psr_enable_source.
+		 */
+		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+		I915_WRITE(VLV_PSRCTL(pipe), val);
+
+		/* Send AUX wake up - Spec says after transitioning to PSR
+		 * active we have to send AUX wake up by writing 01h in DPCD
+		 * 600h of sink device.
+		 * XXX: This might slow down the transition, but without this
+		 * HW doesn't complete the transition to PSR_state 1 and we
+		 * never get the screen updated.
+		 */
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+				   DP_SET_POWER_D0);
+	}
+
+	dev_priv->psr.active = false;
+}
+
+/**
+ * intel_psr_single_frame_update - Single Frame Update
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Some platforms support a single frame update feature that is used to
+ * send and update only one frame to the Remote Frame Buffer.
+ * So far it is only implemented for Valleyview and Cherryview, because
+ * the hardware requires this to be done before a page flip.
+ */
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	u32 val;
+
+	/*
+	 * Single frame update is already supported on BDW+ but it requires
+	 * many W/A and it isn't really needed.
+	 */
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+		val = I915_READ(VLV_PSRCTL(pipe));
+
+		/*
+		 * We need to set this bit before writing registers for a flip.
+		 * This bit self-clears when we get to the PSR active state.
+		 */
+		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	}
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_device *dev,
+			  unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_device *dev,
+		     unsigned frontbuffer_bits, enum fb_op_origin origin)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	int delay_ms = HAS_DDI(dev) ? 100 : 500;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+	if (HAS_DDI(dev)) {
+		/*
+		 * By definition every flush should mean invalidate + flush,
+		 * however on core platforms let's minimize the
+		 * disable/re-enable so we can avoid the invalidate when the
+		 * flush originated from a flip.
+		 */
+		if (frontbuffer_bits && origin != ORIGIN_FLIP)
+			intel_psr_exit(dev);
+	} else {
+		/*
+		 * On Valleyview and Cherryview we don't use hardware tracking,
+		 * so any plane updates or cursor moves don't result in a PSR
+		 * invalidate, which means we need to manually fake this in
+		 * software for all flushes.
+		 */
+		if (frontbuffer_bits)
+			intel_psr_exit(dev);
+	}
+
+	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(delay_ms));
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev: DRM device
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR stuff.
+ */
+void intel_psr_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+	mutex_init(&dev_priv->psr.lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
new file mode 100644
index 0000000..5bd6985
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include "i915_drv.h"
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
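+/*
+ * Instantiate the rodata descriptor for one generation's generated
+ * tables; batch_items is the batch length in u32 dwords, hence the
+ * sizeof()/4.
+ */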
+#define RO_RENDERSTATE(_g)						\
+	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+		.reloc = gen ## _g ## _null_state_relocs,		\
+		.batch = gen ## _g ## _null_state_batch,		\
+		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+	}
+
+#endif /* _INTEL_RENDERSTATE_H */
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
new file mode 100644
index 0000000..11c8e7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
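+/*
+ * Byte offsets into the batch below that are relocated against the
+ * batch's GGTT address when the null state is loaded; -1 terminates
+ * the list.
+ */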
+static const u32 gen6_null_state_relocs[] = {
+	0x00000020,
+	0x00000024,
+	0x0000002c,
+	0x000001e0,
+	0x000001e4,
+	-1,
+};
+
+static const u32 gen6_null_state_batch[] = {
+	0x69040000,
+	0x790d0001,
+	0x00000000,
+	0x00000000,
+	0x78180000,
+	0x00000001,
+	0x61010008,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x61020000,
+	0x00000000,
+	0x78050001,
+	0x00000018,
+	0x00000000,
+	0x780d1002,
+	0x00000000,
+	0x00000000,
+	0x00000420,
+	0x78150003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78100004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78160003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78110005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78170003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79050005,
+	0xe0040000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79100000,
+	0x00000000,
+	0x79000002,
+	0xffffffff,
+	0x00000000,
+	0x00000000,
+	0x780e0002,
+	0x00000441,
+	0x00000401,
+	0x00000401,
+	0x78021002,
+	0x00000000,
+	0x00000000,
+	0x00000400,
+	0x78130012,
+	0x00400810,
+	0x00000000,
+	0x20000000,
+	0x04000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78140007,
+	0x00000280,
+	0x08080000,
+	0x00000000,
+	0x00060000,
+	0x4e080002,
+	0x00100400,
+	0x00000000,
+	0x00000000,
+	0x78090005,
+	0x02000000,
+	0x22220000,
+	0x02f60000,
+	0x11330000,
+	0x02850004,
+	0x11220000,
+	0x78011002,
+	0x00000000,
+	0x00000000,
+	0x00000200,
+	0x78080003,
+	0x00002000,
+	0x00000448,	 /* reloc */
+	0x00000448,	 /* reloc */
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000220,	 /* state start */
+	0x00000240,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0060005a,
+	0x204077be,
+	0x000000c0,
+	0x008d0040,
+	0x0060005a,
+	0x206077be,
+	0x000000c0,
+	0x008d0080,
+	0x0060005a,
+	0x208077be,
+	0x000000d0,
+	0x008d0040,
+	0x0060005a,
+	0x20a077be,
+	0x000000d0,
+	0x008d0080,
+	0x00000201,
+	0x20080061,
+	0x00000000,
+	0x00000000,
+	0x00600001,
+	0x20200022,
+	0x008d0000,
+	0x00000000,
+	0x02800031,
+	0x21c01cc9,
+	0x00000020,
+	0x0a8a0001,
+	0x00600001,
+	0x204003be,
+	0x008d01c0,
+	0x00000000,
+	0x00600001,
+	0x206003be,
+	0x008d01e0,
+	0x00000000,
+	0x00600001,
+	0x208003be,
+	0x008d0200,
+	0x00000000,
+	0x00600001,
+	0x20a003be,
+	0x008d0220,
+	0x00000000,
+	0x00600001,
+	0x20c003be,
+	0x008d0240,
+	0x00000000,
+	0x00600001,
+	0x20e003be,
+	0x008d0260,
+	0x00000000,
+	0x00600001,
+	0x210003be,
+	0x008d0280,
+	0x00000000,
+	0x00600001,
+	0x212003be,
+	0x008d02a0,
+	0x00000000,
+	0x05800031,
+	0x24001cc8,
+	0x00000040,
+	0x90019000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x30000000,
+	0x00000124,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xf99a130c,
+	0x799a130c,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x80000031,
+	0x00000003,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(6);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
new file mode 100644
index 0000000..6551806
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+	0x0000000c,
+	0x00000010,
+	0x00000018,
+	0x000001ec,
+	-1,
+};
+
+static const u32 gen7_null_state_batch[] = {
+	0x69040000,
+	0x61010008,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x790d0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78180000,
+	0x00000001,
+	0x79160000,
+	0x00000008,
+	0x78300000,
+	0x02010040,
+	0x78310000,
+	0x04000000,
+	0x78320000,
+	0x04000000,
+	0x78330000,
+	0x02000000,
+	0x78100004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781b0005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781c0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781d0004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78110005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78210000,
+	0x00000000,
+	0x78130005,
+	0x00000000,
+	0x20000000,
+	0x04000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78140001,
+	0x20000800,
+	0x00000000,
+	0x781e0001,
+	0x00000000,
+	0x00000000,
+	0x78050005,
+	0xe0040000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78040001,
+	0x00000000,
+	0x00000000,
+	0x78240000,
+	0x00000240,
+	0x78230000,
+	0x00000260,
+	0x782f0000,
+	0x00000280,
+	0x781f000c,
+	0x00400810,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78200006,
+	0x000002c0,
+	0x08080000,
+	0x00000000,
+	0x28000402,
+	0x00060000,
+	0x00000000,
+	0x00000000,
+	0x78090005,
+	0x02000000,
+	0x22220000,
+	0x02f60000,
+	0x11230000,
+	0x02f60004,
+	0x11230000,
+	0x78080003,
+	0x00006008,
+	0x00000340,	 /* reloc */
+	0xffffffff,
+	0x00000000,
+	0x782a0000,
+	0x00000360,
+	0x79000002,
+	0xffffffff,
+	0x00000000,
+	0x00000000,
+	0x7b000005,
+	0x0000000f,
+	0x00000003,
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000031,	 /* state start */
+	0x00000003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xf99a130c,
+	0x799a130c,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000492,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0080005a,
+	0x2e2077bd,
+	0x000000c0,
+	0x008d0040,
+	0x0080005a,
+	0x2e6077bd,
+	0x000000d0,
+	0x008d0040,
+	0x02800031,
+	0x21801fa9,
+	0x008d0e20,
+	0x08840001,
+	0x00800001,
+	0x2e2003bd,
+	0x008d0180,
+	0x00000000,
+	0x00800001,
+	0x2e6003bd,
+	0x008d01c0,
+	0x00000000,
+	0x00800001,
+	0x2ea003bd,
+	0x008d0200,
+	0x00000000,
+	0x00800001,
+	0x2ee003bd,
+	0x008d0240,
+	0x00000000,
+	0x05800031,
+	0x20001fa8,
+	0x008d0e20,
+	0x90031000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000380,
+	0x000003a0,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
new file mode 100644
index 0000000..95288a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -0,0 +1,983 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen8_null_state_relocs[] = {
+	0x00000798,
+	0x000007a4,
+	0x000007ac,
+	0x000007bc,
+	-1,
+};
+
+static const u32 gen8_null_state_batch[] = {
+	0x7a000004,
+	0x01000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x69040000,
+	0x78140000,
+	0x04000000,
+	0x7820000a,
+	0x00000000,
+	0x00000000,
+	0x80000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78130002,
+	0x00000000,
+	0x00000000,
+	0x02001808,
+	0x781f0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78510009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78100007,
+	0x00000000,
+	0x00000000,
+	0x00010000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781b0007,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000800,
+	0x00000000,
+	0x78110008,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781e0003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781d0007,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78500003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781c0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x780c0000,
+	0x00000000,
+	0x78520003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78300000,
+	0x08010040,
+	0x78310000,
+	0x1e000000,
+	0x78320000,
+	0x1e000000,
+	0x78330000,
+	0x1e000000,
+	0x79190002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x791a0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x791b0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79120000,
+	0x00000000,
+	0x79130000,
+	0x00000000,
+	0x79140000,
+	0x00000000,
+	0x79150000,
+	0x00000000,
+	0x79160000,
+	0x00000000,
+	0x78150009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78190009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781a0009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78160009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78170009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78490001,
+	0x00000000,
+	0x00000000,
+	0x784a0000,
+	0x00000000,
+	0x784b0000,
+	0x00000004,
+	0x79170101,
+	0x00000000,
+	0x00000080,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x20000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x40000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x60000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x6101000e,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00001001,
+	0x00001001,
+	0x00000001,
+	0x00001001,
+	0x61020001,
+	0x00000000,
+	0x00000000,
+	0x79000002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78050006,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x40000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x80000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0xc0000000,
+	0x00000000,
+	0x00000000,
+	0x79080001,
+	0x00000000,
+	0x00000000,
+	0x790a0001,
+	0x00000000,
+	0x00000000,
+	0x78060003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78070003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78040001,
+	0x00000000,
+	0x00000000,
+	0x79110000,
+	0x00000000,
+	0x780d0000,
+	0x00000000,
+	0x79060000,
+	0x00000000,
+	0x7907001f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x7902000f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x790c000f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x780a0003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78080083,
+	0x00004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x04004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x08004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x10004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x14004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x18004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x1c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x20004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x24004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x28004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x2c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x30004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x34004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x38004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x3c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x40004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x44004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x48004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x4c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x50004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x58004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x5c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x60004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x64004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x68004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x6c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x70004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x74004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x7c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x80004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78090043,
+	0x02000000,
+	0x22220000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x680b0001,
+	0x78260000,
+	0x00000000,
+	0x78270000,
+	0x00000000,
+	0x78280000,
+	0x00000000,
+	0x78290000,
+	0x00000000,
+	0x782a0000,
+	0x00000000,
+	0x780e0000,
+	0x00000dc1,
+	0x78240000,
+	0x00000e01,
+	0x784f0000,
+	0x80000100,
+	0x784d0000,
+	0x40000000,
+	0x782b0000,
+	0x00000000,
+	0x782c0000,
+	0x00000000,
+	0x782d0000,
+	0x00000000,
+	0x782e0000,
+	0x00000000,
+	0x782f0000,
+	0x00000000,
+	0x780f0000,
+	0x00000000,
+	0x78230000,
+	0x00000e60,
+	0x78210000,
+	0x00000e80,
+	0x7b000005,
+	0x00000004,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state start */
+	0x00000000,
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(8);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644
index 0000000..16a7ec2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -0,0 +1,999 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+	0x000007a8,
+	0x000007b4,
+	0x000007bc,
+	0x000007cc,
+	-1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+	0x7a000004,
+	0x01000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x69040300,
+	0x78140000,
+	0x04000000,
+	0x7820000a,
+	0x00000000,
+	0x00000000,
+	0x80000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78130002,
+	0x00000000,
+	0x00000000,
+	0x02001808,
+	0x781f0004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78510009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78100007,
+	0x00000000,
+	0x00000000,
+	0x00010000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781b0007,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000800,
+	0x00000000,
+	0x78110008,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781e0003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781d0009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78500003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781c0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x780c0000,
+	0x00000000,
+	0x78520003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78300000,
+	0x08010040,
+	0x78310000,
+	0x1e000000,
+	0x78320000,
+	0x1e000000,
+	0x78330000,
+	0x1e000000,
+	0x79190002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x791a0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x791b0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79120000,
+	0x00000000,
+	0x79130000,
+	0x00000000,
+	0x79140000,
+	0x00000000,
+	0x79150000,
+	0x00000000,
+	0x79160000,
+	0x00000000,
+	0x78150009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78190009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781a0009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78160009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78170009,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78490001,
+	0x00000000,
+	0x00000000,
+	0x784a0000,
+	0x00000000,
+	0x784b0000,
+	0x00000004,
+	0x79170101,
+	0x00000000,
+	0x00000080,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x20000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x40000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79180006,
+	0x60000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x61010011,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00001001,
+	0x00001001,
+	0x00000001,
+	0x00001001,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x61020001,
+	0x00000000,
+	0x00000000,
+	0x79000002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78050006,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x40000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0x80000000,
+	0x00000000,
+	0x00000000,
+	0x79040002,
+	0xc0000000,
+	0x00000000,
+	0x00000000,
+	0x79080001,
+	0x00000000,
+	0x00000000,
+	0x790a0001,
+	0x00000000,
+	0x00000000,
+	0x78060003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78070003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78040001,
+	0x00000000,
+	0x00000000,
+	0x79110000,
+	0x00000000,
+	0x780d0000,
+	0x00000000,
+	0x79060000,
+	0x00000000,
+	0x7907001f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x7902000f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x790c000f,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x780a0003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78080083,
+	0x00004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x04004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x08004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x10004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x14004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x18004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x1c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x20004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x24004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x28004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x2c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x30004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x34004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x38004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x3c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x40004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x44004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x48004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x4c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x50004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x58004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x5c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x60004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x64004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x68004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x6c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x70004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x74004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x7c004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x80004000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78090043,
+	0x02000000,
+	0x22220000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78550003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x680b0001,
+	0x780e0000,
+	0x00000e01,
+	0x78240000,
+	0x00000e41,
+	0x784f0000,
+	0x80000100,
+	0x784d0000,
+	0x40000000,
+	0x782b0000,
+	0x00000000,
+	0x782c0000,
+	0x00000000,
+	0x782d0000,
+	0x00000000,
+	0x782e0000,
+	0x00000000,
+	0x782f0000,
+	0x00000000,
+	0x780f0000,
+	0x00000000,
+	0x78230000,
+	0x00000ea0,
+	0x78210000,
+	0x00000ec0,
+	0x78260000,
+	0x00000000,
+	0x78270000,
+	0x00000000,
+	0x78280000,
+	0x00000000,
+	0x78290000,
+	0x00000000,
+	0x782a0000,
+	0x00000000,
+	0x7b000005,
+	0x00000004,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state start */
+	0x00000000,
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(9);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..9d48443
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,3062 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_trace.h"
+#include "intel_drv.h"
+
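+/*
+ * An engine counts as initialized once a ring buffer object backs it:
+ * the default context's ringbuf under execlists, ring->buffer under
+ * legacy ring submission.
+ */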
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!dev)
+		return false;
+
+	if (i915.enable_execlists) {
+		struct intel_context *dctx = ring->default_context;
+		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+		return ringbuf->obj;
+	} else {
+		return ring->buffer && ring->buffer->obj;
+	}
+}
+
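+/*
+ * Bytes free in the circular buffer: the distance from tail forward
+ * round to head, minus a small reserve (I915_RING_FREE_SPACE) so a
+ * full ring is never mistaken for an empty one.
+ */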
+int __intel_ring_space(int head, int tail, int size)
+{
+	int space = head - tail;
+	if (space <= 0)
+		space += size;
+	return space - I915_RING_FREE_SPACE;
+}
+
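+/*
+ * Fold in the head position recorded at the last request retirement,
+ * if any, before recomputing how much of the ring is free.
+ */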
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+{
+	if (ringbuf->last_retired_head != -1) {
+		ringbuf->head = ringbuf->last_retired_head;
+		ringbuf->last_retired_head = -1;
+	}
+
+	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+					    ringbuf->tail, ringbuf->size);
+}
+
+int intel_ring_space(struct intel_ringbuffer *ringbuf)
+{
+	intel_ring_update_space(ringbuf);
+	return ringbuf->space;
+}
+
+bool intel_ring_stopped(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
+
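+/*
+ * Wrap the software tail and, unless the ring has been stopped for
+ * debugging, write it out so the hardware starts executing.
+ */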
+static void __intel_ring_advance(struct intel_engine_cs *ring)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	ringbuf->tail &= ringbuf->size - 1;
+	if (intel_ring_stopped(ring))
+		return;
+	ring->write_tail(ring, ringbuf->tail);
+}
+
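+/*
+ * On gen2 MI_FLUSH writes back the render cache unless
+ * MI_NO_WRITE_FLUSH is set, so only keep the write-back when the
+ * render domain is involved; MI_READ_FLUSH additionally invalidates
+ * the sampler cache.
+ */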
+static int
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32	invalidate_domains,
+		       u32	flush_domains)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 cmd;
+	int ret;
+
+	cmd = MI_FLUSH;
+	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+		cmd |= MI_NO_WRITE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+		cmd |= MI_READ_FLUSH;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32	invalidate_domains,
+		       u32	flush_domains)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_device *dev = ring->dev;
+	u32 cmd;
+	int ret;
+
+	/*
+	 * read/write caches:
+	 *
+	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+	 * also flushed at 2d versus 3d pipeline switches.
+	 *
+	 * read-only caches:
+	 *
+	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+	 * MI_READ_FLUSH is set, and is always flushed on 965.
+	 *
+	 * I915_GEM_DOMAIN_COMMAND may not exist?
+	 *
+	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+	 * invalidated when MI_EXE_FLUSH is set.
+	 *
+	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+	 * invalidated with every MI_FLUSH.
+	 *
+	 * TLBs:
+	 *
+	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+	 * are flushed at any MI_FLUSH.
+	 */
+
+	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+		cmd &= ~MI_NO_WRITE_FLUSH;
+	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+		cmd |= MI_EXE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+	    (IS_G4X(dev) || IS_GEN5(dev)))
+		cmd |= MI_INVALIDATE_ISP;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6.  From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ *     "1 of the following must also be set:
+ *      - Render Target Cache Flush Enable ([12] of DW1)
+ *      - Depth Cache Flush Enable ([0] of DW1)
+ *      - Stall at Pixel Scoreboard ([1] of DW1)
+ *      - Depth Stall ([13] of DW1)
+ *      - Post-Sync Operation ([13] of DW1)
+ *      - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it.  Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either.  Notify enable is IRQs, which aren't
+ * really our business.  That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	int ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(req);
+	if (ret)
+		return ret;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
+		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+	}
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	int ret;
+
+	/*
+	 * Ensure that any following seqno writes only happen when the render
+	 * cache is indeed flushed.
+	 *
+	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
+	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
+	 * don't try to be clever and just set it unconditionally.
+	 */
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
+		/* Workaround: we must issue a pipe_control with CS-stall bit
+		 * set before a pipe_control command that has the state cache
+		 * invalidate bit set. */
+		gen7_render_ring_cs_stall_wa(req);
+	}
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
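+/*
+ * Gen8 PIPE_CONTROL grew to six dwords (the post-sync address is now
+ * two dwords), so share a single emit helper between the flush paths.
+ */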
+static int
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
+		       u32 flags, u32 scratch_addr)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
+		ret = gen8_emit_pipe_control(req,
+					     PIPE_CONTROL_CS_STALL |
+					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
+					     0);
+		if (ret)
+			return ret;
+	}
+
+	return gen8_emit_pipe_control(req, flags, scratch_addr);
+}
+
+static void ring_write_tail(struct intel_engine_cs *ring,
+			    u32 value)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	I915_WRITE_TAIL(ring, value);
+}
+
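+/*
+ * ACTHD is split across two 32-bit registers on gen8+, is a
+ * per-engine register on gen4+, and a single global register before
+ * that.
+ */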
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u64 acthd;
+
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
+					 RING_ACTHD_UDW(ring->mmio_base));
+	else if (INTEL_INFO(ring->dev)->gen >= 4)
+		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+	else
+		acthd = I915_READ(ACTHD);
+
+	return acthd;
+}
+
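+/*
+ * Legacy (non-GFX-HWS) path: hand the status page's DMA address to
+ * HWS_PGA; on gen4+ address bits [35:32] are folded into register
+ * bits [7:4].
+ */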
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 mmio = 0;
+
+	/* The ring status page addresses are no longer next to the rest of
+	 * the ring registers as of gen7.
+	 */
+	if (IS_GEN7(dev)) {
+		switch (ring->id) {
+		case RCS:
+			mmio = RENDER_HWS_PGA_GEN7;
+			break;
+		case BCS:
+			mmio = BLT_HWS_PGA_GEN7;
+			break;
+		/*
+		 * VCS2 doesn't actually exist on Gen7; the case is only
+		 * here to silence gcc's switch-check warning.
+		 */
+		case VCS2:
+		case VCS:
+			mmio = BSD_HWS_PGA_GEN7;
+			break;
+		case VECS:
+			mmio = VEBOX_HWS_PGA_GEN7;
+			break;
+		}
+	} else if (IS_GEN6(ring->dev)) {
+		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else {
+		/* XXX: gen8 returns to sanity */
+		mmio = RING_HWS_PGA(ring->mmio_base);
+	}
+
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
+
+	/*
+	 * Flush the TLB for this page
+	 *
+	 * FIXME: These two bits have disappeared on gen8, so a question
+	 * arises: do we still need this and if so how should we go about
+	 * invalidating the TLB?
+	 */
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+
+		/* ring should be idle before issuing a sync flush */
+		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
+}
+
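+/*
+ * Ask the engine to stop: set the STOP_RING bit in RING_MI_MODE, wait
+ * for the hardware to report idle, then clear CTL/HEAD/TAIL and drop
+ * the stop request again.  Returns true only if HEAD has actually come
+ * to rest at zero.
+ */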
+static bool stop_ring(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!IS_GEN2(ring->dev)) {
+		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
+			/* Sometimes we observe that the idle flag is not
+			 * set even though the ring is empty. So double
+			 * check before giving up.
+			 */
+			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+				return false;
+		}
+	}
+
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(ring, 0);
+
+	if (!IS_GEN2(ring->dev)) {
+		(void)I915_READ_CTL(ring);
+		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	}
+
+	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
+
+static int init_ring_common(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret = 0;
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	if (!stop_ring(ring)) {
+		/* G45 ring initialization often fails to reset head to zero */
+		DRM_DEBUG_KMS("%s head not reset to zero "
+			      "ctl %08x head %08x tail %08x start %08x\n",
+			      ring->name,
+			      I915_READ_CTL(ring),
+			      I915_READ_HEAD(ring),
+			      I915_READ_TAIL(ring),
+			      I915_READ_START(ring));
+
+		if (!stop_ring(ring)) {
+			DRM_ERROR("failed to set %s head to zero "
+				  "ctl %08x head %08x tail %08x start %08x\n",
+				  ring->name,
+				  I915_READ_CTL(ring),
+				  I915_READ_HEAD(ring),
+				  I915_READ_TAIL(ring),
+				  I915_READ_START(ring));
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
+	/* Enforce ordering by reading HEAD register back */
+	I915_READ_HEAD(ring);
+
+	/* Initialize the ring. This must happen _after_ we've cleared the ring
+	 * registers with the above sequence (the readback of the HEAD registers
+	 * also enforces ordering), otherwise the hw might lose the new ring
+	 * register values. */
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+
+	/* WaClearRingBufHeadRegAtInit:ctg,elk */
+	if (I915_READ_HEAD(ring))
+		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
+			  ring->name, I915_READ_HEAD(ring));
+	I915_WRITE_HEAD(ring, 0);
+	(void)I915_READ_HEAD(ring);
+
+	I915_WRITE_CTL(ring,
+			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_VALID);
+
+	/* If the head is still not zero, the ring is dead */
+	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
+		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+		DRM_ERROR("%s initialization failed "
+			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+			  ring->name,
+			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
+			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
+			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+		ret = -EIO;
+		goto out;
+	}
+
+	ringbuf->last_retired_head = -1;
+	ringbuf->head = I915_READ_HEAD(ring);
+	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	intel_ring_update_space(ringbuf);
+
+	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
+}
+
+void
+intel_fini_pipe_control(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (ring->scratch.obj == NULL)
+		return;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	}
+
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
+}
+
+int
+intel_init_pipe_control(struct intel_engine_cs *ring)
+{
+	int ret;
+
+	WARN_ON(ring->scratch.obj);
+
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+	if (ret)
+		goto err_unref;
+
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+	if (ret)
+		goto err_unref;
+
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+
+	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+			 ring->name, ring->scratch.gtt_offset);
+	return 0;
+
+err_unpin:
+	i915_gem_object_ggtt_unpin(ring->scratch.obj);
+err_unref:
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+err:
+	return ret;
+}
+
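+/*
+ * Replay the recorded workaround register writes into the ring: flush
+ * all caches, emit one MI_LOAD_REGISTER_IMM covering every saved
+ * (addr, value) pair, then flush again so the writes have landed
+ * before any subsequent commands execute.
+ */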
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+{
+	int ret, i;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *w = &dev_priv->workarounds;
+
+	if (w->count == 0)
+		return 0;
+
+	ring->gpu_caches_dirty = true;
+	ret = intel_ring_flush_all_caches(req);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(req, (w->count * 2 + 2));
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	for (i = 0; i < w->count; i++) {
+		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
+	}
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	ring->gpu_caches_dirty = true;
+	ret = intel_ring_flush_all_caches(req);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+	return 0;
+}
+
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
+{
+	int ret;
+
+	ret = intel_ring_workarounds_emit(req);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_render_state_init(req);
+	if (ret)
+		DRM_ERROR("init render state: %d\n", ret);
+
+	return ret;
+}
+
+static int wa_add(struct drm_i915_private *dev_priv,
+		  const u32 addr, const u32 mask, const u32 val)
+{
+	const u32 idx = dev_priv->workarounds.count;
+
+	if (WARN_ON(idx >= I915_MAX_WA_REGS))
+		return -ENOSPC;
+
+	dev_priv->workarounds.reg[idx].addr = addr;
+	dev_priv->workarounds.reg[idx].value = val;
+	dev_priv->workarounds.reg[idx].mask = mask;
+
+	dev_priv->workarounds.count++;
+
+	return 0;
+}
+
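+/*
+ * The WA_* macros below record a register write in
+ * dev_priv->workarounds instead of applying it immediately; the saved
+ * list is replayed by intel_ring_workarounds_emit().  WA_REG bails out
+ * with -ENOSPC once I915_MAX_WA_REGS entries are in use, and the
+ * _MASKED variants encode the mask in the upper half of the value as
+ * masked registers expect.
+ */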
+#define WA_REG(addr, mask, val) do { \
+		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+		if (r) \
+			return r; \
+	} while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
+static int gen8_init_workarounds(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:bdw,chv */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+	/* WaDisablePartialInstShootdown:bdw,chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaForceEnableNonCoherent:bdw,chv */
+	/* WaHdcDisableFetchWhenMasked:bdw,chv */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+			  HDC_FORCE_NON_COHERENT);
+
+	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+	 *  polygons in the same 8x4 pixel/sample area to be processed without
+	 *  stalling waiting for the earlier ones to write to Hierarchical Z
+	 *  buffer."
+	 *
+	 * This optimization is off by default for BDW and CHV; turn it on.
+	 */
+	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+	/* Wa4x4STCOptimizationDisable:bdw,chv */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN6_WIZ_HASHING_MASK,
+			    GEN6_WIZ_HASHING_16x4);
+
+	return 0;
+}
+
+static int bdw_init_workarounds(struct intel_engine_cs *ring)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ret = gen8_init_workarounds(ring);
+	if (ret)
+		return ret;
+
+	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* WaDisableDopClockGating:bdw */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+			  DOP_CLOCK_GATING_DISABLE);
+
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  /* WaForceContextSaveRestoreNonCoherent:bdw */
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+	return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *ring)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ret = gen8_init_workarounds(ring);
+	if (ret)
+		return ret;
+
+	/* WaDisableThreadStallDopClockGating:chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* Improve HiZ throughput on CHV. */
+	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+	return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	/* WaEnableLbsSlaRetryTimerDecrement:skl */
+	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   ECOCHK_DIS_TLB);
+
+	/* WaDisablePartialInstShootdown:skl,bxt */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+	/* Syncing dependencies between camera and graphics:skl,bxt */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
+	    INTEL_REVID(dev) == SKL_REVID_B0)) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+				  GEN9_DG_MIRROR_FIX_ENABLE);
+	}
+
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+				  GEN9_RHWO_OPTIMIZATION_DISABLE);
+		/*
+		 * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14]
+		 * to be set, but we do that in the per-context batch buffer
+		 * because there is an issue with this register not getting
+		 * restored on context restore.
+		 */
+	}
+
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
+	    IS_BROXTON(dev)) {
+		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+				  GEN9_ENABLE_YV12_BUGFIX);
+	}
+
+	/* Wa4x4STCOptimizationDisable:skl,bxt */
+	/* WaDisablePartialResolveInVc:skl,bxt */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
+
+	/* WaCcsTlbPrefetchDisable:skl,bxt */
+	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+			  GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
+		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+				  PIXEL_MASK_CAMMING_DISABLE);
+
+	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
+	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
+	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+
+	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
+	if (IS_SKYLAKE(dev) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+				  GEN8_SAMPLER_POWER_BYPASS_DIS);
+	}
+
+	/* WaDisableSTUnitPowerOptimization:skl,bxt */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+	return 0;
+}
+
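+/*
+ * For each slice that has exactly one 7-EU subslice, derive an IZ
+ * hashing value from that subslice's index (vals[i] = 3 - ss) and
+ * program it with a masked field write to GEN7_GT_MODE; slices with a
+ * different EU layout keep the hardware default of 0.
+ */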
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 vals[3] = { 0, 0, 0 };
+	unsigned int i;
+
+	for (i = 0; i < 3; i++) {
+		u8 ss;
+
+		/*
+		 * Only consider slices where one, and only one, subslice has 7
+		 * EUs
+		 */
+		if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+			continue;
+
+		/*
+		 * subslice_7eu[i] != 0 (because of the check above) and
+		 * ss_max == 4 (the maximum number of subslices possible per
+		 * slice), so 0 <= ss <= 3.
+		 */
+		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+		vals[i] = 3 - ss;
+	}
+
+	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+		return 0;
+
+	/* Tune IZ hashing. See intel_device_info_runtime_init() */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN9_IZ_HASHING_MASK(2) |
+			    GEN9_IZ_HASHING_MASK(1) |
+			    GEN9_IZ_HASHING_MASK(0),
+			    GEN9_IZ_HASHING(2, vals[2]) |
+			    GEN9_IZ_HASHING(1, vals[1]) |
+			    GEN9_IZ_HASHING(0, vals[0]));
+
+	return 0;
+}
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ret = gen9_init_workarounds(ring);
+	if (ret)
+		return ret;
+
+	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+		/* WaDisableHDCInvalidation:skl */
+		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+			   BDW_DISABLE_HDC_INVALIDATION);
+
+		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
+		I915_WRITE(FF_SLICE_CS_CHICKEN2,
+			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
+	}
+
+	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new
+	 * changes involving this register should also be added to the WA
+	 * batch as required.
+	 */
+	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+		/* WaDisableLSQCROPERFforOCL:skl */
+		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+					   GEN9_GAPS_TSV_CREDIT_DISABLE));
+	}
+
+	/* WaDisablePowerCompilerClockGating:skl */
+	if (INTEL_REVID(dev) == SKL_REVID_B0)
+		WA_SET_BIT_MASKED(HIZ_CHICKEN,
+				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+		/*
+		 * Use Force Non-Coherent whenever executing a 3D context. This
+		 * is a workaround for a possible hang in the unlikely event
+		 * a TLB invalidation occurs during a PSD flush.
+		 */
+		/* WaForceEnableNonCoherent:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FORCE_NON_COHERENT);
+	}
+
+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:skl */
+	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+		WA_SET_BIT_MASKED(
+			GEN7_HALF_SLICE_CHICKEN1,
+			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	}
+
+	return skl_tune_iz_hashing(ring);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *ring)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	ret = gen9_init_workarounds(ring);
+	if (ret)
+		return ret;
+
+	/* WaStoreMultiplePTEenable:bxt */
+	/* This is a requirement according to the hardware specification */
+	if (INTEL_REVID(dev) == BXT_REVID_A0)
+		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+
+	/* WaSetClckGatingDisableMedia:bxt */
+	if (INTEL_REVID(dev) == BXT_REVID_A0) {
+		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
+	}
+
+	/* WaDisableThreadStallDopClockGating:bxt */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  STALL_DOP_GATING_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:bxt */
+	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+		WA_SET_BIT_MASKED(
+			GEN7_HALF_SLICE_CHICKEN1,
+			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	}
+
+	return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(ring->id != RCS);
+
+	dev_priv->workarounds.count = 0;
+
+	if (IS_BROADWELL(dev))
+		return bdw_init_workarounds(ring);
+
+	if (IS_CHERRYVIEW(dev))
+		return chv_init_workarounds(ring);
+
+	if (IS_SKYLAKE(dev))
+		return skl_init_workarounds(ring);
+
+	if (IS_BROXTON(dev))
+		return bxt_init_workarounds(ring);
+
+	return 0;
+}
+
+static int init_render_ring(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
+	if (ret)
+		return ret;
+
+	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 *
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+	 */
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	/* Required for the hardware to program scanline values for waiting */
+	/* WaEnableFlushTlbInvalidationMode:snb */
+	if (INTEL_INFO(dev)->gen == 6)
+		I915_WRITE(GFX_MODE,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
+
+	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+	if (IS_GEN7(dev))
+		I915_WRITE(GFX_MODE_GEN7,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
+			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+	if (IS_GEN6(dev)) {
+		/* From the Sandybridge PRM, volume 1 part 3, page 24:
+		 * "If this bit is set, STCunit will have LRA as replacement
+		 *  policy. [...] This bit must be reset.  LRA replacement
+		 *  policy is not supported."
+		 */
+		I915_WRITE(CACHE_MODE_0,
+			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+
+	return init_workarounds_ring(ring);
+}
+
+static void render_ring_cleanup(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->semaphore_obj) {
+		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+		dev_priv->semaphore_obj = NULL;
+	}
+
+	intel_fini_pipe_control(ring);
+}
+
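+/*
+ * Signal all other engines from the render ring: for every waiter with
+ * a valid semaphore slot, emit a PIPE_CONTROL that writes this
+ * request's seqno as a qword to the signaller's per-waiter GGTT
+ * offset, followed by an MI_SEMAPHORE_SIGNAL targeting that engine.
+ */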
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller_req, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u32 seqno;
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		seqno = i915_gem_request_get_seqno(signaller_req);
+		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+					   PIPE_CONTROL_QW_WRITE |
+					   PIPE_CONTROL_FLUSH_ENABLE);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, seqno);
+		intel_ring_emit(signaller, 0);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller_req, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u32 seqno;
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		seqno = i915_gem_request_get_seqno(signaller_req);
+		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+					   MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, seqno);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
+		       unsigned int num_dwords)
+{
+	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *useless;
+	int i, ret, num_rings;
+
+#define MBOX_UPDATE_DWORDS 3
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller_req, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(useless, dev_priv, i) {
+		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+		if (mbox_reg != GEN6_NOSYNC) {
+			u32 seqno = i915_gem_request_get_seqno(signaller_req);
+			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit(signaller, mbox_reg);
+			intel_ring_emit(signaller, seqno);
+		}
+	}
+
+	/* If num_dwords was rounded, make sure the tail pointer is correct */
+	if (num_rings % 2 == 0)
+		intel_ring_emit(signaller, MI_NOOP);
+
+	return 0;
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @req - request to write to the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	if (ring->semaphore.signal)
+		ret = ring->semaphore.signal(req, 4);
+	else
+		ret = intel_ring_begin(req, 4);
+
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_ring_advance(ring);
+
+	return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
+}
+
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has signalled, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+
+static int
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
+	       struct intel_engine_cs *signaller,
+	       u32 seqno)
+{
+	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	int ret;
+
+	ret = intel_ring_begin(waiter_req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+				MI_SEMAPHORE_GLOBAL_GTT |
+				MI_SEMAPHORE_POLL |
+				MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter,
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_emit(waiter,
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_advance(waiter);
+	return 0;
+}
+
+static int
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
+	       struct intel_engine_cs *signaller,
+	       u32 seqno)
+{
+	struct intel_engine_cs *waiter = waiter_req->ring;
+	u32 dw1 = MI_SEMAPHORE_MBOX |
+		  MI_SEMAPHORE_COMPARE |
+		  MI_SEMAPHORE_REGISTER;
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	int ret;
+
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However, for the hardware the
+	 * comparison is strictly greater than.
+	 */
+	seqno -= 1;
+
+	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
+
+	ret = intel_ring_begin(waiter_req, 4);
+	if (ret)
+		return ret;
+
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+		intel_ring_emit(waiter, dw1 | wait_mbox);
+		intel_ring_emit(waiter, seqno);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
+	intel_ring_advance(waiter);
+
+	return 0;
+}
+
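+/*
+ * Emit a 4-dword PIPE_CONTROL qword write with a depth stall to the
+ * given scratch address; pc_render_add_request() below issues a chain
+ * of these flushes before raising PIPE_NOTIFY.
+ */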
+#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
+do {									\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL);				\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
+	intel_ring_emit(ring__, 0);							\
+	intel_ring_emit(ring__, 0);							\
+} while (0)
+
+static int
+pc_render_add_request(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	int ret;
+
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(req, 32);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 2 * CACHELINE_BYTES;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 2 * CACHELINE_BYTES;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 2 * CACHELINE_BYTES;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 2 * CACHELINE_BYTES;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, 0);
+	__intel_ring_advance(ring);
+
+	return 0;
+}
+
+static u32
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (!lazy_coherency) {
+		struct drm_i915_private *dev_priv = ring->dev->dev_private;
+		POSTING_READ(RING_ACTHD(ring->mmio_base));
+	}
+
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static u32
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+	return ring->scratch.cpu_page[0];
+}
+
+static void
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	ring->scratch.cpu_page[0] = seqno;
+}
+
+static bool
+gen5_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0)
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0)
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static int
+bsd_ring_flush(struct drm_i915_gem_request *req,
+	       u32     invalidate_domains,
+	       u32     flush_domains)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+i9xx_add_request(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_ring_advance(ring);
+
+	return 0;
+}
+
+static bool
+gen6_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_PARITY_ERROR(dev)));
+		else
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+		else
+			I915_WRITE_IMR(ring, ~0);
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		I915_WRITE_IMR(ring, ~0);
+		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+gen8_ring_get_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static int
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			 u64 offset, u32 length,
+			 unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			MI_BATCH_GTT |
+			(dispatch_flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_NON_SECURE_I965));
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
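+/*
+ * i830 batches are dispatched in three steps to dodge the CS TLB
+ * invalidation bug: a dummy colour blit reloads the scratch PTE TLB
+ * entries, then (unless the caller passed I915_DISPATCH_PINNED) the
+ * batch is blitted into the stable scratch bo, and finally
+ * MI_BATCH_BUFFER executes from that safe copy.
+ */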
+static int
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			 u64 offset, u32 len,
+			 unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	u32 cs_offset = ring->scratch.gtt_offset;
+	int ret;
+
+	ret = intel_ring_begin(req, 6);
+	if (ret)
+		return ret;
+
+	/* Evict the invalid PTE TLBs */
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(req, 6 + 2);
+		if (ret)
+			return ret;
+
+		/* Blit the batch (which now has all relocs applied) to the
+		 * stable batch scratch bo area (so that the CS never
+		 * stumbles over its tlb invalidation bug) ...
+		 */
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+
+		/* ... and execute it. */
+		offset = cs_offset;
+	}
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			 u64 offset, u32 len,
+			 unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!dev_priv->status_page_dmah)
+		return;
+
+	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+	ring->status_page.page_addr = NULL;
+}
+
+static void cleanup_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = ring->status_page.obj;
+	if (obj == NULL)
+		return;
+
+	kunmap(sg_page(obj->pages->sgl));
+	i915_gem_object_ggtt_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+	ring->status_page.obj = NULL;
+}
+
+static int init_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_object *obj = ring->status_page.obj;
+
+	if (obj == NULL) {
+		unsigned flags;
+		int ret;
+
+		obj = i915_gem_alloc_object(ring->dev, 4096);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate status page\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+		if (ret)
+			goto err_unref;
+
+		flags = 0;
+		if (!HAS_LLC(ring->dev))
+			/* On g33, we cannot place HWS above 256MiB, so
+			 * restrict its pinning to the low mappable arena.
+			 * Though this restriction is not documented for
+			 * gen4, gen5, or byt, they also behave similarly
+			 * and hang if the HWS is placed at the top of the
+			 * GTT. To generalise, it appears that all !llc
+			 * platforms have issues with us placing the HWS
+			 * above the mappable region (even though we never
+			 * actually map it).
+			 */
+			flags |= PIN_MAPPABLE;
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
+		if (ret) {
+err_unref:
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->status_page.obj = obj;
+	}
+
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			ring->name, ring->status_page.gfx_addr);
+
+	return 0;
+}
+
+static int init_phys_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!dev_priv->status_page_dmah) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+		if (!dev_priv->status_page_dmah)
+			return -ENOMEM;
+	}
+
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	return 0;
+}
+
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+	iounmap(ringbuf->virtual_start);
+	ringbuf->virtual_start = NULL;
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
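+/*
+ * Pin the ringbuffer into the mappable region of the GGTT, move it to
+ * the GTT domain and map it write-combined through the aperture, so
+ * that commands written by the CPU are visible to the GPU's reads of
+ * the ring.
+ */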
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		i915_gem_object_ggtt_unpin(obj);
+		return ret;
+	}
+
+	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
+		i915_gem_object_ggtt_unpin(obj);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ringbuf->size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	/* mark ring buffers as read-only from the GPU side by default */
+	obj->gt_ro = 1;
+
+	ringbuf->obj = obj;
+
+	return 0;
+}
+
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+	struct intel_ringbuffer *ring;
+	int ret;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ring->ring = engine;
+
+	ring->size = size;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = size;
+	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
+
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
+
+	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+			  engine->name, ret);
+		kfree(ring);
+		return ERR_PTR(ret);
+	}
+
+	return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+	intel_destroy_ringbuffer_obj(ring);
+	kfree(ring);
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+				  struct intel_engine_cs *ring)
+{
+	struct intel_ringbuffer *ringbuf;
+	int ret;
+
+	WARN_ON(ring->buffer);
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->execlist_queue);
+	i915_gem_batch_pool_init(dev, &ring->batch_pool);
+	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+
+	init_waitqueue_head(&ring->irq_queue);
+
+	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+	ring->buffer = ringbuf;
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(ring);
+		if (ret)
+			goto error;
+	} else {
+		WARN_ON(ring->id != RCS);
+		ret = init_phys_status_page(ring);
+		if (ret)
+			goto error;
+	}
+
+	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+	if (ret) {
+		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+				ring->name, ret);
+		intel_destroy_ringbuffer_obj(ringbuf);
+		goto error;
+	}
+
+	ret = i915_cmd_parser_init_ring(ring);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	intel_ringbuffer_free(ringbuf);
+	ring->buffer = NULL;
+	return ret;
+}
+
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	dev_priv = to_i915(ring->dev);
+
+	intel_stop_ring_buffer(ring);
+	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+	intel_unpin_ringbuffer_obj(ring->buffer);
+	intel_ringbuffer_free(ring->buffer);
+	ring->buffer = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
+	if (I915_NEED_GFX_HWS(ring->dev)) {
+		cleanup_status_page(ring);
+	} else {
+		WARN_ON(ring->id != RCS);
+		cleanup_phys_status_page(ring);
+	}
+
+	i915_cmd_parser_fini_ring(ring);
+	i915_gem_batch_pool_fini(&ring->batch_pool);
+}
+
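+/*
+ * Walk the request list from the oldest entry, find the first request
+ * whose retirement frees at least @n bytes of ring space, and wait for
+ * it to complete; fails with -ENOSPC if even draining every
+ * outstanding request would not make enough room.
+ */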
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct drm_i915_gem_request *request;
+	unsigned space;
+	int ret;
+
+	if (intel_ring_space(ringbuf) >= n)
+		return 0;
+
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		space = __intel_ring_space(request->postfix, ringbuf->tail,
+					   ringbuf->size);
+		if (space >= n)
+			break;
+	}
+
+	if (WARN_ON(&request->list == &ring->request_list))
+		return -ENOSPC;
+
+	ret = i915_wait_request(request);
+	if (ret)
+		return ret;
+
+	ringbuf->space = space;
+	return 0;
+}
+
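+/*
+ * Fill the remainder of the ring with MI_NOOPs and reset the tail to
+ * the start of the buffer; used when a command sequence would
+ * otherwise straddle the wrap point.
+ */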
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
+{
+	uint32_t __iomem *virt;
+	int rem = ringbuf->size - ringbuf->tail;
+
+	virt = ringbuf->virtual_start + ringbuf->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ringbuf->tail = 0;
+	intel_ring_update_space(ringbuf);
+}
+
+int intel_ring_idle(struct intel_engine_cs *ring)
+{
+	struct drm_i915_gem_request *req;
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	req = list_entry(ring->request_list.prev,
+			struct drm_i915_gem_request,
+			list);
+
+	/* Make sure we do not trigger any retires */
+	return __i915_wait_request(req,
+				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
+				   to_i915(ring->dev)->mm.interruptible,
+				   NULL, NULL);
+}
+
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+{
+	request->ringbuf = request->ring->buffer;
+	return 0;
+}
+
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+	WARN_ON(ringbuf->reserved_size);
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_in_use = true;
+	ringbuf->reserved_tail   = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(!ringbuf->reserved_in_use);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is much simpler to accept that the sanity check is
+		 * occasionally skipped as well; that is not a big issue.
+		 */
+	}
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
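+/*
+ * Make room for @bytes of new commands plus any outstanding
+ * reservation.  Three cases: the request itself no longer fits before
+ * the effective end, so wrap and wait for the full amount; only the
+ * reserved space spills past the end, so wait for it to free up at the
+ * start of the buffer without wrapping; or everything fits and we
+ * merely wait until enough space is free.
+ */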
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we don't need an immediate
+			 * wrap and only need to effectively wait for the
+			 * reserved size of space from the start of the
+			 * ringbuffer.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
+	}
+
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
+		if (unlikely(ret))
+			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
+	}
+
+	return 0;
+}
+
+int intel_ring_begin(struct drm_i915_gem_request *req,
+		     int num_dwords)
+{
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	WARN_ON(req == NULL);
+	ring = req->ring;
+	dev_priv = ring->dev->dev_private;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
+	return 0;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int ret;
+
+	if (num_dwords == 0)
+		return 0;
+
+	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
+	ret = intel_ring_begin(req, num_dwords);
+	if (ret)
+		return ret;
+
+	while (num_dwords--)
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+		if (HAS_VEBOX(dev))
+			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+	}
+
+	ring->set_seqno(ring, seqno);
+	ring->hangcheck.seqno = seqno;
+}
+
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+				     u32 value)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	/* Every tail move must follow the sequence below */
+
+	/* Disable notification that the ring is IDLE. The GT
+	 * will then assume that it is busy and bring it out of rc6.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+	/* Clear the context id. Here be magic! */
+	I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+	/* Wait for the ring not to be idle, i.e. for it to wake up. */
+	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+		      GEN6_BSD_SLEEP_INDICATOR) == 0,
+		     50))
+		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+	/* Now that the ring is fully powered up, update the tail */
+	I915_WRITE_TAIL(ring, value);
+	POSTING_READ(RING_TAIL(ring->mmio_base));
+
+	/* Let the ring send IDLE messages to the GT again,
+	 * and so let it sleep to conserve power when idle.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
+			       u32 invalidate, u32 flush)
+{
+	struct intel_engine_cs *ring = req->ring;
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
+
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	/*
+	 * Bspec vol 1c.5 - video engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			      u64 offset, u32 len,
+			      unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	bool ppgtt = USES_PPGTT(ring->dev) &&
+			!(dispatch_flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			     u64 offset, u32 len,
+			     unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(dispatch_flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	/* bits 0-7 are the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
+			      u64 offset, u32 len,
+			      unsigned dispatch_flags)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(dispatch_flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_NON_SECURE_I965));
+	/* bits 0-7 are the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
+			   u32 invalidate, u32 flush)
+{
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_device *dev = ring->dev;
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(dev)->gen >= 8)
+		cmd += 1;
+
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	/*
+	 * Bspec vol 1c.3 - blitter engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else {
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
+	}
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (i915_semaphore_is_enabled(dev)) {
+			obj = i915_gem_alloc_object(dev, 4096);
+			if (obj == NULL) {
+				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+				i915.semaphores = 0;
+			} else {
+				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+				if (ret != 0) {
+					drm_gem_object_unreference(&obj->base);
+					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+					i915.semaphores = 0;
+				} else
+					dev_priv->semaphore_obj = obj;
+			}
+		}
+
+		ring->init_context = intel_rcs_ctx_init;
+		ring->add_request = gen6_add_request;
+		ring->flush = gen8_render_ring_flush;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (i915_semaphore_is_enabled(dev)) {
+			WARN_ON(!dev_priv->semaphore_obj);
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		ring->init_context = intel_rcs_ctx_init;
+		ring->add_request = gen6_add_request;
+		ring->flush = gen7_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform.  And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between RCS and VCS2 is
+			 * initialized as INVALID.  Gen8 will initialize the
+			 * sema between VCS2 and RCS later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+	} else {
+		ring->add_request = i9xx_add_request;
+		if (INTEL_INFO(dev)->gen < 4)
+			ring->flush = gen2_render_ring_flush;
+		else
+			ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (IS_GEN2(dev)) {
+			ring->irq_get = i8xx_ring_get_irq;
+			ring->irq_put = i8xx_ring_put_irq;
+		} else {
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
+	}
+	ring->write_tail = ring_write_tail;
+
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init_hw = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
+
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to ping batch bo\n");
+			return ret;
+		}
+
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+	}
+
+	ret = intel_init_ring_buffer(dev, ring);
+	if (ret)
+		return ret;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		ret = intel_init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+
+	ring->name = "bsd ring";
+	ring->id = VCS;
+
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->mmio_base = GEN6_BSD_RING_BASE;
+		/* gen6 bsd needs a special workaround for tail updates */
+		if (IS_GEN6(dev))
+			ring->write_tail = gen6_bsd_ring_write_tail;
+		ring->flush = gen6_bsd_ring_flush;
+		ring->add_request = gen6_add_request;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen8_ring_sync;
+				ring->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT;
+			}
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen6_ring_sync;
+				ring->semaphore.signal = gen6_signal;
+				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			}
+		}
+	} else {
+		ring->mmio_base = BSD_RING_BASE;
+		ring->flush = bsd_ring_flush;
+		ring->add_request = i9xx_add_request;
+		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (IS_GEN5(dev)) {
+			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	}
+	ring->init_hw = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+/**
+ * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+	ring->name = "bsd2 ring";
+	ring->id = VCS2;
+
+	ring->write_tail = ring_write_tail;
+	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	ring->flush = gen6_bsd_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+	ring->irq_get = gen8_ring_get_irq;
+	ring->irq_put = gen8_ring_put_irq;
+	ring->dispatch_execbuffer =
+			gen8_ring_dispatch_execbuffer;
+	if (i915_semaphore_is_enabled(dev)) {
+		ring->semaphore.sync_to = gen8_ring_sync;
+		ring->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT;
+	}
+	ring->init_hw = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+
+	ring->name = "blitter ring";
+	ring->id = BCS;
+
+	ring->mmio_base = BLT_RING_BASE;
+	ring->write_tail = ring_write_tail;
+	ring->flush = gen6_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.sync_to = gen6_ring_sync;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform.  And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between BCS and VCS2 is
+			 * initialized as INVALID.  Gen8 will initialize the
+			 * sema between BCS and VCS2 later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
+	}
+	ring->init_hw = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_vebox_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+
+	ring->name = "video enhancement ring";
+	ring->id = VECS;
+
+	ring->mmio_base = VEBOX_RING_BASE;
+	ring->write_tail = ring_write_tail;
+	ring->flush = gen6_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
+	}
+	ring->init_hw = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+void
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
+{
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	stop_ring(ring);
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..49fa41d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,502 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+#include <linux/hashtable.h>
+#include "i915_gem_batch_pool.h"
+
+#define I915_CMD_HASH_ORDER 9
+
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication of where some of the magic values used in the
+ * various workarounds come from!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
+
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
+struct  intel_hw_status_page {
+	u32		*page_addr;
+	unsigned int	gfx_addr;
+	struct		drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and those must have qword-aligned offsets, simply pretend
+ * it's 8 bytes.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
+	(i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from)			     \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	(i915_semaphore_seqno_size * (__ring)->id))
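+
+/*
+ * As a worked example: with I915_NUM_RINGS = 5 and a seqno size of 8 bytes,
+ * RCS (id 0) signalling VCS (id 1) gives GEN8_SIGNAL_OFFSET = base + 0x08,
+ * matching the gen8 signal/wait table documented in struct intel_engine_cs
+ * below.
+ */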
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+	if (!dev_priv->semaphore_obj) { \
+		break; \
+	} \
+	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	} while (0)
+
+enum intel_ring_hangcheck_action {
+	HANGCHECK_IDLE = 0,
+	HANGCHECK_WAIT,
+	HANGCHECK_ACTIVE,
+	HANGCHECK_ACTIVE_LOOP,
+	HANGCHECK_KICK,
+	HANGCHECK_HUNG,
+};
+
+#define HANGCHECK_SCORE_RING_HUNG 31
+
+struct intel_ring_hangcheck {
+	u64 acthd;
+	u64 max_acthd;
+	u32 seqno;
+	int score;
+	enum intel_ring_hangcheck_action action;
+	int deadlock;
+};
+
+struct intel_ringbuffer {
+	struct drm_i915_gem_object *obj;
+	void __iomem *virtual_start;
+
+	struct intel_engine_cs *ring;
+
+	u32 head;
+	u32 tail;
+	int space;
+	int size;
+	int effective_size;
+	int reserved_size;
+	int reserved_tail;
+	bool reserved_in_use;
+
+	/** We track the position of the requests in the ring buffer, and
+	 * when each is retired we increment last_retired_head as the GPU
+	 * must have finished processing the request and so we know we
+	 * can advance the ringbuffer up to that position.
+	 *
+	 * last_retired_head is set to -1 after the value is consumed so
+	 * we can detect new retirements.
+	 */
+	u32 last_retired_head;
+};
+
+struct	intel_context;
+struct drm_i915_reg_descriptor;
+
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in terms of dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies the batch starting position, also helpful in case
+ *    we want to have multiple batches at different offsets based on
+ *    some criteria. It is not a requirement at the moment but provides
+ *    an option for future use.
+ *  size: size of the batch in dwords
+ */
+struct  i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct drm_i915_gem_object *obj;
+};
+
+struct  intel_engine_cs {
+	const char	*name;
+	enum intel_ring_id {
+		RCS = 0x0,
+		VCS,
+		BCS,
+		VECS,
+		VCS2
+	} id;
+#define I915_NUM_RINGS 5
+#define LAST_USER_RING (VECS + 1)
+	u32		mmio_base;
+	struct		drm_device *dev;
+	struct intel_ringbuffer *buffer;
+
+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 */
+	struct i915_gem_batch_pool batch_pool;
+
+	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;
+
+	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
+	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
+	struct drm_i915_gem_request *trace_irq_req;
+	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+	void		(*irq_put)(struct intel_engine_cs *ring);
+
+	int		(*init_hw)(struct intel_engine_cs *ring);
+
+	int		(*init_context)(struct drm_i915_gem_request *req);
+
+	void		(*write_tail)(struct intel_engine_cs *ring,
+				      u32 value);
+	int __must_check (*flush)(struct drm_i915_gem_request *req,
+				  u32	invalidate_domains,
+				  u32	flush_domains);
+	int		(*add_request)(struct drm_i915_gem_request *req);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_engine_cs *ring,
+				     bool lazy_coherency);
+	void		(*set_seqno)(struct intel_engine_cs *ring,
+				     u32 seqno);
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
+					       u64 offset, u32 length,
+					       unsigned dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
+	void		(*cleanup)(struct intel_engine_cs *ring);
+
+	/* GEN8 signal/wait table - never trust comments!
+	 *	  signal to	signal to    signal to   signal to      signal to
+	 *	    RCS		   VCS          BCS        VECS		 VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+	 *	|-------------------------------------------------------------------
+	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+	 *	|-------------------------------------------------------------------
+	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+	 *	|-------------------------------------------------------------------
+	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
+	 *	|-------------------------------------------------------------------
+	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
+	 *	|-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+	 *  ie. transpose of g(x, y)
+	 *
+	 *	 sync from	sync from    sync from    sync from	sync from
+	 *	    RCS		   VCS          BCS        VECS		 VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+	 *	|-------------------------------------------------------------------
+	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+	 *	|-------------------------------------------------------------------
+	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+	 *	|-------------------------------------------------------------------
+	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
+	 *	|-------------------------------------------------------------------
+	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
+	 *	|-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+	 *  ie. transpose of f(x, y)
+	 */
+	struct {
+		u32	sync_seqno[I915_NUM_RINGS-1];
+
+		union {
+			struct {
+				/* our mbox written by others */
+				u32		wait[I915_NUM_RINGS];
+				/* mboxes this ring signals to */
+				u32		signal[I915_NUM_RINGS];
+			} mbox;
+			u64		signal_ggtt[I915_NUM_RINGS];
+		};
+
+		/* AKA wait() */
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
+				   u32 seqno);
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
+				  /* num_dwords needed by caller */
+				  unsigned int num_dwords);
+	} semaphore;
+
+	/* Execlists */
+	spinlock_t execlist_lock;
+	struct list_head execlist_queue;
+	struct list_head execlist_retired_req_list;
+	u8 next_context_status_buffer;
+	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
+	int		(*emit_request)(struct drm_i915_gem_request *request);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 invalidate_domains,
+				      u32 flush_domains);
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, unsigned dispatch_flags);
+
+	/**
+	 * List of objects currently involved in rendering from the
+	 * ringbuffer.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_read_req
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * List of breadcrumbs associated with GPU requests currently
+	 * outstanding.
+	 */
+	struct list_head request_list;
+
+	/**
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
+	 */
+	u32 last_submitted_seqno;
+
+	bool gpu_caches_dirty;
+
+	wait_queue_head_t irq_queue;
+
+	struct intel_context *default_context;
+	struct intel_context *last_context;
+
+	struct intel_ring_hangcheck hangcheck;
+
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
+
+	bool needs_cmd_parser;
+
+	/*
+	 * Table of commands the command parser needs to know about
+	 * for this ring.
+	 */
+	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+	/*
+	 * Table of registers allowed in commands that read/write registers.
+	 */
+	const struct drm_i915_reg_descriptor *reg_table;
+	int reg_count;
+
+	/*
+	 * Table of registers allowed in commands that read/write registers, but
+	 * only from the DRM master.
+	 */
+	const struct drm_i915_reg_descriptor *master_reg_table;
+	int master_reg_count;
+
+	/*
+	 * Returns the bitmask for the length field of the specified command.
+	 * Return 0 for an unrecognized/invalid command.
+	 *
+	 * If the command parser finds an entry for a command in the ring's
+	 * cmd_tables, it gets the command's length based on the table entry.
+	 * If not, it calls this function to determine the per-ring length field
+	 * encoding for the command (i.e. certain opcode ranges use certain bits
+	 * to encode the command length in the header).
+	 */
+	u32 (*get_cmd_length_mask)(u32 cmd_header);
+};
+
+bool intel_ring_initialized(struct intel_engine_cs *ring);
+
+static inline unsigned
+intel_ring_flag(struct intel_engine_cs *ring)
+{
+	return 1 << ring->id;
+}
+
+static inline u32
+intel_ring_sync_index(struct intel_engine_cs *ring,
+		      struct intel_engine_cs *other)
+{
+	int idx;
+
+	/*
+	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+	 */
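+
+	/*
+	 * E.g. for ring == vcs (id 1) and other == rcs (id 0):
+	 * idx = (0 - 1) - 1 = -2, which wraps to -2 + I915_NUM_RINGS = 3,
+	 * matching the table above.
+	 */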
+
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
+}
+
+static inline void
+intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+{
+	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+			       sizeof(uint32_t));
+}
+
+static inline u32
+intel_read_status_page(struct intel_engine_cs *ring,
+		       int reg)
+{
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
+}
+
+static inline void
+intel_write_status_page(struct intel_engine_cs *ring,
+			int reg, u32 value)
+{
+	ring->status_page.page_addr[reg] = value;
+}
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
+ *
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX		0x30
+#define I915_GEM_HWS_SCRATCH_INDEX	0x40
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
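+
+/*
+ * For example, the most recently completed seqno is read from the status
+ * page with intel_read_status_page(ring, I915_GEM_HWS_INDEX); the
+ * add_request implementations store it there via MI_STORE_DWORD_INDEX.
+ */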
+
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
+				   u32 data)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
+}
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	ringbuf->tail &= ringbuf->size - 1;
+}
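+
+/*
+ * A minimal emission sketch (with ring == req->ring), as used throughout
+ * intel_ringbuffer.c:
+ *
+ *	ret = intel_ring_begin(req, 2);
+ *	if (ret)
+ *		return ret;
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_advance(ring);
+ *
+ * intel_ring_begin() guarantees space for the requested dwords (wrapping
+ * if necessary), each intel_ring_emit() writes one dword at the tail, and
+ * intel_ring_advance() wraps the tail back into the ring.
+ */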
+int __intel_ring_space(int head, int tail, int size);
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+int intel_ring_space(struct intel_ringbuffer *ringbuf);
+bool intel_ring_stopped(struct intel_engine_cs *ring);
+
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+
+void intel_fini_pipe_control(struct intel_engine_cs *ring);
+int intel_init_pipe_control(struct intel_engine_cs *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_bsd2_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+int intel_init_vebox_ring_buffer(struct drm_device *dev);
+
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+
+int init_workarounds_ring(struct intel_engine_cs *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+{
+	return ringbuf->tail;
+}
+
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST	160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+
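+/*
+ * The expected lifecycle, roughly: intel_ring_reserved_space_reserve() at
+ * request creation, then either intel_ring_reserved_space_cancel() if the
+ * request is discarded, or intel_ring_reserved_space_use() and
+ * intel_ring_reserved_space_end() bracketing the i915_add_request() emission.
+ */
+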
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 0000000..7e23d65
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,2174 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
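+
+/*
+ * A typical consumer brackets register access with a domain reference:
+ * intel_display_power_get(dev_priv, domain) before touching the hardware
+ * and intel_display_power_put(dev_priv, domain) when done.
+ */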
+
+#define GEN9_ENABLE_DC5(dev) 0
+#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
+	for (i = 0;							\
+	     i < (power_domains)->power_well_count &&			\
+		 ((power_well) = &(power_domains)->power_wells[i]);	\
+	     i++)							\
+		if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+	for (i = (power_domains)->power_well_count - 1;			 \
+	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+	     i--)							 \
+		if ((power_well)->domains & (domain_mask))
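+
+/*
+ * Usage sketch: walk every well backing a given domain, e.g.
+ *
+ *	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ *		if (!power_well->hw_enabled)
+ *			return false;
+ *
+ * which is essentially what __intel_display_power_is_enabled() does below.
+ */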
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+				    int power_well_id);
+
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+				    struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+	power_well->ops->enable(dev_priv, power_well);
+	power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+	power_well->hw_enabled = false;
+	power_well->ops->disable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	return I915_READ(HSW_PWR_WELL_DRIVER) ==
+		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+				      enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	bool is_enabled;
+	int i;
+
+	if (dev_priv->pm.suspended)
+		return false;
+
+	power_domains = &dev_priv->power_domains;
+
+	is_enabled = true;
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		if (power_well->always_on)
+			continue;
+
+		if (!power_well->hw_enabled) {
+			is_enabled = false;
+			break;
+		}
+	}
+
+	return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+				    enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains;
+	bool ret;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+	ret = __intel_display_power_is_enabled(dev_priv, domain);
+	mutex_unlock(&power_domains->lock);
+
+	return ret;
+}
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This function controls the state
+ * of this little hack. While the initial power domain state is enabled, runtime
+ * pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+				  bool enable)
+{
+	if (dev_priv->power_domains.init_power_on == enable)
+		return;
+
+	if (enable)
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+	else
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+	dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	/*
+	 * After we re-enable the power well, if we touch VGA register 0x3d5
+	 * we'll get unclaimed register interrupts. This stops after we write
+	 * anything to the VGA MSR register. The vgacon module uses this
+	 * register all the time, so if we unbind our driver and, as a
+	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we touch the VGA MSR register, making
+	 * sure vgacon can keep working normally without triggering interrupts
+	 * and error messages.
+	 */
+	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+	if (IS_BROADWELL(dev))
+		gen8_irq_power_well_post_enable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+}
+
+static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
+				       struct i915_power_well *power_well)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	/*
+	 * After we re-enable the power well, if we touch VGA register 0x3d5
+	 * we'll get unclaimed register interrupts. This stops after we write
+	 * anything to the VGA MSR register. The vgacon module uses this
+	 * register all the time, so if we unbind our driver and, as a
+	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we touch the VGA MSR register, making
+	 * sure vgacon can keep working normally without triggering interrupts
+	 * and error messages.
+	 */
+	if (power_well->data == SKL_DISP_PW_2) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+		gen8_irq_power_well_post_enable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+	}
+
+	if (power_well->data == SKL_DISP_PW_1) {
+		if (!dev_priv->power_domains.initializing)
+			intel_prepare_ddi(dev);
+		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
+	}
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
+{
+	bool is_enabled, enable_requested;
+	uint32_t tmp;
+
+	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+	if (enable) {
+		if (!enable_requested)
+			I915_WRITE(HSW_PWR_WELL_DRIVER,
+				   HSW_PWR_WELL_ENABLE_REQUEST);
+
+		if (!is_enabled) {
+			DRM_DEBUG_KMS("Enabling power well\n");
+			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+				      HSW_PWR_WELL_STATE_ENABLED), 20))
+				DRM_ERROR("Timeout enabling power well\n");
+			hsw_power_well_post_enable(dev_priv);
+		}
+
+	} else {
+		if (enable_requested) {
+			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+			POSTING_READ(HSW_PWR_WELL_DRIVER);
+			DRM_DEBUG_KMS("Requesting to disable the power well\n");
+		}
+	}
+}
+
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_B) |                       \
+	BIT(POWER_DOMAIN_AUX_C) |			\
+	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_AUDIO) |			\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_PIPE_A) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
+	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
+	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_B) |			\
+	BIT(POWER_DOMAIN_AUX_C) |			\
+	BIT(POWER_DOMAIN_AUDIO) |			\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_GMBUS) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PIPE_A) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
+	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
+	BIT(POWER_DOMAIN_INIT))
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
+	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+		"DC9 already programmed to be enabled.\n");
+	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		"DC5 still not disabled to enable DC9.\n");
+	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+
+	/*
+	 * TODO: check the following to verify that the conditions to enter
+	 * the DC9 state are satisfied:
+	 * 1] Check the relevant display engine registers to verify that the
+	 *    mode set disable sequence was followed.
+	 * 2] Check that the display uninitialize sequence was initiated.
+	 */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+		"DC9 already programmed to be disabled.\n");
+	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		"DC5 still not disabled.\n");
+
+	/*
+	 * TODO: check the following to verify that the DC9 state was indeed
+	 * entered before programming to disable it:
+	 * 1] Check the relevant display engine registers to verify that the
+	 *    mode set disable sequence was followed.
+	 * 2] Check that the display uninitialize sequence was initiated.
+	 */
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC9\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val |= DC_STATE_EN_DC9;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC9\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_DC9;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_set_dc_state_debugmask_memory_up(
+			struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	/* The bit below never needs to be cleared afterwards */
+	val = I915_READ(DC_STATE_DEBUG);
+	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
+		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+		I915_WRITE(DC_STATE_DEBUG, val);
+		POSTING_READ(DC_STATE_DEBUG);
+	}
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+					SKL_DISP_PW_2);
+
+	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+		  "DC5 already programmed to be enabled.\n");
+	WARN_ONCE(dev_priv->pm.suspended,
+		  "DC5 cannot be enabled, if platform is runtime-suspended.\n");
+
+	assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
+{
+	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+					SKL_DISP_PW_2);
+	/*
+	 * During initialization, the firmware may not be loaded yet.
+	 * We still want to make sure that the DC enabling flag is cleared.
+	 */
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
+	WARN_ONCE(dev_priv->pm.suspended,
+		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
+}
+
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc5(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC5\n");
+
+	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+	val |= DC_STATE_EN_UPTO_DC5;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc5(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC5\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+		  "Backlight is not disabled.\n");
+	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+		  "DC6 already programmed to be enabled.\n");
+
+	assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * During initialization, the firmware may not be loaded yet.
+	 * We still want to make sure that the DC enabling flag is cleared.
+	 */
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	assert_csr_loaded(dev_priv);
+	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+		  "DC6 already programmed to be disabled.\n");
+}
+
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc6(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC6\n");
+
+	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+	val |= DC_STATE_EN_UPTO_DC6;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void skl_disable_dc6(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc6(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC6\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC6;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void skl_set_power_well(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well, bool enable)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t tmp, fuse_status;
+	uint32_t req_mask, state_mask;
+	bool is_enabled, enable_requested, check_fuse_status = false;
+
+	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+	fuse_status = I915_READ(SKL_FUSE_STATUS);
+
+	switch (power_well->data) {
+	case SKL_DISP_PW_1:
+		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+			SKL_FUSE_PG0_DIST_STATUS), 1)) {
+			DRM_ERROR("PG0 not enabled\n");
+			return;
+		}
+		break;
+	case SKL_DISP_PW_2:
+		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
+			DRM_ERROR("PG1 in disabled state\n");
+			return;
+		}
+		break;
+	case SKL_DISP_PW_DDI_A_E:
+	case SKL_DISP_PW_DDI_B:
+	case SKL_DISP_PW_DDI_C:
+	case SKL_DISP_PW_DDI_D:
+	case SKL_DISP_PW_MISC_IO:
+		break;
+	default:
+		WARN(1, "Unknown power well %lu\n", power_well->data);
+		return;
+	}
+
+	req_mask = SKL_POWER_WELL_REQ(power_well->data);
+	enable_requested = tmp & req_mask;
+	state_mask = SKL_POWER_WELL_STATE(power_well->data);
+	is_enabled = tmp & state_mask;
+
+	if (enable) {
+		if (!enable_requested) {
+			WARN((tmp & state_mask) &&
+				!I915_READ(HSW_PWR_WELL_BIOS),
+				"Invalid for power well status to be enabled, unless done by the BIOS, \
+				when request is to disable!\n");
+			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+				power_well->data == SKL_DISP_PW_2) {
+				if (SKL_ENABLE_DC6(dev)) {
+					skl_disable_dc6(dev_priv);
+					/*
+					 * DDI buffer programming is unnecessary during
+					 * driver-load/resume, as it's already done during
+					 * modeset initialization then. It's also invalid
+					 * here, as the encoder list is still uninitialized.
+					 */
+					if (!dev_priv->power_domains.initializing)
+						intel_prepare_ddi(dev);
+				} else {
+					gen9_disable_dc5(dev_priv);
+				}
+			}
+			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
+		}
+
+		if (!is_enabled) {
+			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
+			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+				state_mask), 1))
+				DRM_ERROR("%s enable timeout\n",
+					power_well->name);
+			check_fuse_status = true;
+		}
+	} else {
+		if (enable_requested) {
+			if (IS_SKYLAKE(dev) &&
+				(power_well->data == SKL_DISP_PW_1) &&
+				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
+				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
+			else {
+				I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
+				POSTING_READ(HSW_PWR_WELL_DRIVER);
+				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+			}
+
+			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+				power_well->data == SKL_DISP_PW_2) {
+				enum csr_state state;
+				/* TODO: wait for a completion event or
+				 * similar here instead of busy-waiting
+				 * with the wait_for() macro.
+				 */
+				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
+						FW_UNINITIALIZED, 1000);
+				if (state != FW_LOADED)
+					DRM_DEBUG("CSR firmware not ready (%d)\n",
+							state);
+				else if (SKL_ENABLE_DC6(dev))
+					skl_enable_dc6(dev_priv);
+				else
+					gen9_enable_dc5(dev_priv);
+			}
+		}
+	}
+
+	if (check_fuse_status) {
+		if (power_well->data == SKL_DISP_PW_1) {
+			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+				SKL_FUSE_PG1_DIST_STATUS), 1))
+				DRM_ERROR("PG1 distributing status timeout\n");
+		} else if (power_well->data == SKL_DISP_PW_2) {
+			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+				SKL_FUSE_PG2_DIST_STATUS), 1))
+				DRM_ERROR("PG2 distributing status timeout\n");
+		}
+	}
+
+	if (enable && !is_enabled)
+		skl_power_well_post_enable(dev_priv, power_well);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+	/*
+	 * We're taking over the BIOS, so clear any requests made by it since
+	 * the driver is in charge now.
+	 */
+	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+				  struct i915_power_well *power_well)
+{
+	hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
+		SKL_POWER_WELL_STATE(power_well->data);
+
+	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
+}
+
+static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+	/* Clear any request made by BIOS as driver is taking over */
+	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void skl_power_well_enable(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, true);
+}
+
+static void skl_power_well_disable(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+					     struct i915_power_well *power_well)
+{
+	return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
+{
+	enum punit_power_well power_well_id = power_well->data;
+	u32 mask;
+	u32 state;
+	u32 ctrl;
+
+	mask = PUNIT_PWRGT_MASK(power_well_id);
+	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+			 PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
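+	/* True once the Punit power gate status matches the requested state. */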
+#define COND \
+	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+	if (COND)
+		goto out;
+
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+	ctrl &= ~mask;
+	ctrl |= state;
+	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+	if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+			  state,
+			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+				  struct i915_power_well *power_well)
+{
+	vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+				   struct i915_power_well *power_well)
+{
+	int power_well_id = power_well->data;
+	bool enabled = false;
+	u32 mask;
+	u32 state;
+	u32 ctrl;
+
+	mask = PUNIT_PWRGT_MASK(power_well_id);
+	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+	/*
+	 * We only ever set the power-on and power-gate states, anything
+	 * else is unexpected.
+	 */
+	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+	if (state == ctrl)
+		enabled = true;
+
+	/*
+	 * A transient state at this point would mean some unexpected party
+	 * is poking at the power controls too.
+	 */
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+	WARN_ON(ctrl != state);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return enabled;
+}
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+
+	/*
+	 * Enable the CRI clock source so we can get at the
+	 * display and the reference clock for VGA
+	 * hotplug / manual detection. Supposedly DSI also
+	 * needs the ref clock up and running.
+	 *
+	 * CHV DPLL B/C have some issues if VGA mode is enabled.
+	 */
+	for_each_pipe(dev_priv, pipe) {
+		u32 val = I915_READ(DPLL(pipe));
+
+		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+		if (pipe != PIPE_A)
+			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+		I915_WRITE(DPLL(pipe), val);
+	}
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	valleyview_enable_display_irqs(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/*
+	 * During driver initialization/resume we can avoid restoring the
+	 * part of the HW/SW state that will be inited anyway explicitly.
+	 */
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	intel_hpd_init(dev_priv);
+
+	i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	valleyview_disable_display_irqs(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+					  struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+	vlv_display_power_well_deinit(dev_priv);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	/* since ref/cri clock was enabled */
+	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *   b.	The other bits such as sfr settings / modesel may all
+	 *	be set to 0.
+	 *
+	 * This should only be done on init and resume from S3 with
+	 * both PLLs disabled, or we risk losing DPIO and PLL
+	 * synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+					    struct i915_power_well *power_well)
+{
+	enum pipe pipe;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	for_each_pipe(dev_priv, pipe)
+		assert_pll_disabled(dev_priv, pipe);
+
+	/* Assert common reset */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
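+/* A mask with one bit set for every power domain. */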
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+						 int power_well_id)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->data == power_well_id)
+			return power_well;
+	}
+
+	return NULL;
+}
+
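+/* True iff all bits in @bits are set in @val. */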
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn_bc =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *cmn_d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+	u32 phy_control = dev_priv->chv_phy_control;
+	u32 phy_status = 0;
+	u32 phy_status_mask = 0xffffffff;
+	u32 tmp;
+
+	/*
+	 * The BIOS can leave the PHY in some weird state
+	 * where it doesn't fully power down some parts.
+	 * Disable the asserts until the PHY has been fully
+	 * reset (i.e. the power well has been disabled at
+	 * least once).
+	 */
+	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+		/* this assumes override is only used to enable lanes */
+		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+		/* CL1 is on whenever anything is on in either channel */
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+		/*
+		 * The DPLLB check accounts for the pipe B + port A usage
+		 * with CL2 powered up but all the lanes in the second channel
+		 * powered down.
+		 */
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+	}
+
+	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+		/* this assumes override is only used to enable lanes */
+		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+		if (BITS_SET(phy_control,
+			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+	}
+
+	phy_status &= phy_status_mask;
+
+	/*
+	 * The PHY may be busy with some initial calibration and whatnot,
+	 * so the power state can take a while to actually change.
+	 */
+	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
+		WARN(phy_status != tmp,
+		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+		     tmp, phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	enum dpio_phy phy;
+	enum pipe pipe;
+	uint32_t tmp;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+		pipe = PIPE_A;
+		phy = DPIO_PHY0;
+	} else {
+		pipe = PIPE_C;
+		phy = DPIO_PHY1;
+	}
+
+	/* since ref/cri clock was enabled */
+	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	/* Poll for phypwrgood signal */
+	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+		DRM_ERROR("Display PHY %d is not powered up\n", phy);
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	/* Enable dynamic power down */
+	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+		tmp |= DPIO_DYNPWRDOWNEN_CH1;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+	} else {
+		/*
+		 * Force the non-existing CL2 off. BXT does this
+		 * too, so maybe it saves some power even though
+		 * CL2 doesn't exist?
+		 */
+		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+	}
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+		      phy, dev_priv->chv_phy_control);
+
+	assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+					    struct i915_power_well *power_well)
+{
+	enum dpio_phy phy;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+		phy = DPIO_PHY0;
+		assert_pll_disabled(dev_priv, PIPE_A);
+		assert_pll_disabled(dev_priv, PIPE_B);
+	} else {
+		phy = DPIO_PHY1;
+		assert_pll_disabled(dev_priv, PIPE_C);
+	}
+
+	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+
+	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+		      phy, dev_priv->chv_phy_control);
+
+	/* PHY is fully reset now, so we can enable the PHY state asserts */
+	dev_priv->chv_phy_assert[phy] = true;
+
+	assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+				     enum dpio_channel ch, bool override, unsigned int mask)
+{
+	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+	u32 reg, val, expected, actual;
+
+	/*
+	 * The BIOS can leave the PHY in some weird state
+	 * where it doesn't fully power down some parts.
+	 * Disable the asserts until the PHY has been fully
+	 * reset (i.e. the power well has been disabled at
+	 * least once).
+	 */
+	if (!dev_priv->chv_phy_assert[phy])
+		return;
+
+	if (ch == DPIO_CH0)
+		reg = _CHV_CMN_DW0_CH0;
+	else
+		reg = _CHV_CMN_DW6_CH1;
+
+	mutex_lock(&dev_priv->sb_lock);
+	val = vlv_dpio_read(dev_priv, pipe, reg);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	/*
+	 * This assumes !override is only used when the port is disabled.
+	 * All lanes should power down even without the override when
+	 * the port is disabled.
+	 */
+	if (!override || mask == 0xf) {
+		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+		/*
+		 * If CH1 common lane is not active anymore
+		 * (eg. for pipe B DPLL) the entire channel will
+		 * shut down, which causes the common lane registers
+		 * to read as 0. That means we can't actually check
+		 * the lane power down status bits, but as the entire
+		 * register reads as 0 it's a good indication that the
+		 * channel is indeed entirely powered down.
+		 */
+		if (ch == DPIO_CH1 && val == 0)
+			expected = 0;
+	} else if (mask != 0x0) {
+		expected = DPIO_ANYDL_POWERDOWN;
+	} else {
+		expected = 0;
+	}
+
+	if (ch == DPIO_CH0)
+		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+	else
+		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+	WARN(actual != expected,
+	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+	     reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+			  enum dpio_channel ch, bool override)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	bool was_override;
+
+	mutex_lock(&power_domains->lock);
+
+	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+	if (override == was_override)
+		goto out;
+
+	if (override)
+		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+	else
+		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+		      phy, ch, dev_priv->chv_phy_control);
+
+	assert_chv_phy_status(dev_priv);
+
+out:
+	mutex_unlock(&power_domains->lock);
+
+	return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+			     bool override, unsigned int mask)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+	mutex_lock(&power_domains->lock);
+
+	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+	if (override)
+		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+	else
+		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+		      phy, ch, mask, dev_priv->chv_phy_control);
+
+	assert_chv_phy_status(dev_priv);
+
+	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+	mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	enum pipe pipe = power_well->data;
+	bool enabled;
+	u32 state, ctrl;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+	/*
+	 * We only ever set the power-on and power-gate states, anything
+	 * else is unexpected.
+	 */
+	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+	enabled = state == DP_SSS_PWR_ON(pipe);
+
+	/*
+	 * A transient state at this point would mean some unexpected party
+	 * is poking at the power controls too.
+	 */
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+	WARN_ON(ctrl << 16 != state);
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+				    struct i915_power_well *power_well,
+				    bool enable)
+{
+	enum pipe pipe = power_well->data;
+	u32 state;
+	u32 ctrl;
+
+	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
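+	/* True once the Punit reports the requested pipe power state. */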
+#define COND \
+	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+	if (COND)
+		goto out;
+
+	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	ctrl &= ~DP_SSC_MASK(pipe);
+	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+	if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+			  state,
+			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
+	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+				       struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
+	chv_set_pipe_power_well(dev_priv, power_well, true);
+
+	vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
+	vlv_display_power_well_deinit(dev_priv);
+
+	chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+			     enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	intel_runtime_pm_get(dev_priv);
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+
+	for_each_power_well(i, power_well, BIT(domain), power_domains) {
+		if (!power_well->count++)
+			intel_power_well_enable(dev_priv, power_well);
+	}
+
+	power_domains->domain_use_count[domain]++;
+
+	mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+			     enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+
+	WARN_ON(!power_domains->domain_use_count[domain]);
+	power_domains->domain_use_count[domain]--;
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		WARN_ON(!power_well->count);
+
+		if (!--power_well->count && i915.disable_power_well)
+			intel_power_well_disable(dev_priv, power_well);
+	}
+
+	mutex_unlock(&power_domains->lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
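+
+/*
+ * Illustrative usage (a sketch, not driver code): every call to
+ * intel_display_power_get() must be balanced by a matching
+ * intel_display_power_put() once the hardware access is done,
+ * e.g. for the VGA power domain:
+ *
+ *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
+ *	... access the VGA registers ...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+ */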
+
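+/* Per-platform groupings of power domains handled by the wells below. */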
+#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
+	BIT(POWER_DOMAIN_PIPE_A) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_CRT) |			\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_AUX_B) |			\
+	BIT(POWER_DOMAIN_AUX_C) |			\
+	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_GMBUS) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS (				\
+	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
+	BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
+	HSW_ALWAYS_ON_POWER_DOMAINS |			\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS (				\
+	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
+	BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_CRT) |		\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
+	BIT(POWER_DOMAIN_AUX_D) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+	.sync_hw = i9xx_always_on_power_well_noop,
+	.enable = i9xx_always_on_power_well_noop,
+	.disable = i9xx_always_on_power_well_noop,
+	.is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+	.sync_hw = chv_pipe_power_well_sync_hw,
+	.enable = chv_pipe_power_well_enable,
+	.disable = chv_pipe_power_well_disable,
+	.is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = chv_dpio_cmn_power_well_enable,
+	.disable = chv_dpio_cmn_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = POWER_DOMAIN_MASK,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+	.sync_hw = hsw_power_well_sync_hw,
+	.enable = hsw_power_well_enable,
+	.disable = hsw_power_well_disable,
+	.is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops skl_power_well_ops = {
+	.sync_hw = skl_power_well_sync_hw,
+	.enable = skl_power_well_enable,
+	.disable = skl_power_well_disable,
+	.is_enabled = skl_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "display",
+		.domains = HSW_DISPLAY_POWER_DOMAINS,
+		.ops = &hsw_power_well_ops,
+	},
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "display",
+		.domains = BDW_DISPLAY_POWER_DOMAINS,
+		.ops = &hsw_power_well_ops,
+	},
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_display_power_well_enable,
+	.disable = vlv_display_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_dpio_cmn_power_well_enable,
+	.disable = vlv_dpio_cmn_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_power_well_enable,
+	.disable = vlv_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "display",
+		.domains = VLV_DISPLAY_POWER_DOMAINS,
+		.data = PUNIT_POWER_WELL_DISP2D,
+		.ops = &vlv_display_power_well_ops,
+	},
+	{
+		.name = "dpio-tx-b-01",
+		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+		.ops = &vlv_dpio_power_well_ops,
+		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+	},
+	{
+		.name = "dpio-tx-b-23",
+		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+		.ops = &vlv_dpio_power_well_ops,
+		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+	},
+	{
+		.name = "dpio-tx-c-01",
+		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+		.ops = &vlv_dpio_power_well_ops,
+		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+	},
+	{
+		.name = "dpio-tx-c-23",
+		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+		.ops = &vlv_dpio_power_well_ops,
+		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+	},
+	{
+		.name = "dpio-common",
+		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+		.ops = &vlv_dpio_cmn_power_well_ops,
+	},
+};
+
+static struct i915_power_well chv_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "display",
+		/*
+		 * Pipe A power well is the new disp2d well. Pipe B and C
+		 * power wells don't actually exist. Pipe A power well is
+		 * required for any pipe to work.
+		 */
+		.domains = VLV_DISPLAY_POWER_DOMAINS,
+		.data = PIPE_A,
+		.ops = &chv_pipe_power_well_ops,
+	},
+	{
+		.name = "dpio-common-bc",
+		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
+		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+		.ops = &chv_dpio_cmn_power_well_ops,
+	},
+	{
+		.name = "dpio-common-d",
+		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
+		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
+		.ops = &chv_dpio_cmn_power_well_ops,
+	},
+};
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+				    int power_well_id)
+{
+	struct i915_power_well *power_well;
+	bool ret;
+
+	power_well = lookup_power_well(dev_priv, power_well_id);
+	ret = power_well->ops->is_enabled(dev_priv, power_well);
+
+	return ret;
+}
+
+static struct i915_power_well skl_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "power well 1",
+		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_1,
+	},
+	{
+		.name = "MISC IO power well",
+		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_MISC_IO,
+	},
+	{
+		.name = "power well 2",
+		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_2,
+	},
+	{
+		.name = "DDI A/E power well",
+		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_A_E,
+	},
+	{
+		.name = "DDI B power well",
+		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_B,
+	},
+	{
+		.name = "DDI C power well",
+		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_C,
+	},
+	{
+		.name = "DDI D power well",
+		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_D,
+	},
+};
+
+static struct i915_power_well bxt_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "power well 1",
+		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_1,
+	},
+	{
+		.name = "power well 2",
+		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_2,
+	}
+};
+
+static int
+sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+				   int disable_power_well)
+{
+	if (disable_power_well >= 0)
+		return !!disable_power_well;
+
+	if (IS_SKYLAKE(dev_priv)) {
+		DRM_DEBUG_KMS("Disabling display power well support\n");
+		return 0;
+	}
+
+	return 1;
+}
+
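+/* Statement-expression helper: records the well table and its entry count. */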
+#define set_power_wells(power_domains, __power_wells) ({		\
+	(power_domains)->power_wells = (__power_wells);			\
+	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
+						     i915.disable_power_well);
+
+	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
+	mutex_init(&power_domains->lock);
+
+	/*
+	 * The enabling order will be from lower to higher indexed wells,
+	 * the disabling order is reversed.
+	 */
+	if (IS_HASWELL(dev_priv->dev)) {
+		set_power_wells(power_domains, hsw_power_wells);
+	} else if (IS_BROADWELL(dev_priv->dev)) {
+		set_power_wells(power_domains, bdw_power_wells);
+	} else if (IS_SKYLAKE(dev_priv->dev)) {
+		set_power_wells(power_domains, skl_power_wells);
+	} else if (IS_BROXTON(dev_priv->dev)) {
+		set_power_wells(power_domains, bxt_power_wells);
+	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+		set_power_wells(power_domains, chv_power_wells);
+	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
+		set_power_wells(power_domains, vlv_power_wells);
+	} else {
+		set_power_wells(power_domains, i9xx_always_on_power_well);
+	}
+
+	return 0;
+}
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	if (!intel_enable_rc6(dev))
+		return;
+
+	/* Make sure we're not suspended first. */
+	pm_runtime_get_sync(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_disable(dev_priv);
+
+	/*
+	 * The i915.ko module is still not prepared to be loaded when
+	 * the power well is not enabled, so just enable it in case
+	 * we're going to unload/reload.
+	 */
+	intel_display_set_init_power(dev_priv, true);
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	mutex_lock(&power_domains->lock);
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		power_well->ops->sync_hw(dev_priv, power_well);
+		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+								     power_well);
+	}
+	mutex_unlock(&power_domains->lock);
+}
+
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn_bc =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *cmn_d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+
+	/*
+	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+	 * workaround never ever read DISPLAY_PHY_CONTROL, and
+	 * instead maintain a shadow copy ourselves. Use the actual
+	 * power well state and lane status to reconstruct the
+	 * expected initial value.
+	 */
+	dev_priv->chv_phy_control =
+		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+	/*
+	 * If all lanes are disabled we leave the override disabled
+	 * with all power down bits cleared to match the state we
+	 * would use after disabling the port. Otherwise enable the
+	 * override and set the lane powerdown bits according to the
+	 * current lane status.
+	 */
+	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+		uint32_t status = I915_READ(DPLL(PIPE_A));
+		unsigned int mask;
+
+		mask = status & DPLL_PORTB_READY_MASK;
+		if (mask == 0xf)
+			mask = 0x0;
+		else
+			dev_priv->chv_phy_control |=
+				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+		dev_priv->chv_phy_control |=
+			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+		if (mask == 0xf)
+			mask = 0x0;
+		else
+			dev_priv->chv_phy_control |=
+				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+		dev_priv->chv_phy_control |=
+			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
+		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+
+		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+	} else {
+		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+	}
+
+	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+		uint32_t status = I915_READ(DPIO_PHY_STATUS);
+		unsigned int mask;
+
+		mask = status & DPLL_PORTD_READY_MASK;
+
+		if (mask == 0xf)
+			mask = 0x0;
+		else
+			dev_priv->chv_phy_control |=
+				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+		dev_priv->chv_phy_control |=
+			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
+		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+
+		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+	} else {
+		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+	}
+
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+		      dev_priv->chv_phy_control);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *disp2d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+	/* If the display might already be active, skip this */
+	if (cmn->ops->is_enabled(dev_priv, cmn) &&
+	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
+	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
+		return;
+
+	DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+	/* cmnlane needs DPLL registers */
+	disp2d->ops->enable(dev_priv, disp2d);
+
+	/*
+	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+	 * Need to assert and de-assert PHY SB reset by gating the
+	 * common lane power, then un-gating it.
+	 * Simply ungating isn't enough to reset the PHY enough to get
+	 * ports and lanes running.
+	 */
+	cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+	power_domains->initializing = true;
+
+	if (IS_CHERRYVIEW(dev)) {
+		mutex_lock(&power_domains->lock);
+		chv_phy_control_init(dev_priv);
+		mutex_unlock(&power_domains->lock);
+	} else if (IS_VALLEYVIEW(dev)) {
+		mutex_lock(&power_domains->lock);
+		vlv_cmnlane_wa(dev_priv);
+		mutex_unlock(&power_domains->lock);
+	}
+
+	/* For now, we need the power well to be always enabled. */
+	intel_display_set_init_power(dev_priv, true);
+	intel_power_domains_resume(dev_priv);
+	power_domains->initializing = false;
+}
+
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	pm_runtime_get_sync(device);
+	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on.  Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code, where this is used to grab runtime pm references
+ * for setup that is deferred to work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+	pm_runtime_get_noresume(device);
+}
+
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	pm_runtime_mark_last_busy(device);
+	pm_runtime_put_autosuspend(device);
+}
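+
+/*
+ * Illustrative usage (a sketch, not driver code): pair every
+ * intel_runtime_pm_get() with an intel_runtime_pm_put() so the
+ * device's runtime pm reference count stays balanced:
+ *
+ *	intel_runtime_pm_get(dev_priv);
+ *	... touch the GT/GTT hardware ...
+ *	intel_runtime_pm_put(dev_priv);
+ */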
+
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does not currently enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	/*
+	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+	 * requirement.
+	 */
+	if (!intel_enable_rc6(dev)) {
+		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+		return;
+	}
+
+	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+	pm_runtime_mark_last_busy(device);
+	pm_runtime_use_autosuspend(device);
+
+	pm_runtime_put_autosuspend(device);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
new file mode 100644
index 0000000..c42b636
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -0,0 +1,3057 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+			SDVO_TV_MASK)
+
+#define IS_TV(c)	((c)->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	((c)->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c)	((c)->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) ((c)->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) ((c)->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char * const tv_format_names[] = {
+	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
+	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
+	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
+	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
+	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
+	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
+	"SECAM_60"
+};
+
+#define TV_FORMAT_NUM  ARRAY_SIZE(tv_format_names)
+
+struct intel_sdvo {
+	struct intel_encoder base;
+
+	struct i2c_adapter *i2c;
+	u8 slave_addr;
+
+	struct i2c_adapter ddc;
+
+	/* Register for the SDVO device: SDVOB or SDVOC */
+	uint32_t sdvo_reg;
+
+	/* Active outputs controlled by this SDVO output */
+	uint16_t controlled_output;
+
+	/*
+	 * Capabilities of the SDVO device returned by
+	 * intel_sdvo_get_capabilities()
+	 */
+	struct intel_sdvo_caps caps;
+
+	/* Pixel clock limitations reported by the SDVO device, in kHz */
+	int pixel_clock_min, pixel_clock_max;
+
+	/*
+	 * For a multi-function SDVO device,
+	 * this records the currently attached outputs.
+	 */
+	uint16_t attached_output;
+
+	/*
+	 * Hotplug activation bits for this device
+	 */
+	uint16_t hotplug_active;
+
+	/**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bits per color mode.
+	 */
+	uint32_t color_range;
+	bool color_range_auto;
+
+	/**
+	 * HDMI user specified aspect ratio
+	 */
+	enum hdmi_picture_aspect aspect_ratio;
+
+	/**
+	 * This is set if we're going to treat the device as TV-out.
+	 *
+	 * While we have these nice friendly flags for output types that ought
+	 * to decide this for us, the S-Video output on our HDMI+S-Video card
+	 * shows up as RGB1 (VGA).
+	 */
+	bool is_tv;
+
+	/* On different gens SDVOB is at different places. */
+	bool is_sdvob;
+
+	/* This is for current tv format name */
+	int tv_format_index;
+
+	/**
+	 * This is set if we treat the device as HDMI, instead of DVI.
+	 */
+	bool is_hdmi;
+	bool has_hdmi_monitor;
+	bool has_hdmi_audio;
+	bool rgb_quant_range_selectable;
+
+	/**
+	 * This is set if we detect the output of the SDVO device as LVDS
+	 * and have a valid fixed mode to use with the panel.
+	 */
+	bool is_lvds;
+
+	/**
+	 * This is the SDVO fixed panel mode pointer.
+	 */
+	struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+	/* DDC bus used by this SDVO encoder */
+	uint8_t ddc_bus;
+
+	/*
+	 * The SDVO flags get lost in the round trip: dtd->adjusted_mode->dtd.
+	 */
+	uint8_t dtd_sdvo_flags;
+};
+
+struct intel_sdvo_connector {
+	struct intel_connector base;
+
+	/* Mark the type of connector */
+	uint16_t output_flag;
+
+	enum hdmi_force_audio force_audio;
+
+	/* This contains all currently supported TV formats */
+	u8 tv_format_supported[TV_FORMAT_NUM];
+	int   format_supported_num;
+	struct drm_property *tv_format;
+
+	/* add the property for the SDVO-TV */
+	struct drm_property *left;
+	struct drm_property *right;
+	struct drm_property *top;
+	struct drm_property *bottom;
+	struct drm_property *hpos;
+	struct drm_property *vpos;
+	struct drm_property *contrast;
+	struct drm_property *saturation;
+	struct drm_property *hue;
+	struct drm_property *sharpness;
+	struct drm_property *flicker_filter;
+	struct drm_property *flicker_filter_adaptive;
+	struct drm_property *flicker_filter_2d;
+	struct drm_property *tv_chroma_filter;
+	struct drm_property *tv_luma_filter;
+	struct drm_property *dot_crawl;
+
+	/* add the property for the SDVO-TV/LVDS */
+	struct drm_property *brightness;
+
+	/* Variables recording the current settings for the above properties */
+	u32	left_margin, right_margin, top_margin, bottom_margin;
+
+	/* This is to get the range of the margins. */
+	u32	max_hscan,  max_vscan;
+	u32	max_hpos, cur_hpos;
+	u32	max_vpos, cur_vpos;
+	u32	cur_brightness, max_brightness;
+	u32	cur_contrast,	max_contrast;
+	u32	cur_saturation, max_saturation;
+	u32	cur_hue,	max_hue;
+	u32	cur_sharpness,	max_sharpness;
+	u32	cur_flicker_filter,		max_flicker_filter;
+	u32	cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
+	u32	cur_flicker_filter_2d,		max_flicker_filter_2d;
+	u32	cur_tv_chroma_filter,	max_tv_chroma_filter;
+	u32	cur_tv_luma_filter,	max_tv_luma_filter;
+	u32	cur_dot_crawl,	max_dot_crawl;
+};
+
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_sdvo, base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+	return to_sdvo(intel_attached_encoder(connector));
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+	return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+			      struct intel_sdvo_connector *intel_sdvo_connector,
+			      int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+				   struct intel_sdvo_connector *intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 bval = val, cval = val;
+	int i;
+
+	if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+		I915_WRITE(intel_sdvo->sdvo_reg, val);
+		POSTING_READ(intel_sdvo->sdvo_reg);
+		/*
+		 * HW workaround: this needs to be written twice, as the
+		 * first write may otherwise get masked.
+		 */
+		if (HAS_PCH_IBX(dev)) {
+			I915_WRITE(intel_sdvo->sdvo_reg, val);
+			POSTING_READ(intel_sdvo->sdvo_reg);
+		}
+		return;
+	}
+
+	if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+		cval = I915_READ(GEN3_SDVOC);
+	else
+		bval = I915_READ(GEN3_SDVOB);
+
+	/*
+	 * Write the registers twice for luck. Sometimes,
+	 * writing them only once doesn't appear to 'stick'.
+	 * The BIOS does this too. Yay, magic
+	 */
+	for (i = 0; i < 2; i++) {
+		I915_WRITE(GEN3_SDVOB, bval);
+		POSTING_READ(GEN3_SDVOB);
+		I915_WRITE(GEN3_SDVOC, cval);
+		POSTING_READ(GEN3_SDVOC);
+	}
+}
+
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = intel_sdvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr,
+		},
+		{
+			.addr = intel_sdvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = ch,
+		}
+	};
+	int ret;
+
+	ret = i2c_transfer(intel_sdvo->i2c, msgs, 2);
+	if (ret == 2)
+		return true;
+
+	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+	return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+	u8 cmd;
+	const char *name;
+} sdvo_cmd_names[] = {
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+	/* Add the op code for SDVO enhancements */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+	/* HDMI op code */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
+
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+				   const void *args, int args_len)
+{
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
+	for (i = 0; i < args_len; i++) {
+		BUF_PRINT("%02X ", ((u8 *)args)[i]);
+	}
+	for (; i < 8; i++) {
+		BUF_PRINT("   ");
+	}
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+		if (cmd == sdvo_cmd_names[i].cmd) {
+			BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+		BUF_PRINT("(%02X)", cmd);
+	}
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
+}
+
+static const char * const cmd_status_names[] = {
+	"Power on",
+	"Success",
+	"Not supported",
+	"Invalid arg",
+	"Pending",
+	"Target not specified",
+	"Scaling not supported"
+};
+
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+				 const void *args, int args_len)
+{
+	u8 *buf, status;
+	struct i2c_msg *msgs;
+	int i, ret = true;
+
+	/* Would be simpler to allocate both in one go? */
+	buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+	if (!buf)
+		return false;
+
+	msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+	if (!msgs) {
+		kfree(buf);
+		return false;
+	}
+
+	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
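+	/*
+	 * Build one 2-byte i2c write per argument (register SDVO_I2C_ARG_0 - i,
+	 * then the value), followed by the opcode write and a read back of the
+	 * status register from the device.
+	 */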
+	for (i = 0; i < args_len; i++) {
+		msgs[i].addr = intel_sdvo->slave_addr;
+		msgs[i].flags = 0;
+		msgs[i].len = 2;
+		msgs[i].buf = buf + 2*i;
+		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+		buf[2*i + 1] = ((u8 *)args)[i];
+	}
+	msgs[i].addr = intel_sdvo->slave_addr;
+	msgs[i].flags = 0;
+	msgs[i].len = 2;
+	msgs[i].buf = buf + 2*i;
+	buf[2*i + 0] = SDVO_I2C_OPCODE;
+	buf[2*i + 1] = cmd;
+
+	/* the following two are to read the response */
+	status = SDVO_I2C_CMD_STATUS;
+	msgs[i+1].addr = intel_sdvo->slave_addr;
+	msgs[i+1].flags = 0;
+	msgs[i+1].len = 1;
+	msgs[i+1].buf = &status;
+
+	msgs[i+2].addr = intel_sdvo->slave_addr;
+	msgs[i+2].flags = I2C_M_RD;
+	msgs[i+2].len = 1;
+	msgs[i+2].buf = &status;
+
+	ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+		ret = false;
+		goto out;
+	}
+	if (ret != i+3) {
+		/* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+		ret = false;
+	}
+
+out:
+	kfree(msgs);
+	kfree(buf);
+	return ret;
+}
+
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+				     void *response, int response_len)
+{
+	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
+	u8 status;
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
+
+	/*
+	 * The documentation states that all commands will be
+	 * processed within 15µs, and that we need only poll
+	 * the status byte a maximum of 3 times in order for the
+	 * command to be complete.
+	 *
+	 * Check 5 times in case the hardware failed to read the docs.
+	 *
+	 * Also beware that the first response by many devices is to
+	 * reply PENDING and stall for time. TVs are notorious for
+	 * requiring longer than specified to complete their replies.
+	 * Originally (in the DDX long ago), the delay was only ever 15ms
+	 * with an additional delay of 30ms applied for TVs added later after
+	 * many experiments. To accommodate both sets of delays, we do a
+	 * sequence of slow checks if the device is falling behind and fails
+	 * to reply within 5*15µs.
+	 */
+	if (!intel_sdvo_read_byte(intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+		if (retry < 10)
+			msleep(15);
+		else
+			udelay(15);
+
+		if (!intel_sdvo_read_byte(intel_sdvo,
+					  SDVO_I2C_CMD_STATUS,
+					  &status))
+			goto log_fail;
+	}
+
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
+	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+		BUF_PRINT("(%s)", cmd_status_names[status]);
+	else
+		BUF_PRINT("(??? %d)", status);
+
+	if (status != SDVO_CMD_STATUS_SUCCESS)
+		goto log_fail;
+
+	/* Read the command response */
+	for (i = 0; i < response_len; i++) {
+		if (!intel_sdvo_read_byte(intel_sdvo,
+					  SDVO_I2C_RETURN_0 + i,
+					  &((u8 *)response)[i]))
+			goto log_fail;
+		BUF_PRINT(" %02X", ((u8 *)response)[i]);
+	}
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
+	return true;
+
+log_fail:
+	DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
+	return false;
+}
+
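+/*
+ * The SDVO pixel multiplier keeps the clock on the SDVO bus within its
+ * working range: modes slower than 100 MHz are sent at 2x or 4x the dot
+ * clock (presumably to stay inside the window reported by the input
+ * pixel clock range command).
+ */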
+static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode)
+{
+	if (adjusted_mode->crtc_clock >= 100000)
+		return 1;
+	else if (adjusted_mode->crtc_clock >= 50000)
+		return 2;
+	else
+		return 4;
+}
+
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+					      u8 ddc_bus)
+{
+	/* This must be the immediately preceding write before the i2c xfer */
+	return intel_sdvo_write_cmd(intel_sdvo,
+				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+				    &ddc_bus, 1);
+}
+
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_set_target_input_args targets = {0};
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_INPUT,
+				    &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+	struct intel_sdvo_get_trained_inputs_response response;
+
+	BUILD_BUG_ON(sizeof(response) != 1);
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+				  &response, sizeof(response)))
+		return false;
+
+	*input_1 = response.input0_trained;
+	*input_2 = response.input1_trained;
+	return true;
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 outputs)
+{
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ACTIVE_OUTPUTS,
+				    &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 *outputs)
+{
+	return intel_sdvo_get_value(intel_sdvo,
+				    SDVO_CMD_GET_ACTIVE_OUTPUTS,
+				    outputs, sizeof(*outputs));
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+					       int mode)
+{
+	u8 state = SDVO_ENCODER_STATE_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		state = SDVO_ENCODER_STATE_ON;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		state = SDVO_ENCODER_STATE_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		state = SDVO_ENCODER_STATE_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		state = SDVO_ENCODER_STATE_OFF;
+		break;
+	}
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+						   int *clock_min,
+						   int *clock_max)
+{
+	struct intel_sdvo_pixel_clock_range clocks;
+
+	BUILD_BUG_ON(sizeof(clocks) != 4);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+				  &clocks, sizeof(clocks)))
+		return false;
+
+	/* Convert the values from units of 10 kHz to kHz. */
+	*clock_min = clocks.min * 10;
+	*clock_max = clocks.max * 10;
+	return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+					 u16 outputs)
+{
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_OUTPUT,
+				    &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+				  struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+				  struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+					 struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_timing(intel_sdvo,
+				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+					 struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_timing(intel_sdvo,
+				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
+					struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_get_timing(intel_sdvo,
+				     SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+					 uint16_t clock,
+					 uint16_t width,
+					 uint16_t height)
+{
+	struct intel_sdvo_preferred_input_timing_args args;
+
+	memset(&args, 0, sizeof(args));
+	args.clock = clock;
+	args.width = width;
+	args.height = height;
+	args.interlace = 0;
+
+	if (intel_sdvo->is_lvds &&
+	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+		args.scaled = 1;
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+				    &args, sizeof(args));
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+						  struct intel_sdvo_dtd *dtd)
+{
+	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+				    &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+				     &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
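+/*
+ * Pack a drm_display_mode into the 16-byte SDVO DTD (two 8-byte parts).
+ * The layout mirrors an EDID detailed timing descriptor: the low 8 bits
+ * of each timing value get their own byte, the high bits are packed into
+ * shared nibbles, and the clock is stored in 10 kHz units.
+ */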
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+					 const struct drm_display_mode *mode)
+{
+	uint16_t width, height;
+	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+	uint16_t h_sync_offset, v_sync_offset;
+	int mode_clock;
+
+	memset(dtd, 0, sizeof(*dtd));
+
+	width = mode->hdisplay;
+	height = mode->vdisplay;
+
+	/* do some mode translations */
+	h_blank_len = mode->htotal - mode->hdisplay;
+	h_sync_len = mode->hsync_end - mode->hsync_start;
+
+	v_blank_len = mode->vtotal - mode->vdisplay;
+	v_sync_len = mode->vsync_end - mode->vsync_start;
+
+	h_sync_offset = mode->hsync_start - mode->hdisplay;
+	v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+	mode_clock = mode->clock;
+	mode_clock /= 10;
+	dtd->part1.clock = mode_clock;
+
+	dtd->part1.h_active = width & 0xff;
+	dtd->part1.h_blank = h_blank_len & 0xff;
+	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+		((h_blank_len >> 8) & 0xf);
+	dtd->part1.v_active = height & 0xff;
+	dtd->part1.v_blank = v_blank_len & 0xff;
+	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+		((v_blank_len >> 8) & 0xf);
+
+	dtd->part2.h_sync_off = h_sync_offset & 0xff;
+	dtd->part2.h_sync_width = h_sync_len & 0xff;
+	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+		(v_sync_len & 0xf);
+	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+		((v_sync_len & 0x30) >> 4);
+
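+	/* 0x18 appears to match the EDID DTD flag encoding for
+	 * digital separate sync. */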
+	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+}
+
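+/* Unpack an SDVO DTD back into a drm_display_mode; the inverse of
+ * intel_sdvo_get_dtd_from_mode() above. */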
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
+					 const struct intel_sdvo_dtd *dtd)
+{
+	struct drm_display_mode mode = {};
+
+	mode.hdisplay = dtd->part1.h_active;
+	mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+	mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+	mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+	mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+	mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+	mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+	mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+	mode.vdisplay = dtd->part1.v_active;
+	mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+	mode.vsync_start = mode.vdisplay;
+	mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+	mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+	mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+	mode.vsync_end = mode.vsync_start +
+		(dtd->part2.v_sync_off_width & 0xf);
+	mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+	mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+	mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+	mode.clock = dtd->part1.clock * 10;
+
+	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+		mode.flags |= DRM_MODE_FLAG_INTERLACE;
+	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+		mode.flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NHSYNC;
+	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+		mode.flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+	drm_mode_set_crtcinfo(&mode, 0);
+
+	drm_mode_copy(pmode, &mode);
+}
+
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_encode encode;
+
+	BUILD_BUG_ON(sizeof(encode) != 2);
+	return intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPP_ENCODE,
+				  &encode, sizeof(encode));
+}
+
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+				  uint8_t mode)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+				       uint8_t mode)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+	int i, j;
+	uint8_t set_buf_index[2];
+	uint8_t av_split;
+	uint8_t buf_size;
+	uint8_t buf[48];
+	uint8_t *pos;
+
+	intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+	for (i = 0; i <= av_split; i++) {
+		set_buf_index[0] = i; set_buf_index[1] = 0;
+		intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+				     set_buf_index, 2);
+		intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+		intel_sdvo_read_response(encoder, &buf_size, 1);
+
+		pos = buf;
+		for (j = 0; j <= buf_size; j += 8) {
+			intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+					     NULL, 0);
+			intel_sdvo_read_response(encoder, pos, 8);
+			pos += 8;
+		}
+	}
+}
+#endif
+
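+/*
+ * Infoframes are pushed to the SDVO device through its HBUF interface:
+ * select a buffer index, query its size, write the payload 8 bytes at a
+ * time (zero-padded), and finally set the transmission rate.
+ */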
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+				       unsigned if_index, uint8_t tx_rate,
+				       const uint8_t *data, unsigned length)
+{
+	uint8_t set_buf_index[2] = { if_index, 0 };
+	uint8_t hbuf_size, tmp[8];
+	int i;
+
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
+				  set_buf_index, 2))
+		return false;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+				  &hbuf_size, 1))
+		return false;
+
+	/* Buffer size is 0 based, hooray! */
+	hbuf_size++;
+
+	DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+		      if_index, length, hbuf_size);
+
+	for (i = 0; i < hbuf_size; i += 8) {
+		memset(tmp, 0, 8);
+		if (i < length)
+			memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+		if (!intel_sdvo_set_value(intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
+					  tmp, 8))
+			return false;
+	}
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
+				    &tx_rate, 1);
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+					 const struct drm_display_mode *adjusted_mode)
+{
+	uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+	struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	union hdmi_infoframe frame;
+	int ret;
+	ssize_t len;
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+						       adjusted_mode);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return false;
+	}
+
+	if (intel_sdvo->rgb_quant_range_selectable) {
+		if (intel_crtc->config->limited_color_range)
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_LIMITED;
+		else
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
+	}
+
+	len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+	if (len < 0)
+		return false;
+
+	return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+					  SDVO_HBUF_TX_VSYNC,
+					  sdvo_data, sizeof(sdvo_data));
+}
+
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_tv_format format;
+	uint32_t format_map;
+
+	format_map = 1 << intel_sdvo->tv_format_index;
+	memset(&format, 0, sizeof(format));
+	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TV_FORMAT,
+				    &format, sizeof(format));
+}
+
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+					const struct drm_display_mode *mode)
+{
+	struct intel_sdvo_dtd output_dtd;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  intel_sdvo->attached_output))
+		return false;
+
+	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+		return false;
+
+	return true;
+}
+
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
+static bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct intel_sdvo_dtd input_dtd;
+
+	/* Reset the input timing to the screen. Assume always input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return false;
+
+	if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+						      mode->clock / 10,
+						      mode->hdisplay,
+						      mode->vdisplay))
+		return false;
+
+	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+						   &input_dtd))
+		return false;
+
+	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+	intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
+
+	return true;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
+{
+	unsigned dotclock = pipe_config->port_clock;
+	struct dpll *clock = &pipe_config->dpll;
+
+	/* SDVO TV has fixed PLL values that depend on its clock range,
+	   this mirrors the VBIOS setting. */
+	if (dotclock >= 100000 && dotclock < 140500) {
+		clock->p1 = 2;
+		clock->p2 = 10;
+		clock->n = 3;
+		clock->m1 = 16;
+		clock->m2 = 8;
+	} else if (dotclock >= 140500 && dotclock <= 200000) {
+		clock->p1 = 1;
+		clock->p2 = 10;
+		clock->n = 6;
+		clock->m1 = 12;
+		clock->m2 = 8;
+	} else {
+		WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
+	}
+
+	pipe_config->clock_set = true;
+}
+
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+				      struct intel_crtc_state *pipe_config)
+{
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *mode = &pipe_config->base.mode;
+
+	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+	pipe_config->pipe_bpp = 8*3;
+
+	if (HAS_PCH_SPLIT(encoder->base.dev))
+		pipe_config->has_pch_encoder = true;
+
+	/* We need to construct preferred input timings based on our
+	 * output timings.  To do that, we have to set the output
+	 * timings, even though this isn't really the right place in
+	 * the sequence to do it. Oh well.
+	 */
+	if (intel_sdvo->is_tv) {
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+			return false;
+
+		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+							   mode,
+							   adjusted_mode);
+		pipe_config->sdvo_tv_clock = true;
+	} else if (intel_sdvo->is_lvds) {
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+							     intel_sdvo->sdvo_lvds_fixed_mode))
+			return false;
+
+		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+							   mode,
+							   adjusted_mode);
+	}
+
+	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
+	 * SDVO device will factor out the multiplier during mode_set.
+	 */
+	pipe_config->pixel_multiplier =
+		intel_sdvo_get_pixel_multiplier(adjusted_mode);
+
+	pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+
+	if (intel_sdvo->color_range_auto) {
+		/* See CEA-861-E - 5.1 Default Encoding Parameters */
+		/* FIXME: This bit is only valid when using TMDS encoding and 8
+		 * bit per color mode. */
+		if (pipe_config->has_hdmi_sink &&
+		    drm_match_cea_mode(adjusted_mode) > 1)
+			pipe_config->limited_color_range = true;
+	} else {
+		if (pipe_config->has_hdmi_sink &&
+		    intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
+			pipe_config->limited_color_range = true;
+	}
+
+	/* Clock computation needs to happen after pixel multiplier. */
+	if (intel_sdvo->is_tv)
+		i9xx_adjust_sdvo_tv_clock(pipe_config);
+
+	/* Set user selected PAR to incoming mode's member */
+	if (intel_sdvo->is_hdmi)
+		adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio;
+
+	return true;
+}
+
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_display_mode *mode = &crtc->config->base.mode;
+	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
+	u32 sdvox;
+	struct intel_sdvo_in_out_map in_out;
+	struct intel_sdvo_dtd input_dtd, output_dtd;
+	int rate;
+
+	if (!mode)
+		return;
+
+	/* First, set the input mapping for the first input to our controlled
+	 * output. This is only correct if we're a single-input device, in
+	 * which case the first input is the output from the appropriate SDVO
+	 * channel on the motherboard.  In a two-input device, the first input
+	 * will be SDVOB and the second SDVOC.
+	 */
+	in_out.in0 = intel_sdvo->attached_output;
+	in_out.in1 = 0;
+
+	intel_sdvo_set_value(intel_sdvo,
+			     SDVO_CMD_SET_IN_OUT_MAP,
+			     &in_out, sizeof(in_out));
+
+	/* Set the output timings to the screen */
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  intel_sdvo->attached_output))
+		return;
+
+	/* lvds has a special fixed output timing. */
+	if (intel_sdvo->is_lvds)
+		intel_sdvo_get_dtd_from_mode(&output_dtd,
+					     intel_sdvo->sdvo_lvds_fixed_mode);
+	else
+		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+		DRM_INFO("Setting output timings on %s failed\n",
+			 SDVO_NAME(intel_sdvo));
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return;
+
+	if (crtc->config->has_hdmi_sink) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+	} else {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+	}
+
+	if (intel_sdvo->is_tv &&
+	    !intel_sdvo_set_tv_format(intel_sdvo))
+		return;
+
+	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+
+	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+		DRM_INFO("Setting input timings on %s failed\n",
+			 SDVO_NAME(intel_sdvo));
+
+	switch (crtc->config->pixel_multiplier) {
+	default:
+		WARN(1, "unknown pixel multiplier specified\n");
+		/* fall through */
+	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+	}
+	if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+		return;
+
+	/* Set the SDVO control regs. */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* The real mode polarity is set by the SDVO commands, using
+		 * struct intel_sdvo_dtd. */
+		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+		if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+			sdvox |= HDMI_COLOR_RANGE_16_235;
+		if (INTEL_INFO(dev)->gen < 5)
+			sdvox |= SDVO_BORDER_ENABLE;
+	} else {
+		sdvox = I915_READ(intel_sdvo->sdvo_reg);
+		switch (intel_sdvo->sdvo_reg) {
+		case GEN3_SDVOB:
+			sdvox &= SDVOB_PRESERVE_MASK;
+			break;
+		case GEN3_SDVOC:
+			sdvox &= SDVOC_PRESERVE_MASK;
+			break;
+		}
+		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+	}
+
+	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+	else
+		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
+
+	if (intel_sdvo->has_hdmi_audio)
+		sdvox |= SDVO_AUDIO_ENABLE;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* done in crtc_mode_set as the dpll_md reg must be written early */
+	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		/* done in crtc_mode_set as it lives inside the dpll register */
+	} else {
+		sdvox |= (crtc->config->pixel_multiplier - 1)
+			<< SDVO_PORT_MULTIPLY_SHIFT;
+	}
+
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+	    INTEL_INFO(dev)->gen < 5)
+		sdvox |= SDVO_STALL_SELECT;
+	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector =
+		to_intel_sdvo_connector(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	u16 active_outputs = 0;
+
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (active_outputs & intel_sdvo_connector->output_flag)
+		return true;
+	else
+		return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+	u16 active_outputs = 0;
+	u32 tmp;
+
+	tmp = I915_READ(intel_sdvo->sdvo_reg);
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_sdvo_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+	struct intel_sdvo_dtd dtd;
+	int encoder_pixel_multiplier = 0;
+	int dotclock;
+	u32 flags = 0, sdvox;
+	u8 val;
+	bool ret;
+
+	sdvox = I915_READ(intel_sdvo->sdvo_reg);
+
+	ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
+	if (!ret) {
+		/* Some sdvo encoders are not spec compliant and don't
+		 * implement the mandatory get_timings function. */
+		DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+		pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
+	} else {
+		if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+
+		if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+	}
+
+	pipe_config->base.adjusted_mode.flags |= flags;
+
+	/*
+	 * Pixel multiplier readout is tricky: Only on i915g/gm it is stored in
+	 * the sdvo port register, on all other platforms it is part of the dpll
+	 * state. Since the general pipe state readout happens before the
+	 * encoder->get_config hook, we already have a valid pixel multiplier
+	 * on all other platforms.
+	 */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pipe_config->pixel_multiplier =
+			((sdvox & SDVO_PORT_MULTIPLY_MASK)
+			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
+	}
+
+	dotclock = pipe_config->port_clock;
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+	/* Cross check the port pixel multiplier with the sdvo encoder state. */
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+				 &val, 1)) {
+		switch (val) {
+		case SDVO_CLOCK_RATE_MULT_1X:
+			encoder_pixel_multiplier = 1;
+			break;
+		case SDVO_CLOCK_RATE_MULT_2X:
+			encoder_pixel_multiplier = 2;
+			break;
+		case SDVO_CLOCK_RATE_MULT_4X:
+			encoder_pixel_multiplier = 4;
+			break;
+		}
+	}
+
+	if (sdvox & HDMI_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+				 &val, 1)) {
+		if (val == SDVO_ENCODE_HDMI)
+			pipe_config->has_hdmi_sink = true;
+	}
+
+	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	u32 temp;
+
+	intel_sdvo_set_active_outputs(intel_sdvo, 0);
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_OFF);
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+
+	temp &= ~SDVO_ENABLE;
+	intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+	/*
+	 * HW workaround for IBX, we need to move the port
+	 * to transcoder A after disabling it to allow the
+	 * matching DP port to be enabled on transcoder A.
+	 */
+	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+		temp &= ~SDVO_PIPE_B_SELECT;
+		temp |= SDVO_ENABLE;
+		intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+		temp &= ~SDVO_ENABLE;
+		intel_sdvo_write_sdvox(intel_sdvo, temp);
+	}
+}
+
+static void pch_disable_sdvo(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+{
+	intel_disable_sdvo(encoder);
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	u32 temp;
+	bool input1, input2;
+	int i;
+	bool success;
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	temp |= SDVO_ENABLE;
+	intel_sdvo_write_sdvox(intel_sdvo, temp);
+
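+	/* Give the encoder a couple of frames to train its inputs
+	 * before polling the sync status. */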
+	for (i = 0; i < 2; i++)
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+	/* Warn if the device reported failure to sync.
+	 * A lot of SDVO devices fail to notify of sync, but it's
+	 * a given that if the status is a success, we succeeded.
+	 */
+	if (success && !input1) {
+		DRM_DEBUG_KMS("First %s output reported failure to "
+				"sync\n", SDVO_NAME(intel_sdvo));
+	}
+
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_ON);
+	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (intel_sdvo->pixel_clock_min > mode->clock)
+		return MODE_CLOCK_LOW;
+
+	if (intel_sdvo->pixel_clock_max < mode->clock)
+		return MODE_CLOCK_HIGH;
+
+	if (intel_sdvo->is_lvds) {
+		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+	BUILD_BUG_ON(sizeof(*caps) != 8);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_DEVICE_CAPS,
+				  caps, sizeof(*caps)))
+		return false;
+
+	DRM_DEBUG_KMS("SDVO capabilities:\n"
+		      "  vendor_id: %d\n"
+		      "  device_id: %d\n"
+		      "  device_rev_id: %d\n"
+		      "  sdvo_version_major: %d\n"
+		      "  sdvo_version_minor: %d\n"
+		      "  sdvo_inputs_mask: %d\n"
+		      "  smooth_scaling: %d\n"
+		      "  sharp_scaling: %d\n"
+		      "  up_scaling: %d\n"
+		      "  down_scaling: %d\n"
+		      "  stall_support: %d\n"
+		      "  output_flags: %d\n",
+		      caps->vendor_id,
+		      caps->device_id,
+		      caps->device_rev_id,
+		      caps->sdvo_version_major,
+		      caps->sdvo_version_minor,
+		      caps->sdvo_inputs_mask,
+		      caps->smooth_scaling,
+		      caps->sharp_scaling,
+		      caps->up_scaling,
+		      caps->down_scaling,
+		      caps->stall_support,
+		      caps->output_flags);
+
+	return true;
+}
+
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	uint16_t hotplug;
+
+	if (!I915_HAS_HOTPLUG(dev))
+		return 0;
+
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return 0;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+					&hotplug, sizeof(hotplug)))
+		return 0;
+
+	return hotplug;
+}
+
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+			&intel_sdvo->hotplug_active, 2);
+}
+
+static bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+	/* Is there more than one type of output? */
+	return hweight16(intel_sdvo->caps.output_flags) > 1;
+}
+
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+	return drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+						    dev_priv->vbt.crt_ddc_pin));
+}
+
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	enum drm_connector_status status;
+	struct edid *edid;
+
+	edid = intel_sdvo_get_edid(connector);
+
+	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+		/*
+		 * Don't use bus 1 as the argument of the DDC bus switch to
+		 * get the EDID; it is reserved for the SDVO SPD ROM.
+		 */
+		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+			intel_sdvo->ddc_bus = ddc;
+			edid = intel_sdvo_get_edid(connector);
+			if (edid)
+				break;
+		}
+		/*
+		 * If we found the EDID on the other bus,
+		 * assume that is the correct DDC bus.
+		 */
+		if (edid == NULL)
+			intel_sdvo->ddc_bus = saved_ddc;
+	}
+
+	/*
+	 * When there is no edid and no monitor is connected with VGA
+	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
+	 */
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
+
+	status = connector_status_unknown;
+	if (edid != NULL) {
+		/* DDC bus is shared, match EDID to connector type */
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
+			if (intel_sdvo->is_hdmi) {
+				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+				intel_sdvo->rgb_quant_range_selectable =
+					drm_rgb_quant_range_selectable(edid);
+			}
+		} else {
+			status = connector_status_disconnected;
+		}
+		kfree(edid);
+	}
+
+	if (status == connector_status_connected) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+			intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+	}
+
+	return status;
+}
+
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+				  struct edid *edid)
+{
+	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+	bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+		      connector_is_digital, monitor_is_digital);
+	return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+	uint16_t response;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	enum drm_connector_status ret;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_ATTACHED_DISPLAYS,
+				  &response, 2))
+		return connector_status_unknown;
+
+	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+		      response & 0xff, response >> 8,
+		      intel_sdvo_connector->output_flag);
+
+	if (response == 0)
+		return connector_status_disconnected;
+
+	intel_sdvo->attached_output = response;
+
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+	intel_sdvo->rgb_quant_range_selectable = false;
+
+	if ((intel_sdvo_connector->output_flag & response) == 0)
+		ret = connector_status_disconnected;
+	else if (IS_TMDS(intel_sdvo_connector))
+		ret = intel_sdvo_tmds_sink_detect(connector);
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+							      edid))
+				ret = connector_status_connected;
+			else
+				ret = connector_status_disconnected;
+
+			kfree(edid);
+		} else {
+			ret = connector_status_connected;
+		}
+	}
+
+	/* May need to update the encoder flags, e.g. the clock for SDVO TV. */
+	if (ret == connector_status_connected) {
+		intel_sdvo->is_tv = false;
+		intel_sdvo->is_lvds = false;
+
+		if (response & SDVO_TV_MASK)
+			intel_sdvo->is_tv = true;
+		if (response & SDVO_LVDS_MASK)
+			intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+	}
+
+	return ret;
+}
+
+static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+	struct edid *edid;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	/* set the bus switch and get the modes */
+	edid = intel_sdvo_get_edid(connector);
+
+	/*
+	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
+	 * link between analog and digital outputs. So, if the regular SDVO
+	 * DDC fails, check to see if the analog output is disconnected, in
+	 * which case we'll look there for the digital DDC data.
+	 */
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
+
+	if (edid != NULL) {
+		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+						      edid)) {
+			drm_mode_connector_update_edid_property(connector, edid);
+			drm_add_edid_modes(connector, edid);
+		}
+
+		kfree(edid);
+	}
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note!  This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+		   416, 0, 200, 201, 232, 233, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+		   416, 0, 240, 241, 272, 273, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+		   496, 0, 300, 301, 332, 333, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+		   736, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+		   736, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+		   736, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+		   800, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+		   800, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+		   816, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+		   816, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+		   816, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+		   816, 0, 540, 541, 572, 573, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+		   816, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+		   864, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+		   896, 0, 600, 601, 632, 633, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+		   928, 0, 624, 625, 656, 657, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+		   1016, 0, 766, 767, 798, 799, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+		   1120, 0, 768, 769, 800, 801, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+		   1376, 0, 1024, 1025, 1056, 1057, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_sdtv_resolution_request tv_res;
+	uint32_t reply = 0, format_map = 0;
+	int i;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	/* Read the list of supported input resolutions for the selected TV
+	 * format.
+	 */
+	format_map = 1 << intel_sdvo->tv_format_index;
+	memcpy(&tv_res, &format_map,
+	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+	if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+		return;
+
+	BUILD_BUG_ON(sizeof(tv_res) != 3);
+	if (!intel_sdvo_write_cmd(intel_sdvo,
+				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+				  &tv_res, sizeof(tv_res)))
+		return;
+	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+		if (reply & (1 << i)) {
+			struct drm_display_mode *nmode;
+			nmode = drm_mode_duplicate(connector->dev,
+						   &sdvo_tv_modes[i]);
+			if (nmode)
+				drm_mode_probed_add(connector, nmode);
+		}
+}
+
+static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_display_mode *newmode;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	/*
+	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+	 * SDVO->LVDS transcoders can't cope with the EDID mode.
+	 */
+	if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
+		newmode = drm_mode_duplicate(connector->dev,
+					     dev_priv->vbt.sdvo_lvds_vbt_mode);
+		if (newmode != NULL) {
+			/* Guarantee the mode is preferred */
+			newmode->type = (DRM_MODE_TYPE_PREFERRED |
+					 DRM_MODE_TYPE_DRIVER);
+			drm_mode_probed_add(connector, newmode);
+		}
+	}
+
+	/*
+	 * Attempt to get the mode list from DDC.
+	 * Assume that the preferred modes are
+	 * arranged in priority order.
+	 */
+	intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+	list_for_each_entry(newmode, &connector->probed_modes, head) {
+		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+			intel_sdvo->sdvo_lvds_fixed_mode =
+				drm_mode_duplicate(connector->dev, newmode);
+
+			intel_sdvo->is_lvds = true;
+			break;
+		}
+	}
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+	if (IS_TV(intel_sdvo_connector))
+		intel_sdvo_get_tv_modes(connector);
+	else if (IS_LVDS(intel_sdvo_connector))
+		intel_sdvo_get_lvds_modes(connector);
+	else
+		intel_sdvo_get_ddc_modes(connector);
+
+	return !list_empty(&connector->probed_modes);
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+	drm_connector_cleanup(connector);
+	kfree(intel_sdvo_connector);
+}
+
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
+
+	return has_audio;
+}
+
+static int
+intel_sdvo_set_property(struct drm_connector *connector,
+			struct drm_property *property,
+			uint64_t val)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint16_t temp_value;
+	uint8_t cmd;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
+			return 0;
+
+		intel_sdvo_connector->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (has_audio == intel_sdvo->has_hdmi_audio)
+			return 0;
+
+		intel_sdvo->has_hdmi_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_sdvo->color_range_auto;
+		uint32_t old_range = intel_sdvo->color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_sdvo->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_sdvo->color_range_auto = false;
+			intel_sdvo->color_range = 0;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_sdvo->color_range_auto = false;
+			/* FIXME: this bit is only valid when using TMDS
+			 * encoding and 8 bit per color mode. */
+			intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_sdvo->color_range_auto &&
+		    old_range == intel_sdvo->color_range)
+			return 0;
+
+		goto done;
+	}
+
+	if (property == connector->dev->mode_config.aspect_ratio_property) {
+		switch (val) {
+		case DRM_MODE_PICTURE_ASPECT_NONE:
+			intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_4_3:
+			intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_16_9:
+			intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+			break;
+		default:
+			return -EINVAL;
+		}
+		goto done;
+	}
+
+#define CHECK_PROPERTY(name, NAME) \
+	if (intel_sdvo_connector->name == property) { \
+		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+		if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+		cmd = SDVO_CMD_SET_##NAME; \
+		intel_sdvo_connector->cur_##name = temp_value; \
+		goto set_value; \
+	}
+
+	if (property == intel_sdvo_connector->tv_format) {
+		if (val >= TV_FORMAT_NUM)
+			return -EINVAL;
+
+		if (intel_sdvo->tv_format_index ==
+		    intel_sdvo_connector->tv_format_supported[val])
+			return 0;
+
+		intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+		goto done;
+	} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+		temp_value = val;
+		if (intel_sdvo_connector->left == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->right, val);
+			if (intel_sdvo_connector->left_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (intel_sdvo_connector->right == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->left, val);
+			if (intel_sdvo_connector->right_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (intel_sdvo_connector->top == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->bottom, val);
+			if (intel_sdvo_connector->top_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		} else if (intel_sdvo_connector->bottom == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->top, val);
+			if (intel_sdvo_connector->bottom_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		}
+		CHECK_PROPERTY(hpos, HPOS)
+		CHECK_PROPERTY(vpos, VPOS)
+		CHECK_PROPERTY(saturation, SATURATION)
+		CHECK_PROPERTY(contrast, CONTRAST)
+		CHECK_PROPERTY(hue, HUE)
+		CHECK_PROPERTY(brightness, BRIGHTNESS)
+		CHECK_PROPERTY(sharpness, SHARPNESS)
+		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+	}
+
+	return -EINVAL; /* unknown property */
+
+set_value:
+	if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+		return -EIO;
+
+done:
+	if (intel_sdvo->base.base.crtc)
+		intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
+
+	return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_sdvo_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_sdvo_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.destroy = intel_sdvo_destroy,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+	.get_modes = intel_sdvo_get_modes,
+	.mode_valid = intel_sdvo_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+	struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
+
+	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+		drm_mode_destroy(encoder->dev,
+				 intel_sdvo->sdvo_lvds_fixed_mode);
+
+	i2c_del_adapter(&intel_sdvo->ddc);
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+	.destroy = intel_sdvo_enc_destroy,
+};
+
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+	uint16_t mask = 0;
+	unsigned int num_bits;
+
+	/* Make a mask of outputs less than or equal to our own priority in the
+	 * list.
+	 */
+	switch (sdvo->controlled_output) {
+	case SDVO_OUTPUT_LVDS1:
+		mask |= SDVO_OUTPUT_LVDS1;
+		/* fall through */
+	case SDVO_OUTPUT_LVDS0:
+		mask |= SDVO_OUTPUT_LVDS0;
+		/* fall through */
+	case SDVO_OUTPUT_TMDS1:
+		mask |= SDVO_OUTPUT_TMDS1;
+		/* fall through */
+	case SDVO_OUTPUT_TMDS0:
+		mask |= SDVO_OUTPUT_TMDS0;
+		/* fall through */
+	case SDVO_OUTPUT_RGB1:
+		mask |= SDVO_OUTPUT_RGB1;
+		/* fall through */
+	case SDVO_OUTPUT_RGB0:
+		mask |= SDVO_OUTPUT_RGB0;
+		break;
+	}
+
+	/* Count bits to find what number we are in the priority list. */
+	mask &= sdvo->caps.output_flags;
+	num_bits = hweight16(mask);
+	/* If more than 3 outputs, default to DDC bus 3 for now. */
+	if (num_bits > 3)
+		num_bits = 3;
+
+	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
+	sdvo->ddc_bus = 1 << num_bits;
+}
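+
+/*
+ * Worked example (sketch): with controlled_output == SDVO_OUTPUT_TMDS0 the
+ * switch above accumulates mask = TMDS0 | RGB1 | RGB0.  If the device's
+ * caps.output_flags reports TMDS0 and RGB0, two bits survive the masking,
+ * so num_bits == 2 and ddc_bus == (1 << 2) == SDVO_CONTROL_BUS_DDC2.
+ */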
+
+/**
+ * Choose the appropriate DDC bus for the control bus switch command for this
+ * SDVO output, based on the controlled output.
+ *
+ * DDC bus numbers are assigned in priority order: RGB outputs first, then
+ * TMDS outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo)
+{
+	struct sdvo_device_mapping *mapping;
+
+	if (sdvo->is_sdvob)
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
+
+	if (mapping->initialized)
+		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+	else
+		intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo)
+{
+	struct sdvo_device_mapping *mapping;
+	u8 pin;
+
+	if (sdvo->is_sdvob)
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
+
+	if (mapping->initialized &&
+	    intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
+		pin = mapping->i2c_pin;
+	else
+		pin = GMBUS_PIN_DPB;
+
+	sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+	/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+	 * our code totally fails once we start using gmbus. Hence fall back to
+	 * bit banging for now. */
+	intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+	intel_gmbus_force_bit(sdvo->i2c, false);
+}
+
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+	return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+	if (sdvo->is_sdvob) {
+		my_mapping = &dev_priv->sdvo_mappings[0];
+		other_mapping = &dev_priv->sdvo_mappings[1];
+	} else {
+		my_mapping = &dev_priv->sdvo_mappings[1];
+		other_mapping = &dev_priv->sdvo_mappings[0];
+	}
+
+	/* If the BIOS described our SDVO device, take advantage of it. */
+	if (my_mapping->slave_addr)
+		return my_mapping->slave_addr;
+
+	/* If the BIOS only described a different SDVO device, use the
+	 * address that it isn't using.
+	 */
+	if (other_mapping->slave_addr) {
+		if (other_mapping->slave_addr == 0x70)
+			return 0x72;
+		else
+			return 0x70;
+	}
+
+	/* No SDVO device info was found for the other DVO port, so use the
+	 * mapping assumptions we had before BIOS parsing.
+	 */
+	if (sdvo->is_sdvob)
+		return 0x70;
+	else
+		return 0x72;
+}
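+
+/*
+ * Example (sketch): on a board whose BIOS only describes the other SDVO
+ * port at slave address 0x70, the function above returns 0x72 for this
+ * one; with no BIOS information at all it falls back to the historical
+ * assumption of 0x70 for SDVOB and 0x72 for SDVOC.
+ */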
+
+static void
+intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
+{
+	struct drm_connector *drm_connector;
+	struct intel_sdvo *sdvo_encoder;
+
+	drm_connector = &intel_connector->base;
+	sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
+
+	sysfs_remove_link(&drm_connector->kdev->kobj,
+			  sdvo_encoder->ddc.dev.kobj.name);
+	intel_connector_unregister(intel_connector);
+}
+
+static int
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+			  struct intel_sdvo *encoder)
+{
+	struct drm_connector *drm_connector;
+	int ret;
+
+	drm_connector = &connector->base.base;
+	ret = drm_connector_init(encoder->base.base.dev,
+				 drm_connector,
+				 &intel_sdvo_connector_funcs,
+				 connector->base.base.connector_type);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(drm_connector,
+				 &intel_sdvo_connector_helper_funcs);
+
+	connector->base.base.interlace_allowed = 1;
+	connector->base.base.doublescan_allowed = 0;
+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+	connector->base.unregister = intel_sdvo_connector_unregister;
+
+	intel_connector_attach_encoder(&connector->base, &encoder->base);
+	ret = drm_connector_register(drm_connector);
+	if (ret < 0)
+		goto err1;
+
+	ret = sysfs_create_link(&drm_connector->kdev->kobj,
+				&encoder->ddc.dev.kobj,
+				encoder->ddc.dev.kobj.name);
+	if (ret < 0)
+		goto err2;
+
+	return 0;
+
+err2:
+	drm_connector_unregister(drm_connector);
+err1:
+	drm_connector_cleanup(drm_connector);
+
+	return ret;
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+			       struct intel_sdvo_connector *connector)
+{
+	struct drm_device *dev = connector->base.base.dev;
+
+	intel_attach_force_audio_property(&connector->base.base);
+	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+		intel_attach_broadcast_rgb_property(&connector->base.base);
+		intel_sdvo->color_range_auto = true;
+	}
+	intel_attach_aspect_ratio_property(&connector->base.base);
+	intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+}
+
+static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+{
+	struct intel_sdvo_connector *sdvo_connector;
+
+	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
+	if (!sdvo_connector)
+		return NULL;
+
+	if (intel_connector_init(&sdvo_connector->base) < 0) {
+		kfree(sdvo_connector);
+		return NULL;
+	}
+
+	return sdvo_connector;
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+	intel_sdvo_connector = intel_sdvo_connector_alloc();
+	if (!intel_sdvo_connector)
+		return false;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+	}
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+		intel_sdvo_connector->output_flag) {
+		intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+		/* Some SDVO devices have one-shot hotplug interrupts.
+		 * Ensure that they get re-enabled when an interrupt happens.
+		 */
+		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+		intel_sdvo_enable_hotplug(intel_encoder);
+	} else {
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	}
+	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo->is_hdmi = true;
+	}
+
+	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+		kfree(intel_sdvo_connector);
+		return false;
+	}
+
+	if (intel_sdvo->is_hdmi)
+		intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
+
+	return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+	intel_sdvo_connector = intel_sdvo_connector_alloc();
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+	intel_sdvo->controlled_output |= type;
+	intel_sdvo_connector->output_flag = type;
+
+	intel_sdvo->is_tv = true;
+
+	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+		kfree(intel_sdvo_connector);
+		return false;
+	}
+
+	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+		goto err;
+
+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	drm_connector_unregister(connector);
+	intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+	intel_sdvo_connector = intel_sdvo_connector_alloc();
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+	}
+
+	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+		kfree(intel_sdvo_connector);
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+	intel_sdvo_connector = intel_sdvo_connector_alloc();
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+	}
+
+	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+		kfree(intel_sdvo_connector);
+		return false;
+	}
+
+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	drm_connector_unregister(connector);
+	intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+	intel_sdvo->is_tv = false;
+	intel_sdvo->is_lvds = false;
+
+	/* Per the SDVO spec, an XXX1 function may not exist unless the
+	 * corresponding XXX0 function also exists. */
+
+	if (flags & SDVO_OUTPUT_TMDS0)
+		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+			return false;
+
+	/* TV has no XXX1 function block */
+	if (flags & SDVO_OUTPUT_SVID0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_CVBS0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_YPRPB0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_RGB0)
+		if (!intel_sdvo_analog_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+		if (!intel_sdvo_analog_init(intel_sdvo, 1))
+			return false;
+
+	if (flags & SDVO_OUTPUT_LVDS0)
+		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+			return false;
+
+	if ((flags & SDVO_OUTPUT_MASK) == 0) {
+		unsigned char bytes[2];
+
+		intel_sdvo->controlled_output = 0;
+		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+			      SDVO_NAME(intel_sdvo),
+			      bytes[0], bytes[1]);
+		return false;
+	}
+	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+	return true;
+}
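+
+/*
+ * Example (sketch): caps.output_flags == (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0)
+ * yields one DVI/HDMI connector and one VGA connector; the XXX1 variants
+ * are only initialised when both bits of the corresponding mask are set.
+ */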
+
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector, *tmp;
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+			drm_connector_unregister(connector);
+			intel_sdvo_destroy(connector);
+		}
+	}
+}
+
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+					  struct intel_sdvo_connector *intel_sdvo_connector,
+					  int type)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct intel_sdvo_tv_format format;
+	uint32_t format_map, i;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo, type))
+		return false;
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+				  &format, sizeof(format)))
+		return false;
+
+	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+	if (format_map == 0)
+		return false;
+
+	intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+		if (format_map & (1 << i))
+			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+
+	intel_sdvo_connector->tv_format =
+			drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					    "mode", intel_sdvo_connector->format_supported_num);
+	if (!intel_sdvo_connector->tv_format)
+		return false;
+
+	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+		drm_property_add_enum(
+				intel_sdvo_connector->tv_format, i,
+				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+				      intel_sdvo_connector->tv_format, 0);
+	return true;
+}
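+
+/*
+ * Sketch: if the device reports only NTSC-M and PAL-B, format_map has bits
+ * 0 and 3 set, tv_format_supported[] becomes { 0, 3 }, and the "mode" enum
+ * property above exposes the two matching tv_format_names[] entries
+ * (assuming that table follows the same bit order as the format struct).
+ */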
+
+#define ENHANCEMENT(name, NAME) do { \
+	if (enhancements.name) { \
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+			return false; \
+		intel_sdvo_connector->max_##name = data_value[0]; \
+		intel_sdvo_connector->cur_##name = response; \
+		intel_sdvo_connector->name = \
+			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+		if (!intel_sdvo_connector->name) return false; \
+		drm_object_attach_property(&connector->base, \
+					      intel_sdvo_connector->name, \
+					      intel_sdvo_connector->cur_##name); \
+		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+			      data_value[0], data_value[1], response); \
+	} \
+} while (0)
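+
+/*
+ * Illustrative expansion (sketch): ENHANCEMENT(contrast, CONTRAST) queries
+ * SDVO_CMD_GET_MAX_CONTRAST and SDVO_CMD_GET_CONTRAST, caches the maximum
+ * and current values in intel_sdvo_connector, creates a 0..max range
+ * property named "contrast" and attaches it to the connector with the
+ * current value as its initial setting.
+ */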
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+				      struct intel_sdvo_connector *intel_sdvo_connector,
+				      struct intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right margin properties */
+	if (enhancements.overscan_h) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_H,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_H,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_hscan = data_value[0];
+		intel_sdvo_connector->left_margin = data_value[0] - response;
+		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+		intel_sdvo_connector->left =
+			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->left)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->left,
+					      intel_sdvo_connector->left_margin);
+
+		intel_sdvo_connector->right =
+			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->right)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->right,
+					      intel_sdvo_connector->right_margin);
+		DRM_DEBUG_KMS("h_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	if (enhancements.overscan_v) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_V,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_V,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_vscan = data_value[0];
+		intel_sdvo_connector->top_margin = data_value[0] - response;
+		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+		intel_sdvo_connector->top =
+			drm_property_create_range(dev, 0,
+					    "top_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->top)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->top,
+					      intel_sdvo_connector->top_margin);
+
+		intel_sdvo_connector->bottom =
+			drm_property_create_range(dev, 0,
+					    "bottom_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->bottom)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->bottom,
+					      intel_sdvo_connector->bottom_margin);
+		DRM_DEBUG_KMS("v_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	ENHANCEMENT(hpos, HPOS);
+	ENHANCEMENT(vpos, VPOS);
+	ENHANCEMENT(saturation, SATURATION);
+	ENHANCEMENT(contrast, CONTRAST);
+	ENHANCEMENT(hue, HUE);
+	ENHANCEMENT(sharpness, SHARPNESS);
+	ENHANCEMENT(brightness, BRIGHTNESS);
+	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+	if (enhancements.dot_crawl) {
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_dot_crawl = 1;
+		intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+		intel_sdvo_connector->dot_crawl =
+			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+		if (!intel_sdvo_connector->dot_crawl)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->dot_crawl,
+					      intel_sdvo_connector->cur_dot_crawl);
+		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+	}
+
+	return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+					struct intel_sdvo_connector *intel_sdvo_connector,
+					struct intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	ENHANCEMENT(brightness, BRIGHTNESS);
+
+	return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+					       struct intel_sdvo_connector *intel_sdvo_connector)
+{
+	union {
+		struct intel_sdvo_enhancements_reply reply;
+		uint16_t response;
+	} enhancements;
+
+	BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+	enhancements.response = 0;
+	intel_sdvo_get_value(intel_sdvo,
+			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+			     &enhancements, sizeof(enhancements));
+	if (enhancements.response == 0) {
+		DRM_DEBUG_KMS("No enhancement is supported\n");
+		return true;
+	}
+
+	if (IS_TV(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else if (IS_LVDS(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else
+		return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+				     struct i2c_msg *msgs,
+				     int num)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+		return -EIO;
+
+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
+	.functionality	= intel_sdvo_ddc_proxy_func
+};
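+
+/*
+ * Sketch of the proxy flow: a transfer on the "SDVO DDC proxy" adapter
+ * first issues SDVO_CMD_SET_CONTROL_BUS_SWITCH to route the selected DDC
+ * bus through the encoder, then replays the messages on the underlying
+ * GMBUS/bit-banged adapter.  A hypothetical EDID offset write would be:
+ *
+ *	u8 offset = 0;
+ *	struct i2c_msg msg = { .addr = 0x50, .len = 1, .buf = &offset };
+ *	i2c_transfer(&sdvo->ddc, &msg, 1);
+ */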
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+			  struct drm_device *dev)
+{
+	sdvo->ddc.owner = THIS_MODULE;
+	sdvo->ddc.class = I2C_CLASS_DDC;
+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.algo_data = sdvo;
+	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+	return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	struct intel_sdvo *intel_sdvo;
+	int i;
+
+	intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
+	if (!intel_sdvo)
+		return false;
+
+	intel_sdvo->sdvo_reg = sdvo_reg;
+	intel_sdvo->is_sdvob = is_sdvob;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+		goto err_i2c_bus;
+
+	/* encoder type will be decided later */
+	intel_encoder = &intel_sdvo->base;
+	intel_encoder->type = INTEL_OUTPUT_SDVO;
+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+	/* Read the regs to test if we can talk to the device */
+	for (i = 0; i < 0x40; i++) {
+		u8 byte;
+
+		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+			DRM_DEBUG_KMS("No SDVO device found on %s\n",
+				      SDVO_NAME(intel_sdvo));
+			goto err;
+		}
+	}
+
+	intel_encoder->compute_config = intel_sdvo_compute_config;
+	if (HAS_PCH_SPLIT(dev)) {
+		intel_encoder->disable = pch_disable_sdvo;
+		intel_encoder->post_disable = pch_post_disable_sdvo;
+	} else {
+		intel_encoder->disable = intel_disable_sdvo;
+	}
+	intel_encoder->pre_enable = intel_sdvo_pre_enable;
+	intel_encoder->enable = intel_enable_sdvo;
+	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+	intel_encoder->get_config = intel_sdvo_get_config;
+
+	/* By default, SDVO LVDS is false */
+	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+		goto err;
+
+	if (!intel_sdvo_output_setup(intel_sdvo,
+				     intel_sdvo->caps.output_flags)) {
+		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+			      SDVO_NAME(intel_sdvo));
+		/* Output_setup can leave behind connectors! */
+		goto err_output;
+	}
+
+	/* Only enable the hotplug irq if we need it, to work around noisy
+	 * hotplug lines.
+	 */
+	if (intel_sdvo->hotplug_active) {
+		intel_encoder->hpd_pin =
+			intel_sdvo->is_sdvob ?  HPD_SDVO_B : HPD_SDVO_C;
+	}
+
+	/*
+	 * Cloning SDVO with anything is often impossible, since the SDVO
+	 * encoder can request a special input timing mode. And even if that's
+	 * not the case we have evidence that cloning a plain unscaled mode with
+	 * VGA doesn't really work. Furthermore the cloning flags are way too
+	 * simplistic anyway to express such constraints, so just give up on
+	 * cloning for SDVO encoders.
+	 */
+	intel_sdvo->base.cloneable = 0;
+
+	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
+
+	/* Set the input timing to the screen. Always assume input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		goto err_output;
+
+	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+						    &intel_sdvo->pixel_clock_min,
+						    &intel_sdvo->pixel_clock_max))
+		goto err_output;
+
+	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+			"clock range %dMHz - %dMHz, "
+			"input 1: %c, input 2: %c, "
+			"output 1: %c, output 2: %c\n",
+			SDVO_NAME(intel_sdvo),
+			intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+			intel_sdvo->caps.device_rev_id,
+			intel_sdvo->pixel_clock_min / 1000,
+			intel_sdvo->pixel_clock_max / 1000,
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+			/* check currently supported outputs */
+			intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+			intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+	return true;
+
+err_output:
+	intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+	intel_sdvo_unselect_i2c_bus(intel_sdvo);
+	kfree(intel_sdvo);
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
new file mode 100644
index 0000000..2e2d4eb
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -0,0 +1,730 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct intel_sdvo_caps {
+	u8 vendor_id;
+	u8 device_id;
+	u8 device_rev_id;
+	u8 sdvo_version_major;
+	u8 sdvo_version_minor;
+	unsigned int sdvo_inputs_mask:2;
+	unsigned int smooth_scaling:1;
+	unsigned int sharp_scaling:1;
+	unsigned int up_scaling:1;
+	unsigned int down_scaling:1;
+	unsigned int stall_support:1;
+	unsigned int pad:1;
+	u16 output_flags;
+} __packed;
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE	(1 << 7)
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+	struct {
+		u16 clock;	/**< pixel clock, in 10kHz units */
+		u8 h_active;	/**< lower 8 bits (pixels) */
+		u8 h_blank;	/**< lower 8 bits (pixels) */
+		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
+		u8 v_active;	/**< lower 8 bits (lines) */
+		u8 v_blank;	/**< lower 8 bits (lines) */
+		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
+	} part1;
+
+	struct {
+		u8 h_sync_off;	/**< lower 8 bits, from hblank start */
+		u8 h_sync_width;	/**< lower 8 bits (pixels) */
+		/** lower 4 bits each vsync offset, vsync width */
+		u8 v_sync_off_width;
+		/**
+		 * 2 high bits of hsync offset, 2 high bits of hsync width,
+		 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+		 */
+		u8 sync_off_width_high;
+		u8 dtd_flags;
+		u8 sdvo_flags;
+		/** bits 6-7 of the vsync offset, stored at bits 6-7 */
+		u8 v_sync_off_high;
+		u8 reserved;
+	} part2;
+} __packed;
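+
+/*
+ * Sketch: the "high" bytes pack the bits above 8 of the adjacent fields,
+ * so e.g. the full horizontal timings are reassembled as
+ *
+ *	hactive = ((dtd->part1.h_high & 0xf0) << 4) | dtd->part1.h_active;
+ *	hblank  = ((dtd->part1.h_high & 0x0f) << 8) | dtd->part1.h_blank;
+ */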
+
+struct intel_sdvo_pixel_clock_range {
+	u16 min;	/**< pixel clock, in 10kHz units */
+	u16 max;	/**< pixel clock, in 10kHz units */
+} __packed;
+
+struct intel_sdvo_preferred_input_timing_args {
+	u16 clock;
+	u16 width;
+	u16 height;
+	u8	interlace:1;
+	u8	scaled:1;
+	u8	pad:6;
+} __packed;
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0				0x07
+#define SDVO_I2C_ARG_1				0x06
+#define SDVO_I2C_ARG_2				0x05
+#define SDVO_I2C_ARG_3				0x04
+#define SDVO_I2C_ARG_4				0x03
+#define SDVO_I2C_ARG_5				0x02
+#define SDVO_I2C_ARG_6				0x01
+#define SDVO_I2C_ARG_7				0x00
+#define SDVO_I2C_OPCODE				0x08
+#define SDVO_I2C_CMD_STATUS			0x09
+#define SDVO_I2C_RETURN_0			0x0a
+#define SDVO_I2C_RETURN_1			0x0b
+#define SDVO_I2C_RETURN_2			0x0c
+#define SDVO_I2C_RETURN_3			0x0d
+#define SDVO_I2C_RETURN_4			0x0e
+#define SDVO_I2C_RETURN_5			0x0f
+#define SDVO_I2C_RETURN_6			0x10
+#define SDVO_I2C_RETURN_7			0x11
+#define SDVO_I2C_VENDOR_BEGIN			0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON		0x0
+#define SDVO_CMD_STATUS_SUCCESS			0x1
+#define SDVO_CMD_STATUS_NOTSUPP			0x2
+#define SDVO_CMD_STATUS_INVALID_ARG		0x3
+#define SDVO_CMD_STATUS_PENDING			0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET					0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS			0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV			0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
+struct intel_sdvo_get_trained_inputs_response {
+	unsigned int input0_trained:1;
+	unsigned int input1_trained:1;
+	unsigned int pad:6;
+} __packed;
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP				0x06
+struct intel_sdvo_in_out_map {
+	u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP				0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+	u16 interrupt_status;
+	unsigned int ambient_light_interrupt:1;
+	unsigned int hdmi_audio_encrypt_change:1;
+	unsigned int pad:6;
+} __packed;
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT			0x10
+struct intel_sdvo_set_target_input_args {
+	unsigned int target_1:1;
+	unsigned int pad:7;
+} __packed;
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
+# define SDVO_DTD_SDVO_FLAGS				SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
+# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:3;
+} __packed;
+
+#define SDVO_CMD_GET_TV_FORMAT				0x28
+
+#define SDVO_CMD_SET_TV_FORMAT				0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
+struct intel_sdvo_sdtv_resolution_request {
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int pad:5;
+} __packed;
+
+struct intel_sdvo_sdtv_resolution_reply {
+	unsigned int res_320x200:1;
+	unsigned int res_320x240:1;
+	unsigned int res_400x300:1;
+	unsigned int res_640x350:1;
+	unsigned int res_640x400:1;
+	unsigned int res_640x480:1;
+	unsigned int res_704x480:1;
+	unsigned int res_704x576:1;
+
+	unsigned int res_720x350:1;
+	unsigned int res_720x400:1;
+	unsigned int res_720x480:1;
+	unsigned int res_720x540:1;
+	unsigned int res_720x576:1;
+	unsigned int res_768x576:1;
+	unsigned int res_800x600:1;
+	unsigned int res_832x624:1;
+
+	unsigned int res_920x766:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x1024:1;
+	unsigned int pad:5;
+} __packed;
+
+/* Get supported resolutions with square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:6;
+} __packed;
+
+struct intel_sdvo_hdtv_resolution_reply {
+	unsigned int res_640x480:1;
+	unsigned int res_800x600:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x960:1;
+	unsigned int res_1400x1050:1;
+	unsigned int res_1600x1200:1;
+	unsigned int res_1920x1440:1;
+	unsigned int res_2048x1536:1;
+
+	unsigned int res_2560x1920:1;
+	unsigned int res_3200x2400:1;
+	unsigned int res_3840x2880:1;
+	unsigned int pad1:5;
+
+	unsigned int res_848x480:1;
+	unsigned int res_1064x600:1;
+	unsigned int res_1280x720:1;
+	unsigned int res_1360x768:1;
+	unsigned int res_1704x960:1;
+	unsigned int res_1864x1050:1;
+	unsigned int res_1920x1080:1;
+	unsigned int res_2128x1200:1;
+
+	unsigned int res_2560x1400:1;
+	unsigned int res_2728x1536:1;
+	unsigned int res_3408x1920:1;
+	unsigned int res_4264x2400:1;
+	unsigned int res_5120x2880:1;
+	unsigned int pad2:3;
+
+	unsigned int res_768x480:1;
+	unsigned int res_960x600:1;
+	unsigned int res_1152x720:1;
+	unsigned int res_1124x768:1;
+	unsigned int res_1536x960:1;
+	unsigned int res_1680x1050:1;
+	unsigned int res_1728x1080:1;
+	unsigned int res_1920x1200:1;
+
+	unsigned int res_2304x1440:1;
+	unsigned int res_2456x1536:1;
+	unsigned int res_3072x1920:1;
+	unsigned int res_3840x2400:1;
+	unsigned int res_4608x2880:1;
+	unsigned int pad3:3;
+
+	unsigned int res_1280x1024:1;
+	unsigned int pad4:7;
+
+	unsigned int res_1280x768:1;
+	unsigned int pad5:7;
+} __packed;
+
+/* Get supported power states; returns info for both the encoder and the
+   monitor, based on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
+/* Get power state; returns info for both the encoder and the monitor,
+   based on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE			0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
+# define SDVO_ENCODER_STATE_ON					(1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
+# define SDVO_ENCODER_STATE_OFF					(1 << 3)
+# define SDVO_MONITOR_STATE_ON					(1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY				(1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND				(1 << 6)
+# define SDVO_MONITOR_STATE_OFF					(1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+	u8 t0;
+	u8 t1;
+	u8 t2;
+	u8 t3;
+	u8 t4;
+
+	unsigned int t0_high:2;
+	unsigned int t1_high:2;
+	unsigned int t2_high:2;
+	unsigned int t3_high:2;
+
+	unsigned int t4_high:2;
+	unsigned int pad:6;
+} __packed;
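+
+/*
+ * Sketch: each timing parameter is a 10-bit millisecond value, reassembled
+ * from the split fields as e.g.
+ *
+ *	t0_ms = (seq->t0_high << 8) | seq->t0;
+ */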
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
+struct sdvo_max_backlight_reply {
+	u8 max_value;
+	u8 default_value;
+} __packed;
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
+struct sdvo_get_ambient_light_reply {
+	u16 trip_low;
+	u16 trip_high;
+	u16 value;
+} __packed;
+#define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
+struct sdvo_set_ambient_light_reply {
+	u16 trip_low;
+	u16 trip_high;
+	unsigned int enable:1;
+	unsigned int pad:7;
+} __packed;
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
+# define SDVO_DISPLAY_STATE_ON				(1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY			(1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND			(1 << 2)
+# define SDVO_DISPLAY_STATE_OFF				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
+struct intel_sdvo_enhancements_reply {
+	unsigned int flicker_filter:1;
+	unsigned int flicker_filter_adaptive:1;
+	unsigned int flicker_filter_2d:1;
+	unsigned int saturation:1;
+	unsigned int hue:1;
+	unsigned int brightness:1;
+	unsigned int contrast:1;
+	unsigned int overscan_h:1;
+
+	unsigned int overscan_v:1;
+	unsigned int hpos:1;
+	unsigned int vpos:1;
+	unsigned int sharpness:1;
+	unsigned int dot_crawl:1;
+	unsigned int dither:1;
+	unsigned int tv_chroma_filter:1;
+	unsigned int tv_luma_filter:1;
+} __packed;
+
+/* The picture enhancement limits below depend on the current TV format,
+ * and thus need to be queried and set after it has been selected.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
+#define SDVO_CMD_GET_MAX_SATURATION			0x55
+#define SDVO_CMD_GET_MAX_HUE				0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST			0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
+#define SDVO_CMD_GET_MAX_HPOS				0x67
+#define SDVO_CMD_GET_MAX_VPOS				0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
+struct intel_sdvo_enhancement_limits_reply {
+	u16 max_value;
+	u16 default_value;
+} __packed;
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
+# define SDVO_LVDS_COLOR_DEPTH_18			(0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24			(1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG			(0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI			(1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL			(0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL				(1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER			0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER			0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
+#define SDVO_CMD_GET_SATURATION				0x56
+#define SDVO_CMD_SET_SATURATION				0x57
+#define SDVO_CMD_GET_HUE				0x59
+#define SDVO_CMD_SET_HUE				0x5a
+#define SDVO_CMD_GET_BRIGHTNESS				0x5c
+#define SDVO_CMD_SET_BRIGHTNESS				0x5d
+#define SDVO_CMD_GET_CONTRAST				0x5f
+#define SDVO_CMD_SET_CONTRAST				0x60
+#define SDVO_CMD_GET_OVERSCAN_H				0x62
+#define SDVO_CMD_SET_OVERSCAN_H				0x63
+#define SDVO_CMD_GET_OVERSCAN_V				0x65
+#define SDVO_CMD_SET_OVERSCAN_V				0x66
+#define SDVO_CMD_GET_HPOS				0x68
+#define SDVO_CMD_SET_HPOS				0x69
+#define SDVO_CMD_GET_VPOS				0x6b
+#define SDVO_CMD_SET_VPOS				0x6c
+#define SDVO_CMD_GET_SHARPNESS				0x6e
+#define SDVO_CMD_SET_SHARPNESS				0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
+struct intel_sdvo_enhancements_arg {
+	u16 value;
+} __packed;
+
+#define SDVO_CMD_GET_DOT_CRAWL				0x70
+#define SDVO_CMD_SET_DOT_CRAWL				0x71
+# define SDVO_DOT_CRAWL_ON					(1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON				(1 << 1)
+
+#define SDVO_CMD_GET_DITHER				0x72
+#define SDVO_CMD_SET_DITHER				0x73
+# define SDVO_DITHER_ON						(1 << 0)
+# define SDVO_DITHER_DEFAULT_ON					(1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
+# define SDVO_CONTROL_BUS_PROM				(1 << 0)
+# define SDVO_CONTROL_BUS_DDC1				(1 << 1)
+# define SDVO_CONTROL_BUS_DDC2				(1 << 2)
+# define SDVO_CONTROL_BUS_DDC3				(1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE	0x9d
+#define SDVO_CMD_GET_ENCODE		0x9e
+#define SDVO_CMD_SET_ENCODE		0x9f
+  #define SDVO_ENCODE_DVI	0x0
+  #define SDVO_ENCODE_HDMI	0x1
+#define SDVO_CMD_SET_PIXEL_REPLI	0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI	0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
+#define SDVO_CMD_SET_COLORIMETRY	0x8e
+  #define SDVO_COLORIMETRY_RGB256   0x0
+  #define SDVO_COLORIMETRY_RGB220   0x1
+  #define SDVO_COLORIMETRY_YCrCb422 0x3
+  #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY	0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT		0x91
+#define SDVO_CMD_GET_AUDIO_STAT		0x92
+#define SDVO_CMD_SET_HBUF_INDEX		0x93
+  #define SDVO_HBUF_INDEX_ELD		0
+  #define SDVO_HBUF_INDEX_AVI_IF	1
+#define SDVO_CMD_GET_HBUF_INDEX		0x94
+#define SDVO_CMD_GET_HBUF_INFO		0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT	0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT	0x97
+#define SDVO_CMD_SET_HBUF_DATA		0x98
+#define SDVO_CMD_GET_HBUF_DATA		0x99
+#define SDVO_CMD_SET_HBUF_TXRATE	0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE	0x9b
+  #define SDVO_HBUF_TX_DISABLED	(0 << 6)
+  #define SDVO_HBUF_TX_ONCE	(2 << 6)
+  #define SDVO_HBUF_TX_VSYNC	(3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
+#define SDVO_NEED_TO_STALL  (1 << 7)
+
+struct intel_sdvo_encode {
+	u8 dvi_rev;
+	u8 hdmi_rev;
+} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
new file mode 100644
index 0000000..8831fc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP	0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP	0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP	0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP	0x07
+
+static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
+			   u32 port, u32 opcode, u32 addr, u32 *val)
+{
+	u32 cmd, be = 0xf, bar = 0;
+	bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+
+	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+		(bar << IOSF_BAR_SHIFT);
+
+	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
+				 is_read ? "read" : "write");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(VLV_IOSF_ADDR, addr);
+	if (!is_read)
+		I915_WRITE(VLV_IOSF_DATA, *val);
+	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
+				 is_read ? "read" : "write");
+		return -ETIMEDOUT;
+	}
+
+	if (is_read)
+		*val = I915_READ(VLV_IOSF_DATA);
+	I915_WRITE(VLV_IOSF_DATA, 0);
+
+	return 0;
+}
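+
+/*
+ * Sketch: a Punit register read from the helpers below reaches this
+ * function as
+ *
+ *	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ *			SB_CRRDDA_NP, addr, &val);
+ *
+ * which packs devfn/opcode/port/byte-enables into the doorbell command,
+ * waits for IOSF_SB_BUSY to clear, kicks the transaction, and reads the
+ * result back from VLV_IOSF_DATA.
+ */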
+
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
+{
+	u32 val = 0;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+			SB_CRRDDA_NP, addr, &val);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	return val;
+}
+
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+			SB_CRWRDA_NP, addr, &val);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+			SB_CRRDDA_NP, reg, &val);
+
+	return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+			SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+	u32 val = 0;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+			SB_CRRDDA_NP, addr, &val);
+	mutex_unlock(&dev_priv->sb_lock);
+
+	return val;
+}
+
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+			SB_CRRDDA_NP, reg, &val);
+	return val;
+}
+
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+			SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+			SB_CRRDDA_NP, reg, &val);
+	return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+			SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+			SB_CRRDDA_NP, reg, &val);
+	return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+			SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+			SB_CRRDDA_NP, reg, &val);
+	return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+			SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
+			SB_MRD_NP, reg, &val);
+
+	/*
+	 * FIXME: There might be some registers where all 1's is a valid value,
+	 * so ideally we should check the register offset instead...
+	 */
+	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
+	     pipe_name(pipe), reg, val);
+
+	return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
+			SB_MWR_NP, reg, &val);
+}
+
+/*
+ * SBI access: the PCH sideband interface, used here to reach the iCLK
+ * and mPHY register blocks.
+ */
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+		   enum intel_sbi_destination destination)
+{
+	u32 value = 0;
+	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return 0;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+
+	if (destination == SBI_ICLK)
+		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+	else
+		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+		return 0;
+	}
+
+	return I915_READ(SBI_DATA);
+}
+
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		     enum intel_sbi_destination destination)
+{
+	u32 tmp;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+	I915_WRITE(SBI_DATA, value);
+
+	if (destination == SBI_ICLK)
+		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	else
+		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+		return;
+	}
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+			reg, &val);
+	return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+			reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 0000000..2cc6aa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,1156 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static bool
+format_is_yuv(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YVYU:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+			      int usecs)
+{
+	/* paranoia */
+	if (!adjusted_mode->crtc_htotal)
+		return 1;
+
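+	/*
+	 * Worked example (illustrative numbers only): a 1080p@60 mode with
+	 * crtc_clock = 148500 kHz and crtc_htotal = 2200 turns 100 us into
+	 * DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines.
+	 */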
+	return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
+			    1000 * adjusted_mode->crtc_htotal);
+}
+
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank happens within the
+ * next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays. The vblank counter sampled here is stashed in
+ * crtc->debug.start_vbl_count and compared against the counter again in
+ * intel_pipe_update_end() for error reporting.
+ */
+void intel_pipe_update_start(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	enum pipe pipe = crtc->pipe;
+	long timeout = msecs_to_jiffies_timeout(1);
+	int scanline, min, max, vblank_start;
+	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+	DEFINE_WAIT(wait);
+
+	vblank_start = adjusted_mode->crtc_vblank_start;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+	/* FIXME needs to be calibrated sensibly */
+	min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+	max = vblank_start - 1;
+
+	local_irq_disable();
+
+	if (min <= 0 || max <= 0)
+		return;
+
+	if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+		return;
+
+	crtc->debug.min_vbl = min;
+	crtc->debug.max_vbl = max;
+	trace_i915_pipe_update_start(crtc);
+
+	for (;;) {
+		/*
+		 * prepare_to_wait() has a memory barrier, which guarantees
+		 * other CPUs can see the task state update by the time we
+		 * read the scanline.
+		 */
+		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+		scanline = intel_get_crtc_scanline(crtc);
+		if (scanline < min || scanline > max)
+			break;
+
+		if (timeout <= 0) {
+			DRM_ERROR("Potential atomic update failure on pipe %c\n",
+				  pipe_name(crtc->pipe));
+			break;
+		}
+
+		local_irq_enable();
+
+		timeout = schedule_timeout(timeout);
+
+		local_irq_disable();
+	}
+
+	finish_wait(wq, &wait);
+
+	drm_crtc_vblank_put(&crtc->base);
+
+	crtc->debug.scanline_start = scanline;
+	crtc->debug.start_vbl_time = ktime_get();
+	crtc->debug.start_vbl_count =
+		dev->driver->get_vblank_counter(dev, pipe);
+
+	trace_i915_pipe_update_vblank_evaded(crtc);
+}
+
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @crtc: the crtc of which the registers were updated
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies that the update actually completed
+ * before a vblank, using the vblank counter sampled at update start.
+ */
+void intel_pipe_update_end(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	enum pipe pipe = crtc->pipe;
+	int scanline_end = intel_get_crtc_scanline(crtc);
+	u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+	ktime_t end_vbl_time = ktime_get();
+
+	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+	local_irq_enable();
+
+	if (crtc->debug.start_vbl_count &&
+	    crtc->debug.start_vbl_count != end_vbl_count) {
+		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+			  pipe_name(pipe), crtc->debug.start_vbl_count,
+			  end_vbl_count,
+			  ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+			  crtc->debug.min_vbl, crtc->debug.max_vbl,
+			  crtc->debug.scanline_start, scanline_end);
+	}
+}
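+
+/*
+ * Illustrative pairing (a sketch, not a verbatim caller): all writes to
+ * the double-buffered registers must land between the two calls so that
+ * they latch within the same frame:
+ *
+ *	intel_pipe_update_start(crtc);
+ *	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+ *	I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
+ *	intel_pipe_update_end(crtc);
+ */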
+
+static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	const int pipe = intel_plane->pipe;
+	const int plane = intel_plane->plane + 1;
+	u32 plane_ctl, stride_div, stride;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(drm_plane->state)->ckey;
+	u32 surf_addr;
+	u32 tile_height, plane_offset, plane_size;
+	unsigned int rotation;
+	int x_offset, y_offset;
+	struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
+	int scaler_id;
+
+	plane_ctl = PLANE_CTL_ENABLE |
+		PLANE_CTL_PIPE_GAMMA_ENABLE |
+		PLANE_CTL_PIPE_CSC_ENABLE;
+
+	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+
+	rotation = drm_plane->state->rotation;
+	plane_ctl |= skl_plane_ctl_rotation(rotation);
+
+	intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+				       pixel_size, true,
+				       src_w != crtc_w || src_h != crtc_h);
+
+	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+					       fb->pixel_format);
+
+	scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
+
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	if (key->flags) {
+		I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+		I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+		I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+	}
+
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+	surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
+
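+	/*
+	 * For 90/270 degree rotation the hardware scans out column-wise, so
+	 * the stride is reinterpreted as the surface height in tiles and the
+	 * x/y offsets swap roles; x_offset is (as best we can tell) measured
+	 * back from the padded end of the surface.
+	 */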
+	if (intel_rotation_90_or_270(rotation)) {
+		/* stride: Surface height in tiles */
+		tile_height = intel_tile_height(dev, fb->pixel_format,
+						fb->modifier[0], 0);
+		stride = DIV_ROUND_UP(fb->height, tile_height);
+		plane_size = (src_w << 16) | src_h;
+		x_offset = stride * tile_height - y - (src_h + 1);
+		y_offset = x;
+	} else {
+		stride = fb->pitches[0] / stride_div;
+		plane_size = (src_h << 16) | src_w;
+		x_offset = x;
+		y_offset = y;
+	}
+	plane_offset = y_offset << 16 | x_offset;
+
+	I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
+	I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+	I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+
+	/* program plane scaler */
+	if (scaler_id >= 0) {
+		uint32_t ps_ctrl = 0;
+
+		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
+			PS_PLANE_SEL(plane));
+		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
+			crtc_state->scaler_state.scalers[scaler_id].mode;
+		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
+			((crtc_w + 1) << 16)|(crtc_h + 1));
+
+		I915_WRITE(PLANE_POS(pipe, plane), 0);
+	} else {
+		I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+	}
+
+	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+	I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
+	POSTING_READ(PLANE_SURF(pipe, plane));
+}
+
+static void
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	const int pipe = intel_plane->pipe;
+	const int plane = intel_plane->plane + 1;
+
+	I915_WRITE(PLANE_CTL(pipe, plane), 0);
+
+	I915_WRITE(PLANE_SURF(pipe, plane), 0);
+	POSTING_READ(PLANE_SURF(pipe, plane));
+
+	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
+}
+
+static void
+chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+{
+	struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+	int plane = intel_plane->plane;
+
+	/* Seems RGB data bypasses the CSC always */
+	if (!format_is_yuv(format))
+		return;
+
+	/*
+	 * BT.601 limited range YCbCr -> full range RGB
+	 *
+	 * |r|   | 6537 4769     0|   |cr  |
+	 * |g| = |-3330 4769 -1605| x |y-64|
+	 * |b|   |    0 4769  8263|   |cb  |
+	 *
+	 * Cb and Cr apparently come in as signed already, so no
+	 * need for any offset. For Y we need to remove the offset.
+	 */
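+	/*
+	 * The coefficients look like the standard BT.601 factors in a 4.12
+	 * fixed-point encoding (i.e. value * 4096): 4769/4096 ~= 1.164 (the
+	 * 255/219 luma gain), 6537/4096 ~= 1.596, -3330/4096 ~= -0.813,
+	 * -1605/4096 ~= -0.392 and 8263/4096 ~= 2.017.
+	 */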
+	I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+	I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+	I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+	I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
+	I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
+	I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
+	I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
+	I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
+
+	I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+	I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+	I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+	I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
+static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+	u32 sprctl;
+	unsigned long sprsurf_offset, linear_offset;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(dplane->state)->ckey;
+
+	sprctl = SP_ENABLE;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_YUYV:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+		break;
+	case DRM_FORMAT_RGB565:
+		sprctl |= SP_FORMAT_BGR565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		sprctl |= SP_FORMAT_BGRX8888;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		sprctl |= SP_FORMAT_BGRA8888;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		sprctl |= SP_FORMAT_RGBX1010102;
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		sprctl |= SP_FORMAT_RGBA1010102;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		sprctl |= SP_FORMAT_RGBX8888;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		sprctl |= SP_FORMAT_RGBA8888;
+		break;
+	default:
+		/*
+		 * If we get here one of the upper layers failed to filter
+		 * out the unsupported plane formats
+		 */
+		BUG();
+		break;
+	}
+
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	sprctl |= SP_GAMMA_ENABLE;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		sprctl |= SP_TILED;
+
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+							&x, &y,
+							obj->tiling_mode,
+							pixel_size,
+							fb->pitches[0]);
+	linear_offset -= sprsurf_offset;
+
+	if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
+		sprctl |= SP_ROTATE_180;
+
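+		/*
+		 * src_w/src_h are already size-minus-one here, so this moves
+		 * the base offset to the last pixel of the source rectangle,
+		 * which is where a 180 degree scanout starts.
+		 */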
+		x += src_w;
+		y += src_h;
+		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+	}
+
+	if (key->flags) {
+		I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+		I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+		I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+	}
+
+	if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SP_SOURCE_KEY;
+
+	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+		chv_update_csc(intel_plane, fb->pixel_format);
+
+	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+	else
+		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+	I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+
+	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+	I915_WRITE(SPCNTR(pipe, plane), sprctl);
+	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+		   sprsurf_offset);
+	POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+
+	I915_WRITE(SPCNTR(pipe, plane), 0);
+
+	I915_WRITE(SPSURF(pipe, plane), 0);
+	POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	enum pipe pipe = intel_plane->pipe;
+	u32 sprctl, sprscale = 0;
+	unsigned long sprsurf_offset, linear_offset;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
+
+	sprctl = SPRITE_ENABLE;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_XBGR8888:
+		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		sprctl |= SPRITE_FORMAT_RGBX888;
+		break;
+	case DRM_FORMAT_YUYV:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	sprctl |= SPRITE_GAMMA_ENABLE;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		sprctl |= SPRITE_TILED;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+	else
+		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+				       true,
+				       src_w != crtc_w || src_h != crtc_h);
+
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	if (crtc_w != src_w || crtc_h != src_h)
+		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	sprsurf_offset =
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
+					       pixel_size, fb->pitches[0]);
+	linear_offset -= sprsurf_offset;
+
+	if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+		sprctl |= SPRITE_ROTATE_180;
+
+		/* HSW and BDW do this automagically in hardware */
+		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+			x += src_w;
+			y += src_h;
+			linear_offset += src_h * fb->pitches[0] +
+				src_w * pixel_size;
+		}
+	}
+
+	if (key->flags) {
+		I915_WRITE(SPRKEYVAL(pipe), key->min_value);
+		I915_WRITE(SPRKEYMAX(pipe), key->max_value);
+		I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+	}
+
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		sprctl |= SPRITE_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SPRITE_SOURCE_KEY;
+
+	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
+	/*
+	 * HSW consolidates SPRTILEOFF and SPRLINOFF into a single
+	 * SPROFFSET register.
+	 */
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+	else if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
+	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), sprscale);
+	I915_WRITE(SPRCTL(pipe), sprctl);
+	I915_WRITE(SPRSURF(pipe),
+		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+	POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+
+	I915_WRITE(SPRCTL(pipe), 0);
+	/* Can't leave the scaler enabled... */
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), 0);
+
+	I915_WRITE(SPRSURF(pipe), 0);
+	POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	int pipe = intel_plane->pipe;
+	unsigned long dvssurf_offset, linear_offset;
+	u32 dvscntr, dvsscale;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
+
+	dvscntr = DVS_ENABLE;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_XBGR8888:
+		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		dvscntr |= DVS_FORMAT_RGBX888;
+		break;
+	case DRM_FORMAT_YUYV:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	dvscntr |= DVS_GAMMA_ENABLE;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dvscntr |= DVS_TILED;
+
+	if (IS_GEN6(dev))
+		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+				       pixel_size, true,
+				       src_w != crtc_w || src_h != crtc_h);
+
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	dvsscale = 0;
+	if (crtc_w != src_w || crtc_h != src_h)
+		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	dvssurf_offset =
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
+					       pixel_size, fb->pitches[0]);
+	linear_offset -= dvssurf_offset;
+
+	if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+		dvscntr |= DVS_ROTATE_180;
+
+		x += src_w;
+		y += src_h;
+		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+	}
+
+	if (key->flags) {
+		I915_WRITE(DVSKEYVAL(pipe), key->min_value);
+		I915_WRITE(DVSKEYMAX(pipe), key->max_value);
+		I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+	}
+
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		dvscntr |= DVS_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		dvscntr |= DVS_SOURCE_KEY;
+
+	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
+	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+	I915_WRITE(DVSSCALE(pipe), dvsscale);
+	I915_WRITE(DVSCNTR(pipe), dvscntr);
+	I915_WRITE(DVSSURF(pipe),
+		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+	POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+
+	I915_WRITE(DVSCNTR(pipe), 0);
+	/* Disable the scaler */
+	I915_WRITE(DVSSCALE(pipe), 0);
+
+	I915_WRITE(DVSSURF(pipe), 0);
+	POSTING_READ(DVSSURF(pipe));
+}
+
+static int
+intel_check_sprite_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
+			 struct intel_plane_state *state)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_framebuffer *fb = state->base.fb;
+	int crtc_x, crtc_y;
+	unsigned int crtc_w, crtc_h;
+	uint32_t src_x, src_y, src_w, src_h;
+	struct drm_rect *src = &state->src;
+	struct drm_rect *dst = &state->dst;
+	const struct drm_rect *clip = &state->clip;
+	int hscale, vscale;
+	int max_scale, min_scale;
+	bool can_scale;
+	int pixel_size;
+
+	if (!fb) {
+		state->visible = false;
+		return 0;
+	}
+
+	/* Don't modify another pipe's plane */
+	if (intel_plane->pipe != intel_crtc->pipe) {
+		DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
+		return -EINVAL;
+	}
+
+	/* FIXME check all gen limits */
+	if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
+		DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
+		return -EINVAL;
+	}
+
+	/* setup can_scale, min_scale, max_scale */
+	if (INTEL_INFO(dev)->gen >= 9) {
+		/* use scaler when colorkey is not required */
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+			can_scale = true;
+			min_scale = 1;
+			max_scale = skl_max_scale(intel_crtc, crtc_state);
+		} else {
+			can_scale = false;
+			min_scale = DRM_PLANE_HELPER_NO_SCALING;
+			max_scale = DRM_PLANE_HELPER_NO_SCALING;
+		}
+	} else {
+		can_scale = intel_plane->can_scale;
+		max_scale = intel_plane->max_downscale << 16;
+		min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+	}
+
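+	/*
+	 * The scale factors are 16.16 fixed point (source size over
+	 * destination size), so 1 << 16 means 1:1; e.g. max_downscale = 2
+	 * yields max_scale = 2 << 16, allowing the source to be at most
+	 * twice the destination size in either dimension.
+	 */
+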
+	/*
+	 * FIXME the following code does a bunch of fuzzy adjustments to the
+	 * coordinates and sizes. We probably need some way to decide whether
+	 * more strict checking should be done instead.
+	 */
+	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
+			state->base.rotation);
+
+	hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
+	BUG_ON(hscale < 0);
+
+	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
+	BUG_ON(vscale < 0);
+
+	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+
+	crtc_x = dst->x1;
+	crtc_y = dst->y1;
+	crtc_w = drm_rect_width(dst);
+	crtc_h = drm_rect_height(dst);
+
+	if (state->visible) {
+		/* check again in case clipping clamped the results */
+		hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+		if (hscale < 0) {
+			DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
+			drm_rect_debug_print(src, true);
+			drm_rect_debug_print(dst, false);
+
+			return hscale;
+		}
+
+		vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
+		if (vscale < 0) {
+			DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
+			drm_rect_debug_print(src, true);
+			drm_rect_debug_print(dst, false);
+
+			return vscale;
+		}
+
+		/* Make the source viewport size an exact multiple of the scaling factors. */
+		drm_rect_adjust_size(src,
+				     drm_rect_width(dst) * hscale - drm_rect_width(src),
+				     drm_rect_height(dst) * vscale - drm_rect_height(src));
+
+		drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
+				    state->base.rotation);
+
+		/* sanity check to make sure the src viewport wasn't enlarged */
+		WARN_ON(src->x1 < (int) state->base.src_x ||
+			src->y1 < (int) state->base.src_y ||
+			src->x2 > (int) state->base.src_x + state->base.src_w ||
+			src->y2 > (int) state->base.src_y + state->base.src_h);
+
+		/*
+		 * Hardware doesn't handle subpixel coordinates.
+		 * Adjust to (macro)pixel boundary, but be careful not to
+		 * increase the source viewport size, because that could
+		 * push the downscaling factor out of bounds.
+		 */
+		src_x = src->x1 >> 16;
+		src_w = drm_rect_width(src) >> 16;
+		src_y = src->y1 >> 16;
+		src_h = drm_rect_height(src) >> 16;
+
+		if (format_is_yuv(fb->pixel_format)) {
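+			/*
+			 * These are packed 4:2:2 formats: two pixels per
+			 * macropixel, so x and width must stay even.
+			 */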
+			src_x &= ~1;
+			src_w &= ~1;
+
+			/*
+			 * Must keep src and dst the
+			 * same if we can't scale.
+			 */
+			if (!can_scale)
+				crtc_w &= ~1;
+
+			if (crtc_w == 0)
+				state->visible = false;
+		}
+	}
+
+	/* Check size restrictions when scaling */
+	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
+		unsigned int width_bytes;
+
+		WARN_ON(!can_scale);
+
+		/* FIXME interlacing min height is 6 */
+
+		if (crtc_w < 3 || crtc_h < 3)
+			state->visible = false;
+
+		if (src_w < 3 || src_h < 3)
+			state->visible = false;
+
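+		/*
+		 * width_bytes appears to be the fetch span measured from the
+		 * enclosing 64-byte boundary: e.g. src_x = 10 at 4 bpp adds
+		 * (40 & 63) = 40 bytes on top of src_w * 4.
+		 */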
+		pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+		width_bytes = ((src_x * pixel_size) & 63) +
+					src_w * pixel_size;
+
+		if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+		    width_bytes > 4096 || fb->pitches[0] > 4096)) {
+			DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+			return -EINVAL;
+		}
+	}
+
+	if (state->visible) {
+		src->x1 = src_x << 16;
+		src->x2 = (src_x + src_w) << 16;
+		src->y1 = src_y << 16;
+		src->y2 = (src_y + src_h) << 16;
+	}
+
+	dst->x1 = crtc_x;
+	dst->x2 = crtc_x + crtc_w;
+	dst->y1 = crtc_y;
+	dst->y2 = crtc_y + crtc_h;
+
+	return 0;
+}
+
+static void
+intel_commit_sprite_plane(struct drm_plane *plane,
+			  struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = state->base.crtc;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_framebuffer *fb = state->base.fb;
+
+	crtc = crtc ? crtc : plane->crtc;
+
+	if (!crtc->state->active)
+		return;
+
+	if (state->visible) {
+		intel_plane->update_plane(plane, crtc, fb,
+					  state->dst.x1, state->dst.y1,
+					  drm_rect_width(&state->dst),
+					  drm_rect_height(&state->dst),
+					  state->src.x1 >> 16,
+					  state->src.y1 >> 16,
+					  drm_rect_width(&state->src) >> 16,
+					  drm_rect_height(&state->src) >> 16);
+	} else {
+		intel_plane->disable_plane(plane, crtc);
+	}
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_intel_sprite_colorkey *set = data;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	struct drm_atomic_state *state;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret = 0;
+
+	/* Make sure we don't try to enable both src & dest simultaneously */
+	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+		return -EINVAL;
+
+	if (IS_VALLEYVIEW(dev) &&
+	    set->flags & I915_SET_COLORKEY_DESTINATION)
+		return -EINVAL;
+
+	plane = drm_plane_find(dev, set->plane_id);
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+		return -ENOENT;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	state->acquire_ctx = &ctx;
+
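+	/*
+	 * Standard modeset-lock retry dance: -EDEADLK means another thread
+	 * holds a lock we need, so clear the state, back off, and retry the
+	 * whole transaction.
+	 */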
+	while (1) {
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			to_intel_plane_state(plane_state)->ckey = *set;
+			ret = drm_atomic_commit(state);
+		}
+
+		if (ret != -EDEADLK)
+			break;
+
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+	}
+
+	if (ret)
+		drm_atomic_state_free(state);
+
+out:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	return ret;
+}
+
+static const uint32_t ilk_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+static const uint32_t snb_plane_formats[] = {
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+static const uint32_t vlv_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+static const uint32_t skl_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+{
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *state;
+	unsigned long possible_crtcs;
+	const uint32_t *plane_formats;
+	int num_plane_formats;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 5)
+		return -ENODEV;
+
+	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
+	if (!intel_plane)
+		return -ENOMEM;
+
+	state = intel_create_plane_state(&intel_plane->base);
+	if (!state) {
+		kfree(intel_plane);
+		return -ENOMEM;
+	}
+	intel_plane->base.state = &state->base;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 5:
+	case 6:
+		intel_plane->can_scale = true;
+		intel_plane->max_downscale = 16;
+		intel_plane->update_plane = ilk_update_plane;
+		intel_plane->disable_plane = ilk_disable_plane;
+
+		if (IS_GEN6(dev)) {
+			plane_formats = snb_plane_formats;
+			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		} else {
+			plane_formats = ilk_plane_formats;
+			num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+		}
+		break;
+
+	case 7:
+	case 8:
+		if (IS_IVYBRIDGE(dev)) {
+			intel_plane->can_scale = true;
+			intel_plane->max_downscale = 2;
+		} else {
+			intel_plane->can_scale = false;
+			intel_plane->max_downscale = 1;
+		}
+
+		if (IS_VALLEYVIEW(dev)) {
+			intel_plane->update_plane = vlv_update_plane;
+			intel_plane->disable_plane = vlv_disable_plane;
+
+			plane_formats = vlv_plane_formats;
+			num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+		} else {
+			intel_plane->update_plane = ivb_update_plane;
+			intel_plane->disable_plane = ivb_disable_plane;
+
+			plane_formats = snb_plane_formats;
+			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		}
+		break;
+	case 9:
+		intel_plane->can_scale = true;
+		intel_plane->update_plane = skl_update_plane;
+		intel_plane->disable_plane = skl_disable_plane;
+		state->scaler_id = -1;
+
+		plane_formats = skl_plane_formats;
+		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+		break;
+	default:
+		kfree(state);
+		kfree(intel_plane);
+		return -ENODEV;
+	}
+
+	intel_plane->pipe = pipe;
+	intel_plane->plane = plane;
+	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
+	intel_plane->check_plane = intel_check_sprite_plane;
+	intel_plane->commit_plane = intel_commit_sprite_plane;
+	possible_crtcs = (1 << pipe);
+	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+				       &intel_plane_funcs,
+				       plane_formats, num_plane_formats,
+				       DRM_PLANE_TYPE_OVERLAY);
+	if (ret) {
+		kfree(state);
+		kfree(intel_plane);
+		goto out;
+	}
+
+	intel_create_rotation_property(dev, intel_plane);
+
+	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+
+out:
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
new file mode 100644
index 0000000..6bea789
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -0,0 +1,1700 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+enum tv_margin {
+	TV_MARGIN_LEFT, TV_MARGIN_TOP,
+	TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv {
+	struct intel_encoder base;
+
+	int type;
+	const char *tv_format;
+	int margin[4];
+	u32 save_TV_H_CTL_1;
+	u32 save_TV_H_CTL_2;
+	u32 save_TV_H_CTL_3;
+	u32 save_TV_V_CTL_1;
+	u32 save_TV_V_CTL_2;
+	u32 save_TV_V_CTL_3;
+	u32 save_TV_V_CTL_4;
+	u32 save_TV_V_CTL_5;
+	u32 save_TV_V_CTL_6;
+	u32 save_TV_V_CTL_7;
+	u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+	u32 save_TV_CSC_Y;
+	u32 save_TV_CSC_Y2;
+	u32 save_TV_CSC_U;
+	u32 save_TV_CSC_U2;
+	u32 save_TV_CSC_V;
+	u32 save_TV_CSC_V2;
+	u32 save_TV_CLR_KNOBS;
+	u32 save_TV_CLR_LEVEL;
+	u32 save_TV_WIN_POS;
+	u32 save_TV_WIN_SIZE;
+	u32 save_TV_FILTER_CTL_1;
+	u32 save_TV_FILTER_CTL_2;
+	u32 save_TV_FILTER_CTL_3;
+
+	u32 save_TV_H_LUMA[60];
+	u32 save_TV_H_CHROMA[60];
+	u32 save_TV_V_LUMA[43];
+	u32 save_TV_V_CHROMA[43];
+
+	u32 save_TV_DAC;
+	u32 save_TV_CTL;
+};
+
+struct video_levels {
+	int blank, black, burst;
+};
+
+struct color_conversion {
+	u16 ry, gy, by, ay;
+	u16 ru, gu, bu, au;
+	u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+	0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+	0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+	0x2D002CC0, 0x30003640, 0x2D0036C0,
+	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+	0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ *   1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ *   exp.mantissa (ee.mmmmmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmmmmm)
+ *   ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ *   ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ *   ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ *   exp.mantissa (eee.mmmmmmmmm)
+ *   eee = 000 = 10^-1 (0.mmmmmmmmm)
+ *   eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ *   eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ *   eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ *   eee = 100 = reserved
+ *   eee = 101 = reserved
+ *   eee = 110 = reserved
+ *   eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ *   exp.mantissa (ee.mmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmm)
+ *   ee = 01 = 10^0 (m.mmmmm)
+ *   ee = 10 = 10^1 (mm.mmmm)
+ *   ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ *     u32 exp;
+ *     u32 mant;
+ *     u32 ret;
+ *
+ *     if (f < 0)
+ *         f = -f;
+ *
+ *     if (f >= 1) {
+ *         exp = 0x7;
+ *         mant = 1 << 8;
+ *     } else {
+ *         for (exp = 0; exp < 3 && f < 0.5; exp++)
+ *             f *= 2.0;
+ *         mant = (f * (1 << 9) + 0.5);
+ *         if (mant >= (1 << 9))
+ *             mant = (1 << 9) - 1;
+ *     }
+ *     ret = (exp << 9) | mant;
+ *     return ret;
+ * }
+ */
+
+/*
+ * Behold, magic numbers!  If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+	.blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+	.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+	.blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+	.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+	.blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+	.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+	.blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+	.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+	.blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+	.blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+	.blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+	.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+	.blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+	const char *name;
+	int clock;
+	int refresh; /* in millihertz (for precision) */
+	u32 oversample;
+	int hsync_end, hblank_start, hblank_end, htotal;
+	bool progressive, trilevel_sync, component_only;
+	int vsync_start_f1, vsync_start_f2, vsync_len;
+	bool veq_ena;
+	int veq_start_f1, veq_start_f2, veq_len;
+	int vi_end_f1, vi_end_f2, nbr_end;
+	bool burst_ena;
+	int hburst_start, hburst_len;
+	int vburst_start_f1, vburst_end_f1;
+	int vburst_start_f2, vburst_end_f2;
+	int vburst_start_f3, vburst_end_f3;
+	int vburst_start_f4, vburst_end_f4;
+	/*
+	 * subcarrier programming
+	 */
+	int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+	u32 sc_reset;
+	bool pal_burst;
+	/*
+	 * blank/black levels
+	 */
+	const struct video_levels *composite_levels, *svideo_levels;
+	const struct color_conversion *composite_color, *svideo_color;
+	const u32 *filter_table;
+	int max_srcw;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ *  I think this works as follows:
+ *
+ *  subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ *  dda1_ideal = subcarrier/pixel * 4096
+ *  dda1_inc = floor (dda1_ideal)
+ *  dda2 = dda1_ideal - dda1_inc
+ *
+ *  then pick a ratio for dda2 that gives the closest approximation. If
+ *  you can't get close enough, you can play with dda3 as well. This
+ *  seems likely to happen when dda2 is small, as the jumps would be larger.
+ *
+ * To invert this,
+ *
+ *  pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
+
+/*
+ * Register programming values for TV modes.
+ *
+ * These values already account for the -1 adjustments the hardware
+ * requires.
+ */
+
+static const struct tv_mode tv_modes[] = {
+	{
+		.name		= "NTSC-M",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start	= 836,		    .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_4,
+		.pal_burst	= false,
+
+		.composite_levels = &ntsc_m_levels_composite,
+		.composite_color = &ntsc_m_csc_composite,
+		.svideo_levels  = &ntsc_m_levels_svideo,
+		.svideo_color = &ntsc_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "NTSC-443",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start	= 836,		    .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1 = 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    168,
+		.dda2_inc       =   4093,       .dda2_size      =  27456,
+		.dda3_inc       =    310,       .dda3_size      =    525,
+		.sc_reset   = TV_SC_RESET_NEVER,
+		.pal_burst  = false,
+
+		.composite_levels = &ntsc_m_levels_composite,
+		.composite_color = &ntsc_m_csc_composite,
+		.svideo_levels  = &ntsc_m_levels_svideo,
+		.svideo_color = &ntsc_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "NTSC-J",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start = 836,	    .htotal		= 857,
+
+		.progressive	= false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,	    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena      = true,	    .veq_start_f1	= 0,
+		.veq_start_f2 = 1,	    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_4,
+		.pal_burst	= false,
+
+		.composite_levels = &ntsc_j_levels_composite,
+		.composite_color = &ntsc_j_csc_composite,
+		.svideo_levels  = &ntsc_j_levels_svideo,
+		.svideo_color = &ntsc_j_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "PAL-M",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+		.hsync_end	= 64,		  .hblank_end		= 124,
+		.hblank_start = 836,	  .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  16704,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_m_levels_composite,
+		.composite_color = &pal_m_csc_composite,
+		.svideo_levels  = &pal_m_levels_svideo,
+		.svideo_color = &pal_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+		.name	    = "PAL-N",
+		.clock		= 108000,
+		.refresh	= 50000,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		.hsync_end	= 64,		    .hblank_end		= 128,
+		.hblank_start = 844,	    .htotal		= 863,
+
+		.progressive  = false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,	   .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
+		.nbr_end	= 286,
+
+		.burst_ena	= true,
+		.hburst_start = 73,	    .hburst_len		= 34,
+		.vburst_start_f1 = 8,	    .vburst_end_f1	= 285,
+		.vburst_start_f2 = 8,	    .vburst_end_f2	= 286,
+		.vburst_start_f3 = 9,	    .vburst_end_f3	= 286,
+		.vburst_start_f4 = 9,	    .vburst_end_f4	= 285,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    135,
+		.dda2_inc       =  23578,       .dda2_size      =  27648,
+		.dda3_inc       =    134,       .dda3_size      =    625,
+		.sc_reset   = TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_n_levels_composite,
+		.composite_color = &pal_n_csc_composite,
+		.svideo_levels  = &pal_n_levels_svideo,
+		.svideo_color = &pal_n_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+		.name	    = "PAL",
+		.clock		= 108000,
+		.refresh	= 50000,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		.hsync_end	= 64,		    .hblank_end		= 142,
+		.hblank_start	= 844,	    .htotal		= 863,
+
+		.progressive	= false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 5,	    .vsync_start_f2	= 6,
+		.vsync_len	= 5,
+
+		.veq_ena	= true,	    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,	    .veq_len		= 15,
+
+		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
+		.nbr_end	= 286,
+
+		.burst_ena	= true,
+		.hburst_start	= 73,		    .hburst_len		= 32,
+		.vburst_start_f1 = 8,		    .vburst_end_f1	= 285,
+		.vburst_start_f2 = 8,		    .vburst_end_f2	= 286,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 286,
+		.vburst_start_f4 = 9,		    .vburst_end_f4	= 285,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    168,
+		.dda2_inc       =   4122,       .dda2_size      =  27648,
+		.dda3_inc       =     67,       .dda3_size      =    625,
+		.sc_reset   = TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_levels_composite,
+		.composite_color = &pal_csc_composite,
+		.svideo_levels  = &pal_levels_svideo,
+		.svideo_color = &pal_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "480p",
+		.clock		= 107520,
+		.refresh	= 59940,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 122,
+		.hblank_start   = 842,              .htotal             = 857,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+		.vsync_len      = 12,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 44,               .vi_end_f2          = 44,
+		.nbr_end        = 479,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "576p",
+		.clock		= 107520,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 139,
+		.hblank_start   = 859,              .htotal             = 863,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 48,               .vi_end_f2          = 48,
+		.nbr_end        = 575,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "720p@60Hz",
+		.clock		= 148800,
+		.refresh	= 60000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 80,               .hblank_end         = 300,
+		.hblank_start   = 1580,             .htotal             = 1649,
+
+		.progressive	= true,		    .trilevel_sync = true,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 29,               .vi_end_f2          = 29,
+		.nbr_end        = 719,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "720p@50Hz",
+		.clock		= 148800,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 80,               .hblank_end         = 300,
+		.hblank_start   = 1580,             .htotal             = 1979,
+
+		.progressive	= true,		    .trilevel_sync = true,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 29,               .vi_end_f2          = 29,
+		.nbr_end        = 719,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+		.max_srcw = 800,
+	},
+	{
+		.name       = "1080i@50Hz",
+		.clock		= 148800,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 88,               .hblank_end         = 235,
+		.hblank_start   = 2155,             .htotal             = 2639,
+
+		.progressive	= false,	  .trilevel_sync = true,
+
+		.vsync_start_f1 = 4,              .vsync_start_f2     = 5,
+		.vsync_len      = 10,
+
+		.veq_ena	= true,	    .veq_start_f1	= 4,
+		.veq_start_f2   = 4,	    .veq_len		= 10,
+
+		.vi_end_f1      = 21,           .vi_end_f2          = 22,
+		.nbr_end        = 539,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "1080i@60Hz",
+		.clock		= 148800,
+		.refresh	= 60000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 88,               .hblank_end         = 235,
+		.hblank_start   = 2155,             .htotal             = 2199,
+
+		.progressive	= false,	    .trilevel_sync = true,
+
+		.vsync_start_f1 = 4,               .vsync_start_f2     = 5,
+		.vsync_len      = 10,
+
+		.veq_ena	= true,		    .veq_start_f1	= 4,
+		.veq_start_f2	= 4,		    .veq_len		= 10,
+
+		.vi_end_f1      = 21,               .vi_end_f2          = 22,
+		.nbr_end        = 539,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+};
+
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_tv, base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+	return enc_to_tv(intel_attached_encoder(connector));
+}
+
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(TV_CTL);
+
+	if (!(tmp & TV_ENC_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void
+intel_enable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
+	intel_wait_for_vblank(encoder->base.dev,
+			      to_intel_crtc(encoder->base.crtc)->pipe);
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(const char *tv_format)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		const struct tv_mode *tv_mode = &tv_modes[i];
+
+		if (!strcmp(tv_format, tv_mode->name))
+			return tv_mode;
+	}
+	return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_tv *intel_tv)
+{
+	return intel_tv_mode_lookup(intel_tv->tv_format);
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	/* Ensure TV refresh is close to desired refresh */
+	if (tv_mode &&
+	    abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000)
+		return MODE_OK;
+
+	return MODE_CLOCK_RANGE;
+}
+
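+/*
+ * Worked example for the refresh check above: tv_mode->refresh is kept in
+ * millihertz, so with NTSC-M (refresh = 59940) a 60Hz user mode passes:
+ *
+ *	abs(59940 - 60 * 1000) = 60   -> MODE_OK
+ *	abs(59940 - 50 * 1000) = 9940 -> MODE_CLOCK_RANGE
+ */
+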
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+		    struct intel_crtc_state *pipe_config)
+{
+	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static bool
+intel_tv_compute_config(struct intel_encoder *encoder,
+			struct intel_crtc_state *pipe_config)
+{
+	struct intel_tv *intel_tv = enc_to_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	if (!tv_mode)
+		return false;
+
+	pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+	pipe_config->pipe_bpp = 8*3;
+
+	/* TV has its own notion of sync and other mode flags, so clear them. */
+	pipe_config->base.adjusted_mode.flags = 0;
+
+	/*
+	 * FIXME: We don't check whether the input mode is actually what we want
+	 * or whether userspace is doing something stupid.
+	 */
+
+	return true;
+}
+
+static void
+set_tv_mode_timings(struct drm_i915_private *dev_priv,
+		    const struct tv_mode *tv_mode,
+		    bool burst_ena)
+{
+	u32 hctl1, hctl2, hctl3;
+	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+
+	hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+		(tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+	hctl2 = (tv_mode->hburst_start << 16) |
+		(tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+	if (burst_ena)
+		hctl2 |= TV_BURST_ENA;
+
+	hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+		(tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+	vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+		(tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+		(tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+	vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+		(tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+		(tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+	vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+		(tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+		(tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+	if (tv_mode->veq_ena)
+		vctl3 |= TV_EQUAL_ENA;
+
+	vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+		(tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+	vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+		(tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+	vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+		(tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+	vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+		(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+	I915_WRITE(TV_H_CTL_1, hctl1);
+	I915_WRITE(TV_H_CTL_2, hctl2);
+	I915_WRITE(TV_H_CTL_3, hctl3);
+	I915_WRITE(TV_V_CTL_1, vctl1);
+	I915_WRITE(TV_V_CTL_2, vctl2);
+	I915_WRITE(TV_V_CTL_3, vctl3);
+	I915_WRITE(TV_V_CTL_4, vctl4);
+	I915_WRITE(TV_V_CTL_5, vctl5);
+	I915_WRITE(TV_V_CTL_6, vctl6);
+	I915_WRITE(TV_V_CTL_7, vctl7);
+}
+
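+/*
+ * For example, programming the "PAL" entry above packs hsync_end = 64 and
+ * htotal = 863 into TV_H_CTL_1, and hblank_start = 844 / hblank_end = 142
+ * into TV_H_CTL_3.  Note that the burst enable bit in TV_H_CTL_2 comes from
+ * the burst_ena argument rather than the mode, since intel_tv_pre_enable()
+ * forces it off for component outputs.
+ */
+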
+static void set_color_conversion(struct drm_i915_private *dev_priv,
+				 const struct color_conversion *color_conversion)
+{
+	if (!color_conversion)
+		return;
+
+	I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+		   color_conversion->gy);
+	I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+		   color_conversion->ay);
+	I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+		   color_conversion->gu);
+	I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+		   color_conversion->au);
+	I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+		   color_conversion->gv);
+	I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+		   color_conversion->av);
+}
+
+static void intel_tv_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_tv *intel_tv = enc_to_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	u32 tv_ctl;
+	u32 scctl1, scctl2, scctl3;
+	int i, j;
+	const struct video_levels *video_levels;
+	const struct color_conversion *color_conversion;
+	bool burst_ena;
+	int xpos = 0x0, ypos = 0x0;
+	unsigned int xsize, ysize;
+
+	if (!tv_mode)
+		return;	/* can't happen (mode_prepare prevents this) */
+
+	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl &= TV_CTL_SAVE;
+
+	switch (intel_tv->type) {
+	default:
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_Composite:
+		tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+		video_levels = tv_mode->composite_levels;
+		color_conversion = tv_mode->composite_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	case DRM_MODE_CONNECTOR_Component:
+		tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+		video_levels = &component_levels;
+		if (tv_mode->burst_ena)
+			color_conversion = &sdtv_csc_yprpb;
+		else
+			color_conversion = &hdtv_csc_yprpb;
+		burst_ena = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+		tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+		video_levels = tv_mode->svideo_levels;
+		color_conversion = tv_mode->svideo_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	}
+
+	if (intel_crtc->pipe == 1)
+		tv_ctl |= TV_ENC_PIPEB_SELECT;
+	tv_ctl |= tv_mode->oversample;
+
+	if (tv_mode->progressive)
+		tv_ctl |= TV_PROGRESSIVE;
+	if (tv_mode->trilevel_sync)
+		tv_ctl |= TV_TRILEVEL_SYNC;
+	if (tv_mode->pal_burst)
+		tv_ctl |= TV_PAL_BURST;
+
+	scctl1 = 0;
+	if (tv_mode->dda1_inc)
+		scctl1 |= TV_SC_DDA1_EN;
+	if (tv_mode->dda2_inc)
+		scctl1 |= TV_SC_DDA2_EN;
+	if (tv_mode->dda3_inc)
+		scctl1 |= TV_SC_DDA3_EN;
+	scctl1 |= tv_mode->sc_reset;
+	if (video_levels)
+		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+		tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+	scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+	/* Enable two fixes for the chips that need them. */
+	if (IS_I915GM(dev))
+		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+	set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+
+	I915_WRITE(TV_SC_CTL_1, scctl1);
+	I915_WRITE(TV_SC_CTL_2, scctl2);
+	I915_WRITE(TV_SC_CTL_3, scctl3);
+
+	set_color_conversion(dev_priv, color_conversion);
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+	else
+		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
+	if (video_levels)
+		I915_WRITE(TV_CLR_LEVEL,
+			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+
+	assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+
+	/* Filter ctl must be set before TV_WIN_SIZE */
+	I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+	xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+	if (tv_mode->progressive)
+		ysize = tv_mode->nbr_end + 1;
+	else
+		ysize = 2*tv_mode->nbr_end + 1;
+
+	xpos += intel_tv->margin[TV_MARGIN_LEFT];
+	ypos += intel_tv->margin[TV_MARGIN_TOP];
+	xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+		  intel_tv->margin[TV_MARGIN_RIGHT]);
+	ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+		  intel_tv->margin[TV_MARGIN_BOTTOM]);
+	I915_WRITE(TV_WIN_POS, (xpos << 16) | ypos);
+	I915_WRITE(TV_WIN_SIZE, (xsize << 16) | ysize);
+
+	j = 0;
+	for (i = 0; i < 60; i++)
+		I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
+	for (i = 0; i < 60; i++)
+		I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
+	for (i = 0; i < 43; i++)
+		I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
+	for (i = 0; i < 43; i++)
+		I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+	I915_WRITE(TV_CTL, tv_ctl);
+}
+
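+/*
+ * Worked example for the window sizing above: with the "PAL" entry
+ * (hblank_start = 844, hblank_end = 142, nbr_end = 286, interlaced),
+ * xsize = 844 - 142 = 702 and ysize = 2 * 286 + 1 = 573.  The default
+ * margins set up in intel_tv_init() (54 + 46 horizontal, 36 + 37 vertical)
+ * then shrink the window to 602x500 before TV_WIN_SIZE is written.
+ */
+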
+static const struct drm_display_mode reported_modes[] = {
+	{
+		.name = "NTSC 480i",
+		.clock = 107520,
+		.hdisplay = 1280,
+		.hsync_start = 1368,
+		.hsync_end = 1496,
+		.htotal = 1712,
+
+		.vdisplay = 1024,
+		.vsync_start = 1027,
+		.vsync_end = 1034,
+		.vtotal = 1104,
+		.type = DRM_MODE_TYPE_DRIVER,
+	},
+};
+
+/**
+ * Detects TV presence by checking for load.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return the detected connector type (DRM_MODE_CONNECTOR_*) if a TV is
+ * connected.
+ * \return -1 if no TV is detected.
+ */
+static int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = &intel_tv->base.base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tv_ctl, save_tv_ctl;
+	u32 tv_dac, save_tv_dac;
+	int type;
+
+	/* Disable TV interrupts around load detect or we'll recurse */
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_STATUS |
+				      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+		spin_unlock_irq(&dev_priv->irq_lock);
+	}
+
+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+	/* Poll for TV detection */
+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+	if (intel_crtc->pipe == 1)
+		tv_ctl |= TV_ENC_PIPEB_SELECT;
+	else
+		tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+	tv_dac |= (TVDAC_STATE_CHG_EN |
+		   TVDAC_A_SENSE_CTL |
+		   TVDAC_B_SENSE_CTL |
+		   TVDAC_C_SENSE_CTL |
+		   DAC_CTL_OVERRIDE |
+		   DAC_A_0_7_V |
+		   DAC_B_0_7_V |
+		   DAC_C_0_7_V);
+
+	/*
+	 * The TV sense state should be cleared to zero on the Cantiga
+	 * platform; otherwise the TV is misdetected. This is a hardware
+	 * requirement.
+	 */
+	if (IS_GM45(dev))
+		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
+	I915_WRITE(TV_CTL, tv_ctl);
+	I915_WRITE(TV_DAC, tv_dac);
+	POSTING_READ(TV_DAC);
+
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+	type = -1;
+	tv_dac = I915_READ(TV_DAC);
+	DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+	/*
+	 *  A B C
+	 *  0 1 1 Composite
+	 *  1 0 X svideo
+	 *  0 0 0 Component
+	 */
+	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+		DRM_DEBUG_KMS("Detected Composite TV connection\n");
+		type = DRM_MODE_CONNECTOR_Composite;
+	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+		type = DRM_MODE_CONNECTOR_SVIDEO;
+	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+		DRM_DEBUG_KMS("Detected Component TV connection\n");
+		type = DRM_MODE_CONNECTOR_Component;
+	} else {
+		DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		type = -1;
+	}
+
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	I915_WRITE(TV_CTL, save_tv_ctl);
+	POSTING_READ(TV_CTL);
+
+	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+	/* Restore interrupt config */
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_STATUS |
+				     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+		spin_unlock_irq(&dev_priv->irq_lock);
+	}
+
+	return type;
+}
+
+/*
+ * Here we pick an accurate TV format according to the connector type,
+ * i.e. a Component connection should not be left assigned to NTSC or PAL.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	int i;
+
+	if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+		tv_mode->component_only)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		tv_mode = tv_modes + i;
+
+		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+			tv_mode->component_only)
+			break;
+	}
+
+	intel_tv->tv_format = tv_mode->name;
+	drm_object_property_set_value(&connector->base,
+		connector->dev->mode_config.tv_mode_property, i);
+}
+
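+/*
+ * For example, if load detection reported a Component connection while the
+ * current format is "NTSC-J" (component_only == 0), the loop above settles
+ * on "480p", the first component_only entry in tv_modes[], and the tv_mode
+ * property is updated to match.
+ */
+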
+/**
+ * Detect the TV connection.
+ *
+ * Unless probing is forced, this returns the cached connector status; we
+ * need a pipe programmed in order to actually probe the TV.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_display_mode mode;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	enum drm_connector_status status;
+	int type;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+		      connector->base.id, connector->name,
+		      force);
+
+	mode = reported_modes[0];
+
+	if (force) {
+		struct intel_load_detect_pipe tmp;
+		struct drm_modeset_acquire_ctx ctx;
+
+		drm_modeset_acquire_init(&ctx, 0);
+
+		if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
+			type = intel_tv_detect_type(intel_tv, connector);
+			intel_release_load_detect_pipe(connector, &tmp, &ctx);
+			status = type < 0 ?
+				connector_status_disconnected :
+				connector_status_connected;
+		} else {
+			status = connector_status_unknown;
+		}
+
+		drm_modeset_drop_locks(&ctx);
+		drm_modeset_acquire_fini(&ctx);
+	} else {
+		return connector->status;
+	}
+
+	if (status != connector_status_connected)
+		return status;
+
+	intel_tv->type = type;
+	intel_tv_find_better_format(connector);
+
+	return connector_status_connected;
+}
+
+static const struct input_res {
+	const char *name;
+	int w, h;
+} input_res_table[] = {
+	{"640x480", 640, 480},
+	{"800x600", 800, 600},
+	{"1024x768", 1024, 768},
+	{"1280x1024", 1280, 1024},
+	{"848x480", 848, 480},
+	{"1280x720", 1280, 720},
+	{"1920x1080", 1920, 1080},
+};
+
+/*
+ * Choose the preferred mode according to the line count of the TV format.
+ */
+static void
+intel_tv_chose_preferred_modes(struct drm_connector *connector,
+			       struct drm_display_mode *mode_ptr)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+		mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+	else if (tv_mode->nbr_end > 480) {
+		if (tv_mode->progressive && tv_mode->nbr_end < 720) {
+			if (mode_ptr->vdisplay == 720)
+				mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+		} else if (mode_ptr->vdisplay == 1080)
+			mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+}
+
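+/*
+ * Net effect of the checks above: SDTV formats (nbr_end = 240/286) mark the
+ * 480-line inputs preferred, "576p" and "720p" (progressive, nbr_end < 720)
+ * mark 1280x720, and the interlaced "1080i" formats mark 1920x1080.
+ */
+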
+/**
+ * Return a fixed set of input modes.
+ *
+ * We cannot probe modes off of a TV connection, so we report a fixed set of
+ * input resolutions and let the hardware scale them to the selected TV
+ * format.
+ */
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode_ptr;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	int j, count = 0;
+	u64 tmp;
+
+	for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
+		const struct input_res *input = &input_res_table[j];
+		unsigned int hactive_s = input->w;
+		unsigned int vactive_s = input->h;
+
+		if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+			continue;
+
+		if (input->w > 1024 && (!tv_mode->progressive
+					&& !tv_mode->component_only))
+			continue;
+
+		mode_ptr = drm_mode_create(connector->dev);
+		if (!mode_ptr)
+			continue;
+		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+		mode_ptr->hdisplay = hactive_s;
+		mode_ptr->hsync_start = hactive_s + 1;
+		mode_ptr->hsync_end = hactive_s + 64;
+		if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+			mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+		mode_ptr->htotal = hactive_s + 96;
+
+		mode_ptr->vdisplay = vactive_s;
+		mode_ptr->vsync_start = vactive_s + 1;
+		mode_ptr->vsync_end = vactive_s + 32;
+		if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+			mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+		mode_ptr->vtotal = vactive_s + 33;
+
+		tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+		tmp *= mode_ptr->htotal;
+		tmp = div_u64(tmp, 1000000);
+		mode_ptr->clock = (int) tmp;
+
+		mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+		intel_tv_chose_preferred_modes(connector, mode_ptr);
+		drm_mode_probed_add(connector, mode_ptr);
+		count++;
+	}
+
+	return count;
+}
+
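+/*
+ * Sanity check for the clock computation above: the "640x480" input yields
+ * htotal = 736 and vtotal = 513, so at NTSC's 59940 mHz refresh the mode
+ * clock becomes 59940 * 513 * 736 / 10^6 = 22631 kHz.
+ */
+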
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
+	int ret = 0;
+	bool changed = false;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret < 0)
+		goto out;
+
+	if (property == dev->mode_config.tv_left_margin_property &&
+		intel_tv->margin[TV_MARGIN_LEFT] != val) {
+		intel_tv->margin[TV_MARGIN_LEFT] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_right_margin_property &&
+		intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+		intel_tv->margin[TV_MARGIN_RIGHT] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_top_margin_property &&
+		intel_tv->margin[TV_MARGIN_TOP] != val) {
+		intel_tv->margin[TV_MARGIN_TOP] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_bottom_margin_property &&
+		intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_mode_property) {
+		if (val >= ARRAY_SIZE(tv_modes)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+			goto out;
+
+		intel_tv->tv_format = tv_modes[val].name;
+		changed = true;
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (changed && crtc)
+		intel_crtc_restore_mode(crtc);
+out:
+	return ret;
+}
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = intel_tv_detect,
+	.destroy = intel_tv_destroy,
+	.set_property = intel_tv_set_property,
+	.atomic_get_property = intel_connector_atomic_get_property,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+	.mode_valid = intel_tv_mode_valid,
+	.get_modes = intel_tv_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, assume that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	union child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		switch (p_child->old.device_type) {
+		case DEVICE_TYPE_INT_TV:
+		case DEVICE_TYPE_TV:
+		case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+			break;
+		default:
+			continue;
+		}
+		/*
+		 * Only when the addin_offset is non-zero is the device
+		 * regarded as present.
+		 */
+		if (p_child->old.addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
+	struct intel_tv *intel_tv;
+	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
+	u32 tv_dac_on, tv_dac_off, save_tv_dac;
+	const char *tv_format_names[ARRAY_SIZE(tv_modes)];
+	int i, initial_mode = 0;
+
+	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+		return;
+
+	if (!tv_is_present_in_vbt(dev)) {
+		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		return;
+	}
+	/* Even if we have an encoder we may not have a connector */
+	if (!dev_priv->vbt.int_tv_support)
+		return;
+
+	/*
+	 * Sanity check the TV output by checking whether the
+	 * DAC register holds a value.
+	 */
+	save_tv_dac = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+	tv_dac_on = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	tv_dac_off = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac);
+
+	/*
+	 * If the register does not hold the state change enable bit
+	 * (either as a 0 or a 1), assume it doesn't really exist.
+	 */
+	if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+		return;
+
+	intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
+	if (!intel_tv)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_tv);
+		return;
+	}
+
+	intel_encoder = &intel_tv->base;
+	connector = &intel_connector->base;
+
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive, increasing power drain and starving
+	 * bandwidth from other encoders. Notably, for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+			   DRM_MODE_CONNECTOR_SVIDEO);
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+			 DRM_MODE_ENCODER_TVDAC);
+
+	intel_encoder->compute_config = intel_tv_compute_config;
+	intel_encoder->get_config = intel_tv_get_config;
+	intel_encoder->pre_enable = intel_tv_pre_enable;
+	intel_encoder->enable = intel_enable_tv;
+	intel_encoder->disable = intel_disable_tv;
+	intel_encoder->get_hw_state = intel_tv_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_encoder->type = INTEL_OUTPUT_TVOUT;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->cloneable = 0;
+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+	/* BIOS margin values */
+	intel_tv->margin[TV_MARGIN_LEFT] = 54;
+	intel_tv->margin[TV_MARGIN_TOP] = 36;
+	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+	intel_tv->tv_format = tv_modes[initial_mode].name;
+
+	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/* Create TV properties then attach current values */
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+		tv_format_names[i] = tv_modes[i].name;
+	drm_mode_create_tv_properties(dev,
+				      ARRAY_SIZE(tv_modes),
+				      tv_format_names);
+
+	drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+				   initial_mode);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_left_margin_property,
+				   intel_tv->margin[TV_MARGIN_LEFT]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_top_margin_property,
+				   intel_tv->margin[TV_MARGIN_TOP]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_right_margin_property,
+				   intel_tv->margin[TV_MARGIN_RIGHT]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_bottom_margin_property,
+				   intel_tv->margin[TV_MARGIN_BOTTOM]);
+	drm_connector_register(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 0000000..6fd7b50
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,1571 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "i915_vgpu.h"
+
+#include <linux/pm_runtime.h>
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 50
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+static const char * const forcewake_domain_names[] = {
+	"render",
+	"blitter",
+	"media",
+};
+
+const char *
+intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
+
+	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
+		return forcewake_domain_names[id];
+
+	WARN_ON(id);
+
+	return "unknown";
+}
+
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+		  "Device suspended\n");
+}
+
+static inline void
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
+{
+	WARN_ON(d->reg_set == 0);
+	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
+}
+
+static inline void
+fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
+{
+	mod_timer_pinned(&d->timer, jiffies + 1);
+}
+
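+/*
+ * The one-jiffy timeout used above means a forcewake reference taken
+ * implicitly by a register access is only dropped again by the release
+ * timer (intel_uncore_fw_release_timer()) about a tick later, so
+ * back-to-back accesses share a single wake/sleep handshake.
+ */
+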
+static inline void
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
+{
+	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+			     FORCEWAKE_KERNEL) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
+			  intel_uncore_forcewake_domain_to_str(d->id));
+}
+
+static inline void
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+{
+	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
+}
+
+static inline void
+fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+{
+	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+			     FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
+			  intel_uncore_forcewake_domain_to_str(d->id));
+}
+
+static inline void
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+{
+	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
+}
+
+static inline void
+fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
+{
+	/* something from same cacheline, but not from the set register */
+	if (d->reg_post)
+		__raw_posting_read(d->i915, d->reg_post);
+}
+
+static void
+fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+		fw_domain_wait_ack_clear(d);
+		fw_domain_get(d);
+		fw_domain_wait_ack(d);
+	}
+}
+
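+/*
+ * The sequence above is the per-domain wake handshake: wait for any
+ * previous ack to clear, write the wake request, then wait for the
+ * hardware to ack it before any register in the domain is touched.
+ */
+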
+static void
+fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+		fw_domain_put(d);
+		fw_domain_posting_read(d);
+	}
+}
+
+static void
+fw_domains_posting_read(struct drm_i915_private *dev_priv)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	/* No need to do this for all domains, just the first one found */
+	for_each_fw_domain(d, dev_priv, id) {
+		fw_domain_posting_read(d);
+		break;
+	}
+}
+
+static void
+fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	if (dev_priv->uncore.fw_domains == 0)
+		return;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+		fw_domain_reset(d);
+
+	fw_domains_posting_read(dev_priv);
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+	/* Work around a sporadic read returning 0 by waiting for the GT
+	 * thread to wake up.
+	 */
+	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
+		DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+					      enum forcewake_domains fw_domains)
+{
+	fw_domains_get(dev_priv, fw_domains);
+
+	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+	u32 gtfifodbg;
+
+	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
+}
+
+static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
+				     enum forcewake_domains fw_domains)
+{
+	fw_domains_put(dev_priv, fw_domains);
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+{
+	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+
+	return count & GT_FIFO_FREE_ENTRIES_MASK;
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int ret = 0;
+
+	/* On VLV, the FIFO is shared by both SW and HW,
+	 * so we need to read FREE_ENTRIES every time. */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
+
+	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+		int loop = 500;
+		u32 fifo = fifo_free_entries(dev_priv);
+
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = fifo_free_entries(dev_priv);
+		}
+		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+			++ret;
+		dev_priv->uncore.fifo_count = fifo;
+	}
+	dev_priv->uncore.fifo_count--;
+
+	return ret;
+}
+
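+/*
+ * The accounting above mirrors the hardware's GT wake FIFO: each posted
+ * write consumes an entry, so when the cached count drops below the
+ * reserved threshold we poll (up to 500 * 10us) for free entries before
+ * claiming one.
+ */
+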
+static void intel_uncore_fw_release_timer(unsigned long arg)
+{
+	struct intel_uncore_forcewake_domain *domain = (void *)arg;
+	unsigned long irqflags;
+
+	assert_device_not_suspended(domain->i915);
+
+	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+	if (WARN_ON(domain->wake_count == 0))
+		domain->wake_count++;
+
+	if (--domain->wake_count == 0)
+		domain->i915->uncore.funcs.force_wake_put(domain->i915,
+							  1 << domain->id);
+
+	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+}
+
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	struct intel_uncore_forcewake_domain *domain;
+	int retry_count = 100;
+	enum forcewake_domain_id id;
+	enum forcewake_domains fw = 0, active_domains;
+
+	/* Hold uncore.lock across reset to prevent any register access
+	 * with forcewake not set correctly. Wait until all pending
+	 * timers are run before holding.
+	 */
+	while (1) {
+		active_domains = 0;
+
+		for_each_fw_domain(domain, dev_priv, id) {
+			if (del_timer_sync(&domain->timer) == 0)
+				continue;
+
+			intel_uncore_fw_release_timer((unsigned long)domain);
+		}
+
+		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+		for_each_fw_domain(domain, dev_priv, id) {
+			if (timer_pending(&domain->timer))
+				active_domains |= (1 << id);
+		}
+
+		if (active_domains == 0)
+			break;
+
+		if (--retry_count == 0) {
+			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+			break;
+		}
+
+		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+		cond_resched();
+	}
+
+	WARN_ON(active_domains);
+
+	for_each_fw_domain(domain, dev_priv, id)
+		if (domain->wake_count)
+			fw |= 1 << id;
+
+	if (fw)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+
+	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
+
+	if (restore) { /* If reset with a user forcewake, try to restore */
+		if (fw)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
+
+		if (IS_GEN6(dev) || IS_GEN7(dev))
+			dev_priv->uncore.fifo_count =
+				fifo_free_entries(dev_priv);
+	}
+
+	if (!restore)
+		assert_forcewakes_inactive(dev_priv);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void intel_uncore_ellc_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+	     INTEL_INFO(dev)->gen >= 9) &&
+	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
+		 * NB: We can't write IDICR yet because we do not have gt funcs
+		 * set up */
+		dev_priv->ellc_size = 128;
+		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	}
+}
+
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+					  bool restore_forcewake)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_FPGA_DBG_UNCLAIMED(dev))
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+	/* clear out old GT FIFO errors */
+	if (IS_GEN6(dev) || IS_GEN7(dev))
+		__raw_i915_write32(dev_priv, GTFIFODBG,
+				   __raw_i915_read32(dev_priv, GTFIFODBG));
+
+	/* WaDisableShadowRegForCpd:chv */
+	if (IS_CHERRYVIEW(dev)) {
+		__raw_i915_write32(dev_priv, GTFIFOCTL,
+				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
+				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+				   GT_FIFO_CTL_RC6_POLICY_STALL);
+	}
+
+	intel_uncore_forcewake_reset(dev, restore_forcewake);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+	__intel_uncore_early_sanitize(dev, restore_forcewake);
+	i915_check_and_clear_faults(dev);
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	intel_disable_gt_powersave(dev);
+}
+
+static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+					 enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	fw_domains &= dev_priv->uncore.fw_domains;
+
+	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+		if (domain->wake_count++)
+			fw_domains &= ~(1 << id);
+	}
+
+	if (fw_domains)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+}
+
+/**
+ * intel_uncore_forcewake_get - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * This function can be used to get the GT's forcewake domain references.
+ * Normal register access will handle the forcewake domains automatically.
+ * However, if some sequence requires the GT to not power down particular
+ * forcewake domains, this function should be called at the beginning of the
+ * sequence, and the reference should subsequently be dropped by a symmetric
+ * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
+ * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
+ */
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+				enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	WARN_ON(dev_priv->pm.suspended);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
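+/*
+ * A minimal sketch of explicit forcewake use (most code instead relies on
+ * the automatic handling in the register accessors below):
+ *
+ *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ *	... burst of register accesses that must not race a power-down ...
+ *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ */
+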
+/**
+ * intel_uncore_forcewake_get__locked - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * See intel_uncore_forcewake_get(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ */
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains fw_domains)
+{
+	assert_spin_locked(&dev_priv->uncore.lock);
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+}
+
+static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+					 enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	fw_domains &= dev_priv->uncore.fw_domains;
+
+	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+		if (WARN_ON(domain->wake_count == 0))
+			continue;
+
+		if (--domain->wake_count)
+			continue;
+
+		domain->wake_count++;
+		fw_domain_arm_timer(domain);
+	}
+}
+
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * This function drops the device-level forcewakes for specified
+ * domains obtained by intel_uncore_forcewake_get().
+ */
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+				enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/**
+ * intel_uncore_forcewake_put__locked - release forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * See intel_uncore_forcewake_put(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ */
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains fw_domains)
+{
+	assert_spin_locked(&dev_priv->uncore.lock);
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+}
+
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	for_each_fw_domain(domain, dev_priv, id)
+		WARN_ON(domain->wake_count);
+}
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(reg) \
+	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
+
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x2000, 0x4000) || \
+	 REG_RANGE((reg), 0x5000, 0x8000) || \
+	 REG_RANGE((reg), 0xB000, 0x12000) || \
+	 REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x12000, 0x14000) || \
+	 REG_RANGE((reg), 0x22000, 0x24000) || \
+	 REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x2000, 0x4000) || \
+	 REG_RANGE((reg), 0x5200, 0x8000) || \
+	 REG_RANGE((reg), 0x8300, 0x8500) || \
+	 REG_RANGE((reg), 0xB000, 0xB480) || \
+	 REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x8800, 0x8900) || \
+	 REG_RANGE((reg), 0xD000, 0xD800) || \
+	 REG_RANGE((reg), 0x12000, 0x14000) || \
+	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
+	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+	 REG_RANGE((reg), 0x30000, 0x38000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x4000, 0x5000) || \
+	 REG_RANGE((reg), 0x8000, 0x8300) || \
+	 REG_RANGE((reg), 0x8500, 0x8600) || \
+	 REG_RANGE((reg), 0x9000, 0xB000) || \
+	 REG_RANGE((reg), 0xF000, 0x10000))
+
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+	REG_RANGE((reg), 0xB00,  0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x2000, 0x2700) || \
+	 REG_RANGE((reg), 0x3000, 0x4000) || \
+	 REG_RANGE((reg), 0x5200, 0x8000) || \
+	 REG_RANGE((reg), 0x8140, 0x8160) || \
+	 REG_RANGE((reg), 0x8300, 0x8500) || \
+	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
+	 REG_RANGE((reg), 0xB000, 0xB480) || \
+	 REG_RANGE((reg), 0xE000, 0xE900) || \
+	 REG_RANGE((reg), 0x24400, 0x24800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x8130, 0x8140) || \
+	 REG_RANGE((reg), 0x8800, 0x8A00) || \
+	 REG_RANGE((reg), 0xD000, 0xD800) || \
+	 REG_RANGE((reg), 0x12000, 0x14000) || \
+	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+	 REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+	REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+	((reg) < 0x40000 &&\
+	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
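+/*
+ * Example classification under the gen9 ranges above: 0x2030 (render ring
+ * tail) hits the render range, 0x12000 hits the media range, 0x9400 hits
+ * the common range (render + media), and any other register below 0x40000
+ * that escapes the uncore range falls to the blitter domain.
+ */
+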
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk
+	 * Issue a dummy write to wake up the chip from RC6 before touching it
+	 * for real. MI_MODE is masked, hence harmless to write 0 into. */
+	__raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+			bool before)
+{
+	const char *op = read ? "reading" : "writing to";
+	const char *when = before ? "before" : "after";
+
+	if (!i915.mmio_debug)
+		return;
+
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+		     when, op, reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		i915.mmio_debug--; /* Only report the first N failures */
+	}
+}
+
+static void
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
+{
+	static bool mmio_debug_once = true;
+
+	if (i915.mmio_debug || !mmio_debug_once)
+		return;
+
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		DRM_DEBUG("Unclaimed register detected, "
+			  "enabling oneshot unclaimed register reporting. "
+			  "Please use i915.mmio_debug=N for more information.\n");
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+		i915.mmio_debug = mmio_debug_once;
+		mmio_debug_once = false;
+	}
+}
+
+#define GEN2_READ_HEADER(x) \
+	u##x val = 0; \
+	assert_device_not_suspended(dev_priv);
+
+#define GEN2_READ_FOOTER \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val
+
+#define __gen2_read(x) \
+static u##x \
+gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN2_READ_HEADER(x); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	GEN2_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN2_READ_HEADER(x); \
+	ilk_dummy_write(dev_priv); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	GEN2_READ_FOOTER; \
+}
+
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen2_read(8)
+__gen2_read(16)
+__gen2_read(32)
+__gen2_read(64)
+
+#undef __gen5_read
+#undef __gen2_read
+
+#undef GEN2_READ_FOOTER
+#undef GEN2_READ_HEADER
+
+#define GEN6_READ_HEADER(x) \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	assert_device_not_suspended(dev_priv); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_READ_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val
+
+static inline void __force_wake_get(struct drm_i915_private *dev_priv,
+				    enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
+
+	if (WARN_ON(!fw_domains))
+		return;
+
+	/* Ideally GCC would constant-fold and eliminate this loop */
+	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+		if (domain->wake_count) {
+			fw_domains &= ~(1 << id);
+			continue;
+		}
+
+		domain->wake_count++;
+		fw_domain_arm_timer(domain);
+	}
+
+	if (fw_domains)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+}
+
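+/*
+ * Fast path note for __force_wake_get() above: domains already holding a
+ * wake reference are masked out, so only the domains that are actually
+ * asleep take a new reference, arm their release timer and pay for the
+ * wake handshake.
+ */
+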
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN6_READ_HEADER(x); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	GEN6_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN6_READ_HEADER(x); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
+	if (NEEDS_FORCE_WAKE(reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
+	GEN6_READ_FOOTER; \
+}
+
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN6_READ_HEADER(x); \
+	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	GEN6_READ_FOOTER; \
+}
+
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	GEN6_READ_HEADER(x); \
+	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+		__force_wake_get(dev_priv, \
+				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	GEN6_READ_FOOTER; \
+}
+
+#define SKL_NEEDS_FORCE_WAKE(reg) \
+	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	enum forcewake_domains fw_engine; \
+	GEN6_READ_HEADER(x); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
+	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	else \
+		fw_engine = FORCEWAKE_BLITTER; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
+	GEN6_READ_FOOTER; \
+}
+
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+
+#undef __gen9_read
+#undef __chv_read
+#undef __vlv_read
+#undef __gen6_read
+#undef __vgpu_read
+#undef GEN6_READ_FOOTER
+#undef GEN6_READ_HEADER
+
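+/*
+ * Pre-gen6 parts have no forcewake, so writes need neither the uncore
+ * spinlock nor any domain handling - only the tracepoint and the
+ * runtime-suspend assertion.
+ */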
+#define GEN2_WRITE_HEADER \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	assert_device_not_suspended(dev_priv);
+
+#define GEN2_WRITE_FOOTER
+
+#define __gen2_write(x) \
+static void \
+gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	GEN2_WRITE_HEADER; \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	GEN2_WRITE_FOOTER; \
+}
+
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	GEN2_WRITE_HEADER; \
+	ilk_dummy_write(dev_priv); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	GEN2_WRITE_FOOTER; \
+}
+
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen2_write(8)
+__gen2_write(16)
+__gen2_write(32)
+__gen2_write(64)
+
+#undef __gen5_write
+#undef __gen2_write
+
+#undef GEN2_WRITE_FOOTER
+#undef GEN2_WRITE_HEADER
+
+#define GEN6_WRITE_HEADER \
+	unsigned long irqflags; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	assert_device_not_suspended(dev_priv); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_WRITE_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
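+/*
+ * Gen6/7 GT writes go through a limited-depth hardware FIFO: wait for
+ * a free entry before writing and, if that wait failed, check the
+ * FIFO debug register for a reported error afterwards.
+ */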
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	GEN6_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE(reg)) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	GEN6_WRITE_FOOTER; \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	GEN6_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE(reg)) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
+	GEN6_WRITE_FOOTER; \
+}
+
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+			  off_t reg, u##x val, bool trace) { \
+	GEN6_WRITE_HEADER; \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	GEN6_WRITE_FOOTER; \
+}
+
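+/*
+ * Writes to these registers are shadowed by the hardware, so the
+ * gen8+ write paths do not need to grab forcewake for them.
+ */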
+static const u32 gen8_shadowed_regs[] = {
+	FORCEWAKE_MT,
+	GEN6_RPNSWREQ,
+	GEN6_RC_VIDEO_FREQ,
+	RING_TAIL(RENDER_RING_BASE),
+	RING_TAIL(GEN6_BSD_RING_BASE),
+	RING_TAIL(VEBOX_RING_BASE),
+	RING_TAIL(BLT_RING_BASE),
+	/* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+		if (reg == gen8_shadowed_regs[i])
+			return true;
+
+	return false;
+}
+
+#define __gen8_write(x) \
+static void \
+gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	GEN6_WRITE_HEADER; \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
+	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
+	GEN6_WRITE_FOOTER; \
+}
+
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+	GEN6_WRITE_HEADER; \
+	if (!shadowed) { \
+		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
+	} \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	GEN6_WRITE_FOOTER; \
+}
+
+static const u32 gen9_shadowed_regs[] = {
+	RING_TAIL(RENDER_RING_BASE),
+	RING_TAIL(GEN6_BSD_RING_BASE),
+	RING_TAIL(VEBOX_RING_BASE),
+	RING_TAIL(BLT_RING_BASE),
+	FORCEWAKE_BLITTER_GEN9,
+	FORCEWAKE_RENDER_GEN9,
+	FORCEWAKE_MEDIA_GEN9,
+	GEN6_RPNSWREQ,
+	GEN6_RC_VIDEO_FREQ,
+	/* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+		if (reg == gen9_shadowed_regs[i])
+			return true;
+
+	return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+		bool trace) { \
+	enum forcewake_domains fw_engine; \
+	GEN6_WRITE_HEADER; \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
+	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
+	    is_gen9_shadowed(dev_priv, reg)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	else \
+		fw_engine = FORCEWAKE_BLITTER; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
+	GEN6_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
+__gen8_write(8)
+__gen8_write(16)
+__gen8_write(32)
+__gen8_write(64)
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
+
+#undef __gen9_write
+#undef __chv_write
+#undef __gen8_write
+#undef __hsw_write
+#undef __gen6_write
+#undef __vgpu_write
+#undef GEN6_WRITE_FOOTER
+#undef GEN6_WRITE_HEADER
+
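+/*
+ * Token-pasting helpers that install a platform's 8/16/32/64-bit
+ * accessors into dev_priv->uncore.funcs in one go.
+ */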
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
+
+
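+/*
+ * Initialise a single forcewake domain: record the set/ack registers,
+ * pick the per-generation set/clear/reset values and the optional
+ * posting-read register, set up the deferred release timer, then
+ * publish the domain in the fw_domains mask and reset it to a known
+ * state.
+ */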
+static void fw_domain_init(struct drm_i915_private *dev_priv,
+			   enum forcewake_domain_id domain_id,
+			   u32 reg_set, u32 reg_ack)
+{
+	struct intel_uncore_forcewake_domain *d;
+
+	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+		return;
+
+	d = &dev_priv->uncore.fw_domain[domain_id];
+
+	WARN_ON(d->wake_count);
+
+	d->wake_count = 0;
+	d->reg_set = reg_set;
+	d->reg_ack = reg_ack;
+
+	if (IS_GEN6(dev_priv)) {
+		d->val_reset = 0;
+		d->val_set = FORCEWAKE_KERNEL;
+		d->val_clear = 0;
+	} else {
+		/* WaRsClearFWBitsAtReset:bdw,skl */
+		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
+		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+	}
+
+	if (IS_VALLEYVIEW(dev_priv))
+		d->reg_post = FORCEWAKE_ACK_VLV;
+	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
+		d->reg_post = ECOBUS;
+	else
+		d->reg_post = 0;
+
+	d->i915 = dev_priv;
+	d->id = domain_id;
+
+	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+
+	dev_priv->uncore.fw_domains |= (1 << domain_id);
+
+	fw_domain_reset(d);
+}
+
+static void intel_uncore_fw_domains_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+		return;
+
+	if (IS_GEN9(dev)) {
+		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			       FORCEWAKE_RENDER_GEN9,
+			       FORCEWAKE_ACK_RENDER_GEN9);
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+			       FORCEWAKE_BLITTER_GEN9,
+			       FORCEWAKE_ACK_BLITTER_GEN9);
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+		if (!IS_CHERRYVIEW(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		dev_priv->uncore.funcs.force_wake_get =
+			fw_domains_get_with_thread_status;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here: if the BIOS hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * fw_domains_get_with_thread_status() will not wake the
+		 * device and the ECOBUS read will return zero, which is
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		dev_priv->uncore.funcs.force_wake_get =
+			fw_domains_get_with_thread_status;
+		dev_priv->uncore.funcs.force_wake_put =
+			fw_domains_put_with_fifo;
+
+		/* We need to init first for ECOBUS access and then
+		 * determine later if we want to reinit, in case MT access
+		 * is not working. At this stage we don't know which flavour
+		 * this IVB is, so it is better to reset the gen6 fw
+		 * registers as well before the ECOBUS check.
+		 */
+
+		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+		__raw_posting_read(dev_priv, ECOBUS);
+
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+
+		mutex_lock(&dev->struct_mutex);
+		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
+		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+				       FORCEWAKE, FORCEWAKE_ACK);
+		}
+	} else if (IS_GEN6(dev)) {
+		dev_priv->uncore.funcs.force_wake_get =
+			fw_domains_get_with_thread_status;
+		dev_priv->uncore.funcs.force_wake_put =
+			fw_domains_put_with_fifo;
+		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			       FORCEWAKE, FORCEWAKE_ACK);
+	}
+
+	/* All future platforms are expected to require complex power gating */
+	WARN_ON(dev_priv->uncore.fw_domains == 0);
+}
+
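+/*
+ * Top-level uncore setup: detect vGPU and eLLC, initialise the
+ * forcewake domains, sanitize the initial state, then select the
+ * mmio accessors for this generation (a vGPU overrides them all).
+ */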
+void intel_uncore_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_check_vgpu(dev);
+
+	intel_uncore_ellc_detect(dev);
+	intel_uncore_fw_domains_init(dev);
+	__intel_uncore_early_sanitize(dev, false);
+
+	switch (INTEL_INFO(dev)->gen) {
+	default:
+	case 9:
+		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+		ASSIGN_READ_MMIO_VFUNCS(gen9);
+		break;
+	case 8:
+		if (IS_CHERRYVIEW(dev)) {
+			ASSIGN_WRITE_MMIO_VFUNCS(chv);
+			ASSIGN_READ_MMIO_VFUNCS(chv);
+		} else {
+			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+			ASSIGN_READ_MMIO_VFUNCS(gen6);
+		}
+		break;
+	case 7:
+	case 6:
+		if (IS_HASWELL(dev)) {
+			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
+		} else {
+			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
+		}
+
+		if (IS_VALLEYVIEW(dev)) {
+			ASSIGN_READ_MMIO_VFUNCS(vlv);
+		} else {
+			ASSIGN_READ_MMIO_VFUNCS(gen6);
+		}
+		break;
+	case 5:
+		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+		ASSIGN_READ_MMIO_VFUNCS(gen5);
+		break;
+	case 4:
+	case 3:
+	case 2:
+		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+		ASSIGN_READ_MMIO_VFUNCS(gen2);
+		break;
+	}
+
+	if (intel_vgpu_active(dev)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
+		ASSIGN_READ_MMIO_VFUNCS(vgpu);
+	}
+
+	i915_check_and_clear_faults(dev);
+}
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
+
+void intel_uncore_fini(struct drm_device *dev)
+{
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_uncore_sanitize(dev);
+	intel_uncore_forcewake_reset(dev, false);
+}
+
+#define GEN_RANGE(l, h) GENMASK(h, l)
+
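+/*
+ * Registers userspace is allowed to read through the reg_read ioctl,
+ * along with the access size and a bitmask of the generations on
+ * which each entry is valid.
+ */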
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+	uint32_t gen_bitmask;
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	unsigned size;
+	u64 offset;
+	int i, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == (reg->offset & -entry->size) &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	/* We use the low bits to encode extra flags as the register should
+	 * be naturally aligned (and those that are not so aligned merely
+	 * limit the available flags for that register).
+	 */
+	offset = entry->offset;
+	size = entry->size;
+	size |= reg->offset ^ offset;
+
+	intel_runtime_pm_get(dev_priv);
+
+	switch (size) {
+	case 8 | 1:
+		reg->val = I915_READ64_2x32(offset, offset+4);
+		break;
+	case 8:
+		reg->val = I915_READ64(offset);
+		break;
+	case 4:
+		reg->val = I915_READ(offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(offset);
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reset_stats *args = data;
+	struct i915_ctx_hang_stats *hs;
+	struct intel_context *ctx;
+	int ret;
+
+	if (args->flags || args->pad)
+		return -EINVAL;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(ctx);
+	}
+	hs = &ctx->hang_stats;
+
+	if (capable(CAP_SYS_ADMIN))
+		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	else
+		args->reset_count = 0;
+
+	args->batch_active = hs->batch_active;
+	args->batch_pending = hs->batch_pending;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
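+/*
+ * The oldest platforms are reset through the GDRST register in PCI
+ * config space: assert the reset, then poll until the hardware clears
+ * the status/enable bit to signal completion.
+ */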
+static int i915_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_STATUS) == 0;
+}
+
+static int i915_do_reset(struct drm_device *dev)
+{
+	/* assert reset for at least 20 usec */
+	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	udelay(20);
+	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+
+	return wait_for(i915_reset_complete(dev), 500);
+}
+
+static int g4x_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int g33_do_reset(struct drm_device *dev)
+{
+	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	return wait_for(g4x_reset_complete(dev), 500);
+}
+
+static int g4x_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	pci_write_config_byte(dev->pdev, I915_GDRST,
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(g4x_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
+	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+	POSTING_READ(VDECCLK_GATE_D);
+
+	pci_write_config_byte(dev->pdev, I915_GDRST,
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+	ret = wait_for(g4x_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
+	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+	POSTING_READ(VDECCLK_GATE_D);
+
+	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+
+	return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	I915_WRITE(ILK_GDSR,
+		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+	ret = wait_for((I915_READ(ILK_GDSR) &
+			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	if (ret)
+		return ret;
+
+	I915_WRITE(ILK_GDSR,
+		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+	ret = wait_for((I915_READ(ILK_GDSR) &
+			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	if (ret)
+		return ret;
+
+	I915_WRITE(ILK_GDSR, 0);
+
+	return 0;
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Reset the chip */
+
+	/* GEN6_GDRST is not in the gt power well, no need to check
+	 * for fifo space for the write or forcewake the chip for
+	 * the read
+	 */
+	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+	/* Spin waiting for the device to ack the reset request */
+	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+	intel_uncore_forcewake_reset(dev, true);
+
+	return ret;
+}
+
+static int wait_for_register(struct drm_i915_private *dev_priv,
+			     const u32 reg,
+			     const u32 mask,
+			     const u32 value,
+			     const unsigned long timeout_ms)
+{
+	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
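+/*
+ * Gen8 adds a per-engine handshake on top of the gen6 soft reset:
+ * request a reset on every engine through RING_RESET_CTL and wait for
+ * each to report ready before issuing the full reset; on timeout,
+ * withdraw the requests and give up with -EIO.
+ */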
+static int gen8_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_ring(engine, dev_priv, i) {
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+		if (wait_for_register(dev_priv,
+				      RING_RESET_CTL(engine->mmio_base),
+				      RESET_CTL_READY_TO_RESET,
+				      RESET_CTL_READY_TO_RESET,
+				      700)) {
+			DRM_ERROR("%s: reset request timeout\n", engine->name);
+			goto not_ready;
+		}
+	}
+
+	return gen6_do_reset(dev);
+
+not_ready:
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+	return -EIO;
+}
+
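+/*
+ * Pick the reset implementation for this platform; returns NULL when
+ * GPU reset has been disabled with the i915.reset module parameter or
+ * when the platform (gen2) has no supported reset mechanism.
+ */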
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+	if (!i915.reset)
+		return NULL;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		return gen8_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset;
+	else if (IS_GEN5(dev))
+		return ironlake_do_reset;
+	else if (IS_G4X(dev))
+		return g4x_do_reset;
+	else if (IS_G33(dev))
+		return g33_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 3)
+		return i915_do_reset;
+	else
+		return NULL;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int (*reset)(struct drm_device *);
+	int ret;
+
+	reset = intel_get_gpu_reset(dev);
+	if (reset == NULL)
+		return -ENODEV;
+
+	/* If the power well sleeps during the reset, the reset
+	 * request may be dropped and never completes (causing -EIO).
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	ret = reset(dev);
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+	return intel_get_gpu_reset(dev) != NULL;
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed register before interrupt\n");
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}