Merge tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel...
authorDave Airlie <airlied@redhat.com>
Fri, 9 Jan 2015 22:46:24 +0000 (08:46 +1000)
committerDave Airlie <airlied@redhat.com>
Fri, 9 Jan 2015 22:46:24 +0000 (08:46 +1000)
- plane handling refactoring from Matt Roper and Gustavo Padovan in prep for
  atomic updates
- fixes and more patches for the seqno to request transformation from John
- docbook for fbc from Rodrigo
- prep work for dual-link dsi from Gaurav Singh
- crc fixes from Ville
- special ggtt views infrastructure from Tvrtko Ursulin
- shadow patch copying for the cmd parser from Brad Volkin
- execlist and full ppgtt by default on gen8, for testing for now

* tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel: (131 commits)
  drm/i915: Update DRIVER_DATE to 20141219
  drm/i915: Hold runtime PM during plane commit
  drm/i915: Organize bind_vma funcs
  drm/i915: Organize INSTDONE report for future.
  drm/i915: Organize PDP regs report for future.
  drm/i915: Organize PPGTT init
  drm/i915: Organize Fence registers for future enablement.
  drm/i915: tame the chattermouth (v2)
  drm/i915: Warn about missing context state workarounds only once
  drm/i915: Use true PPGTT in Gen8+ when execlists are enabled
  drm/i915: Skip gunit save/restore for cherryview
  drm/i915/chv: Use timeout mode for RC6 on chv
  drm/i915: Add GPGPU_THREADS_DISPATCHED to the register whitelist
  drm/i915: Tidy up execbuffer command parsing code
  drm/i915: Mark shadow batch buffers as purgeable
  drm/i915: Use batch length instead of object size in command parser
  drm/i915: Use batch pools with the command parser
  drm/i915: Implement a framework for batch buffer pools
  drm/i915: fix use after free during eDP encoder destroying
  drm/i915/skl: Skylake also supports DP MST
  ...

16 files changed:
1  2 
Documentation/DocBook/drm.tmpl
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
include/drm/drm_crtc.h
include/drm/drm_modes.h

                Driver supports dedicated render nodes.
              </para></listitem>
            </varlistentry>
 +          <varlistentry>
 +            <term>DRIVER_ATOMIC</term>
 +            <listitem><para>
 +              Driver supports atomic properties.  In this case the driver
 +              must implement appropriate obj->atomic_get_property() vfuncs
 +              for any modeset objects with driver specific properties.
 +            </para></listitem>
 +          </varlistentry>
          </variablelist>
        </sect3>
        <sect3>
@@@ -1385,7 -1377,7 +1385,7 @@@ int max_width, max_height;</synopsis
        <itemizedlist>
          <listitem>
          DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.  Primary
 -        planes are the planes operated upon by by CRTC modesetting and flipping
 +        planes are the planes operated upon by CRTC modesetting and flipping
          operations described in <xref linkend="drm-kms-crtcops"/>.
          </listitem>
          <listitem>
@@@ -1955,16 -1947,10 +1955,16 @@@ void intel_crt_init(struct drm_device *
              and then retrieves a list of modes by calling the connector
              <methodname>get_modes</methodname> helper operation.
            </para>
 +         <para>
 +            If the helper operation returns no mode, and if the connector status
 +            is connector_status_connected, standard VESA DMT modes up to
 +            1024x768 are automatically added to the modes list by a call to
 +            <function>drm_add_modes_noedid</function>.
 +          </para>
            <para>
 -            The function filters out modes larger than
 +            The function then filters out modes larger than
              <parameter>max_width</parameter> and <parameter>max_height</parameter>
 -            if specified. It then calls the optional connector
 +            if specified. It finally calls the optional connector
              <methodname>mode_valid</methodname> helper operation for each mode in
              the probed list to check whether the mode is valid for the connector.
            </para>
            <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
            <para>
              Fill the connector's <structfield>probed_modes</structfield> list
 -            by parsing EDID data with <function>drm_add_edid_modes</function> or
 -            calling <function>drm_mode_probed_add</function> directly for every
 +            by parsing EDID data with <function>drm_add_edid_modes</function>,
 +            adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
 +            or calling <function>drm_mode_probed_add</function> directly for every
              supported mode and return the number of modes it has detected. This
              operation is mandatory.
            </para>
 +          <para>
 +            Note that the caller function will automatically add standard VESA
 +            DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
 +            helper operation returns no mode and if the connector status is
 +            connector_status_connected. There is no need to call
 +            <function>drm_add_edid_modes</function> manually in that case.
 +          </para>
            <para>
              When adding modes manually the driver creates each mode with a call to
              <function>drm_mode_create</function> and must fill the following fields.
              <function>drm_helper_probe_single_connector_modes</function>.
            </para>
            <para>
 -            When parsing EDID data, <function>drm_add_edid_modes</function> fill the
 +            When parsing EDID data, <function>drm_add_edid_modes</function> fills the
              connector <structfield>display_info</structfield>
              <structfield>width_mm</structfield> and
              <structfield>height_mm</structfield> fields. When creating modes
      </sect2>
      <sect2>
        <title>Modeset Helper Functions Reference</title>
 +!Iinclude/drm/drm_crtc_helper.h
  !Edrivers/gpu/drm/drm_crtc_helper.c
  !Pdrivers/gpu/drm/drm_crtc_helper.c overview
      </sect2>
  !Edrivers/gpu/drm/drm_plane_helper.c
  !Pdrivers/gpu/drm/drm_plane_helper.c overview
      </sect2>
 +    <sect2>
 +        <title>Tile group</title>
 +!Pdrivers/gpu/drm/drm_crtc.c Tile group
 +    </sect2>
    </sect1>
  
    <!-- Internals: kms properties -->
        <td valign="top" >Description/Restrictions</td>
        </tr>
        <tr>
 -      <td rowspan="23" valign="top" >DRM</td>
 -      <td rowspan="3" valign="top" >Generic</td>
 +      <td rowspan="36" valign="top" >DRM</td>
 +      <td rowspan="5" valign="top" >Connector</td>
        <td valign="top" >“EDID”</td>
        <td valign="top" >BLOB | IMMUTABLE</td>
        <td valign="top" >0</td>
        <td valign="top" >Contains topology path to a connector.</td>
        </tr>
        <tr>
 -      <td rowspan="1" valign="top" >Plane</td>
 +      <td valign="top" >“TILE”</td>
 +      <td valign="top" >BLOB | IMMUTABLE</td>
 +      <td valign="top" >0</td>
 +      <td valign="top" >Connector</td>
 +      <td valign="top" >Contains tiling information for a connector.</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_ID”</td>
 +      <td valign="top" >OBJECT</td>
 +      <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
 +      <td valign="top" >Connector</td>
 +      <td valign="top" >CRTC that connector is attached to (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td rowspan="11" valign="top" >Plane</td>
        <td valign="top" >“type”</td>
        <td valign="top" >ENUM | IMMUTABLE</td>
        <td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
        <td valign="top" >Plane type</td>
        </tr>
        <tr>
 +      <td valign="top" >“SRC_X”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“SRC_Y”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“SRC_W”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“SRC_H”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_X”</td>
 +      <td valign="top" >SIGNED_RANGE</td>
 +      <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_Y”</td>
 +      <td valign="top" >SIGNED_RANGE</td>
 +      <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_W”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_H”</td>
 +      <td valign="top" >RANGE</td>
 +      <td valign="top" >Min=0, Max=UINT_MAX</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“FB_ID”</td>
 +      <td valign="top" >OBJECT</td>
 +      <td valign="top" >DRM_MODE_OBJECT_FB</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >Scanout framebuffer (atomic)</td>
 +      </tr>
 +      <tr>
 +      <td valign="top" >“CRTC_ID”</td>
 +      <td valign="top" >OBJECT</td>
 +      <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
 +      <td valign="top" >Plane</td>
 +      <td valign="top" >CRTC that plane is attached to (atomic)</td>
 +      </tr>
 +      <tr>
        <td rowspan="2" valign="top" >DVI-I</td>
        <td valign="top" >“subconnector”</td>
        <td valign="top" >ENUM</td>
@@@ -4035,6 -3924,11 +4035,11 @@@ int num_ioctls;</synopsis
        <title>Panel Self Refresh PSR (PSR/SRD)</title>
  !Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
  !Idrivers/gpu/drm/i915/intel_psr.c
+       </sect2>
+       <sect2>
+       <title>Frame Buffer Compression (FBC)</title>
+ !Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
+ !Idrivers/gpu/drm/i915/intel_fbc.c
        </sect2>
        <sect2>
          <title>DPIO</title>
          <title>Batchbuffer Parsing</title>
  !Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
  !Idrivers/gpu/drm/i915/i915_cmd_parser.c
+       </sect2>
+       <sect2>
+         <title>Batchbuffer Pools</title>
+ !Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
+ !Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
        </sect2>
        <sect2>
          <title>Logical Rings, Logical Ring Contexts and Execlists</title>
  !Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
  !Idrivers/gpu/drm/i915/intel_lrc.c
        </sect2>
+       <sect2>
+         <title>Global GTT views</title>
+ !Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
+ !Idrivers/gpu/drm/i915/i915_gem_gtt.c
+       </sect2>
      </sect1>
  
      <sect1>
@@@ -38,7 -38,6 +38,7 @@@
  #include <drm/drm_edid.h>
  #include <drm/drm_fourcc.h>
  #include <drm/drm_modeset_lock.h>
 +#include <drm/drm_atomic.h>
  
  #include "drm_crtc_internal.h"
  #include "drm_internal.h"
@@@ -62,8 -61,8 +62,8 @@@ static struct drm_framebuffer *add_fram
  /*
   * Global properties
   */
 -static const struct drm_prop_enum_list drm_dpms_enum_list[] =
 -{     { DRM_MODE_DPMS_ON, "On" },
 +static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
 +      { DRM_MODE_DPMS_ON, "On" },
        { DRM_MODE_DPMS_STANDBY, "Standby" },
        { DRM_MODE_DPMS_SUSPEND, "Suspend" },
        { DRM_MODE_DPMS_OFF, "Off" }
@@@ -71,7 -70,8 +71,7 @@@
  
  DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
  
 -static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
        { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
        { DRM_PLANE_TYPE_PRIMARY, "Primary" },
        { DRM_PLANE_TYPE_CURSOR, "Cursor" },
@@@ -80,7 -80,8 +80,7 @@@
  /*
   * Optional properties
   */
 -static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
        { DRM_MODE_SCALE_NONE, "None" },
        { DRM_MODE_SCALE_FULLSCREEN, "Full" },
        { DRM_MODE_SCALE_CENTER, "Center" },
@@@ -96,7 -97,8 +96,7 @@@ static const struct drm_prop_enum_list 
  /*
   * Non-global properties, but "required" for certain connectors.
   */
 -static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
        { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
        { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
        { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
  
  DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
  
 -static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
        { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
        { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
        { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
  DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
                 drm_dvi_i_subconnector_enum_list)
  
 -static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
        { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
        { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
  
  DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
  
 -static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
        { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
        { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
@@@ -149,8 -154,8 +149,8 @@@ struct drm_conn_prop_enum_list 
  /*
   * Connector and encoder types.
   */
 -static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 -{     { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
 +static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
 +      { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
        { DRM_MODE_CONNECTOR_VGA, "VGA" },
        { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
        { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
        { DRM_MODE_CONNECTOR_DSI, "DSI" },
  };
  
 -static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 -{     { DRM_MODE_ENCODER_NONE, "None" },
 +static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
 +      { DRM_MODE_ENCODER_NONE, "None" },
        { DRM_MODE_ENCODER_DAC, "DAC" },
        { DRM_MODE_ENCODER_TMDS, "TMDS" },
        { DRM_MODE_ENCODER_LVDS, "LVDS" },
        { DRM_MODE_ENCODER_DPMST, "DP MST" },
  };
  
 -static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
 -{
 +static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
        { SubPixelUnknown, "Unknown" },
        { SubPixelHorizontalRGB, "Horizontal RGB" },
        { SubPixelHorizontalBGR, "Horizontal BGR" },
@@@ -719,8 -725,6 +719,8 @@@ void drm_crtc_cleanup(struct drm_crtc *
        WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
        if (crtc->state && crtc->funcs->atomic_destroy_state)
                crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 +
 +      memset(crtc, 0, sizeof(*crtc));
  }
  EXPORT_SYMBOL(drm_crtc_cleanup);
  
@@@ -831,7 -835,6 +831,7 @@@ int drm_connector_init(struct drm_devic
                       const struct drm_connector_funcs *funcs,
                       int connector_type)
  {
 +      struct drm_mode_config *config = &dev->mode_config;
        int ret;
        struct ida *connector_ida =
                &drm_connector_enum_list[connector_type].ida;
  
        /* We should add connectors at the end to avoid upsetting the connector
         * index too much. */
 -      list_add_tail(&connector->head, &dev->mode_config.connector_list);
 -      dev->mode_config.num_connector++;
 +      list_add_tail(&connector->head, &config->connector_list);
 +      config->num_connector++;
  
        if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
                drm_object_attach_property(&connector->base,
 -                                            dev->mode_config.edid_property,
 +                                            config->edid_property,
                                              0);
  
        drm_object_attach_property(&connector->base,
 -                                    dev->mode_config.dpms_property, 0);
 +                                    config->dpms_property, 0);
 +
 +      if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 +              drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
 +      }
  
        connector->debugfs_entry = NULL;
  
@@@ -909,11 -908,6 +909,11 @@@ void drm_connector_cleanup(struct drm_c
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode, *t;
  
 +      if (connector->tile_group) {
 +              drm_mode_put_tile_group(dev, connector->tile_group);
 +              connector->tile_group = NULL;
 +      }
 +
        list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
                drm_mode_remove(connector, mode);
  
        if (connector->state && connector->funcs->atomic_destroy_state)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
 +
 +      memset(connector, 0, sizeof(*connector));
  }
  EXPORT_SYMBOL(drm_connector_cleanup);
  
@@@ -1076,8 -1068,6 +1076,8 @@@ void drm_bridge_cleanup(struct drm_brid
        list_del(&bridge->head);
        dev->mode_config.num_bridge--;
        drm_modeset_unlock_all(dev);
 +
 +      memset(bridge, 0, sizeof(*bridge));
  }
  EXPORT_SYMBOL(drm_bridge_cleanup);
  
@@@ -1141,15 -1131,13 +1141,15 @@@ EXPORT_SYMBOL(drm_encoder_init)
  void drm_encoder_cleanup(struct drm_encoder *encoder)
  {
        struct drm_device *dev = encoder->dev;
 +
        drm_modeset_lock_all(dev);
        drm_mode_object_put(dev, &encoder->base);
        kfree(encoder->name);
 -      encoder->name = NULL;
        list_del(&encoder->head);
        dev->mode_config.num_encoder--;
        drm_modeset_unlock_all(dev);
 +
 +      memset(encoder, 0, sizeof(*encoder));
  }
  EXPORT_SYMBOL(drm_encoder_cleanup);
  
@@@ -1174,7 -1162,6 +1174,7 @@@ int drm_universal_plane_init(struct drm
                             const uint32_t *formats, uint32_t format_count,
                             enum drm_plane_type type)
  {
 +      struct drm_mode_config *config = &dev->mode_config;
        int ret;
  
        ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
        plane->base.properties = &plane->properties;
        plane->dev = dev;
        plane->funcs = funcs;
 -      plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
 -                                    GFP_KERNEL);
 +      plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
 +                                          GFP_KERNEL);
        if (!plane->format_types) {
                DRM_DEBUG_KMS("out of memory when allocating plane\n");
                drm_mode_object_put(dev, &plane->base);
        plane->possible_crtcs = possible_crtcs;
        plane->type = type;
  
 -      list_add_tail(&plane->head, &dev->mode_config.plane_list);
 -      dev->mode_config.num_total_plane++;
 +      list_add_tail(&plane->head, &config->plane_list);
 +      config->num_total_plane++;
        if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 -              dev->mode_config.num_overlay_plane++;
 +              config->num_overlay_plane++;
  
        drm_object_attach_property(&plane->base,
 -                                 dev->mode_config.plane_type_property,
 +                                 config->plane_type_property,
                                   plane->type);
  
 +      if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 +              drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
 +              drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
 +              drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
 +              drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
 +              drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
 +              drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
 +              drm_object_attach_property(&plane->base, config->prop_src_x, 0);
 +              drm_object_attach_property(&plane->base, config->prop_src_y, 0);
 +              drm_object_attach_property(&plane->base, config->prop_src_w, 0);
 +              drm_object_attach_property(&plane->base, config->prop_src_h, 0);
 +      }
 +
        return 0;
  }
  EXPORT_SYMBOL(drm_universal_plane_init);
@@@ -1283,8 -1257,6 +1283,8 @@@ void drm_plane_cleanup(struct drm_plan
        WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
        if (plane->state && plane->funcs->atomic_destroy_state)
                plane->funcs->atomic_destroy_state(plane, plane->state);
 +
 +      memset(plane, 0, sizeof(*plane));
  }
  EXPORT_SYMBOL(drm_plane_cleanup);
  
@@@ -1342,109 -1314,45 +1342,109 @@@ void drm_plane_force_disable(struct drm
  }
  EXPORT_SYMBOL(drm_plane_force_disable);
  
 -static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 +static int drm_mode_create_standard_properties(struct drm_device *dev)
  {
 -      struct drm_property *edid;
 -      struct drm_property *dpms;
 -      struct drm_property *dev_path;
 +      struct drm_property *prop;
  
        /*
         * Standard properties (apply to all connectors)
         */
 -      edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
 +      prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
                                   DRM_MODE_PROP_IMMUTABLE,
                                   "EDID", 0);
 -      dev->mode_config.edid_property = edid;
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.edid_property = prop;
  
 -      dpms = drm_property_create_enum(dev, 0,
 +      prop = drm_property_create_enum(dev, 0,
                                   "DPMS", drm_dpms_enum_list,
                                   ARRAY_SIZE(drm_dpms_enum_list));
 -      dev->mode_config.dpms_property = dpms;
 -
 -      dev_path = drm_property_create(dev,
 -                                     DRM_MODE_PROP_BLOB |
 -                                     DRM_MODE_PROP_IMMUTABLE,
 -                                     "PATH", 0);
 -      dev->mode_config.path_property = dev_path;
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.dpms_property = prop;
  
 -      return 0;
 -}
 +      prop = drm_property_create(dev,
 +                                 DRM_MODE_PROP_BLOB |
 +                                 DRM_MODE_PROP_IMMUTABLE,
 +                                 "PATH", 0);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.path_property = prop;
  
 -static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
 -{
 -      struct drm_property *type;
 +      prop = drm_property_create(dev,
 +                                 DRM_MODE_PROP_BLOB |
 +                                 DRM_MODE_PROP_IMMUTABLE,
 +                                 "TILE", 0);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.tile_property = prop;
  
 -      /*
 -       * Standard properties (apply to all planes)
 -       */
 -      type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 +      prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
                                        "type", drm_plane_type_enum_list,
                                        ARRAY_SIZE(drm_plane_type_enum_list));
 -      dev->mode_config.plane_type_property = type;
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.plane_type_property = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "SRC_X", 0, UINT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_src_x = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "SRC_Y", 0, UINT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_src_y = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "SRC_W", 0, UINT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_src_w = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "SRC_H", 0, UINT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_src_h = prop;
 +
 +      prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "CRTC_X", INT_MIN, INT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_crtc_x = prop;
 +
 +      prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "CRTC_Y", INT_MIN, INT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_crtc_y = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "CRTC_W", 0, INT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_crtc_w = prop;
 +
 +      prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
 +                      "CRTC_H", 0, INT_MAX);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_crtc_h = prop;
 +
 +      prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
 +                      "FB_ID", DRM_MODE_OBJECT_FB);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_fb_id = prop;
 +
 +      prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
 +                      "CRTC_ID", DRM_MODE_OBJECT_CRTC);
 +      if (!prop)
 +              return -ENOMEM;
 +      dev->mode_config.prop_crtc_id = prop;
  
        return 0;
  }
@@@ -1672,7 -1580,7 +1672,7 @@@ static int drm_mode_group_init(struct d
        total_objects += dev->mode_config.num_encoder;
        total_objects += dev->mode_config.num_bridge;
  
 -      group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
 +      group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
        if (!group->id_list)
                return -ENOMEM;
  
@@@ -1702,8 -1610,7 +1702,8 @@@ int drm_mode_group_init_legacy_group(st
        struct drm_bridge *bridge;
        int ret;
  
 -      if ((ret = drm_mode_group_init(dev, group)))
 +      ret = drm_mode_group_init(dev, group);
 +      if (ret)
                return ret;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@@ -2070,44 -1977,6 +2070,44 @@@ static struct drm_encoder *drm_connecto
        return connector->encoder;
  }
  
 +/* helper for getconnector and getproperties ioctls */
 +static int get_properties(struct drm_mode_object *obj, bool atomic,
 +              uint32_t __user *prop_ptr, uint64_t __user *prop_values,
 +              uint32_t *arg_count_props)
 +{
 +      int props_count;
 +      int i, ret, copied;
 +
 +      props_count = obj->properties->count;
 +      if (!atomic)
 +              props_count -= obj->properties->atomic_count;
 +
 +      if ((*arg_count_props >= props_count) && props_count) {
 +              for (i = 0, copied = 0; copied < props_count; i++) {
 +                      struct drm_property *prop = obj->properties->properties[i];
 +                      uint64_t val;
 +
 +                      if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
 +                              continue;
 +
 +                      ret = drm_object_property_get_value(obj, prop, &val);
 +                      if (ret)
 +                              return ret;
 +
 +                      if (put_user(prop->base.id, prop_ptr + copied))
 +                              return -EFAULT;
 +
 +                      if (put_user(val, prop_values + copied))
 +                              return -EFAULT;
 +
 +                      copied++;
 +              }
 +      }
 +      *arg_count_props = props_count;
 +
 +      return 0;
 +}
 +
  /**
   * drm_mode_getconnector - get connector configuration
   * @dev: drm device for the ioctl
@@@ -2129,12 -1998,15 +2129,12 @@@ int drm_mode_getconnector(struct drm_de
        struct drm_encoder *encoder;
        struct drm_display_mode *mode;
        int mode_count = 0;
 -      int props_count = 0;
        int encoders_count = 0;
        int ret = 0;
        int copied = 0;
        int i;
        struct drm_mode_modeinfo u_mode;
        struct drm_mode_modeinfo __user *mode_ptr;
 -      uint32_t __user *prop_ptr;
 -      uint64_t __user *prop_values;
        uint32_t __user *encoder_ptr;
  
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
        DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
  
        mutex_lock(&dev->mode_config.mutex);
 +      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  
        connector = drm_connector_find(dev, out_resp->connector_id);
        if (!connector) {
                goto out;
        }
  
 -      props_count = connector->properties.count;
 -
 -      for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 -              if (connector->encoder_ids[i] != 0) {
 +      for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
 +              if (connector->encoder_ids[i] != 0)
                        encoders_count++;
 -              }
 -      }
  
        if (out_resp->count_modes == 0) {
                connector->funcs->fill_modes(connector,
        out_resp->mm_height = connector->display_info.height_mm;
        out_resp->subpixel = connector->display_info.subpixel_order;
        out_resp->connection = connector->status;
 -      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 -
        encoder = drm_connector_get_encoder(connector);
        if (encoder)
                out_resp->encoder_id = encoder->base.id;
        else
                out_resp->encoder_id = 0;
 -      drm_modeset_unlock(&dev->mode_config.connection_mutex);
  
        /*
         * This ioctl is called twice, once to determine how much space is
        }
        out_resp->count_modes = mode_count;
  
 -      if ((out_resp->count_props >= props_count) && props_count) {
 -              copied = 0;
 -              prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
 -              prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
 -              for (i = 0; i < connector->properties.count; i++) {
 -                      if (put_user(connector->properties.ids[i],
 -                                   prop_ptr + copied)) {
 -                              ret = -EFAULT;
 -                              goto out;
 -                      }
 -
 -                      if (put_user(connector->properties.values[i],
 -                                   prop_values + copied)) {
 -                              ret = -EFAULT;
 -                              goto out;
 -                      }
 -                      copied++;
 -              }
 -      }
 -      out_resp->count_props = props_count;
 +      ret = get_properties(&connector->base, file_priv->atomic,
 +                      (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
 +                      (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
 +                      &out_resp->count_props);
 +      if (ret)
 +              goto out;
  
        if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
                copied = 0;
        out_resp->count_encoders = encoders_count;
  
  out:
 +      drm_modeset_unlock(&dev->mode_config.connection_mutex);
        mutex_unlock(&dev->mode_config.mutex);
  
        return ret;
@@@ -2619,7 -2510,7 +2619,7 @@@ int drm_mode_setplane(struct drm_devic
   *
   * This is a little helper to wrap internal calls to the ->set_config driver
   * interface. The only thing it adds is correct refcounting dance.
 - * 
 + *
   * Returns:
   * Zero on success, negative errno on failure.
   */
@@@ -2658,6 -2549,27 +2658,27 @@@ int drm_mode_set_config_internal(struc
  }
  EXPORT_SYMBOL(drm_mode_set_config_internal);
  
+ /**
+  * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+  * @mode: mode to query
+  * @hdisplay: hdisplay value to fill in
+  * @vdisplay: vdisplay value to fill in
+  *
+  * The vdisplay value will be doubled if the specified mode is a stereo mode of
+  * the appropriate layout.
+  */
+ void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+                           int *hdisplay, int *vdisplay)
+ {
+       struct drm_display_mode adjusted;
+       drm_mode_copy(&adjusted, mode);
+       drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+       *hdisplay = adjusted.crtc_hdisplay;
+       *vdisplay = adjusted.crtc_vdisplay;
+ }
+ EXPORT_SYMBOL(drm_crtc_get_hv_timing);
  /**
   * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
   *     CRTC viewport
@@@ -2675,16 -2587,7 +2696,7 @@@ int drm_crtc_check_viewport(const struc
  {
        int hdisplay, vdisplay;
  
-       hdisplay = mode->hdisplay;
-       vdisplay = mode->vdisplay;
-       if (drm_mode_is_stereo(mode)) {
-               struct drm_display_mode adjusted = *mode;
-               drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
-               hdisplay = adjusted.crtc_hdisplay;
-               vdisplay = adjusted.crtc_vdisplay;
-       }
+       drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
  
        if (crtc->invert_dimensions)
                swap(hdisplay, vdisplay);
@@@ -2780,12 -2683,6 +2792,12 @@@ int drm_mode_setcrtc(struct drm_device 
                        goto out;
                }
  
 +              mode->status = drm_mode_validate_basic(mode);
 +              if (mode->status != MODE_OK) {
 +                      ret = -EINVAL;
 +                      goto out;
 +              }
 +
                drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
  
                ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
                        goto out;
                }
  
 -              connector_set = kmalloc(crtc_req->count_connectors *
 -                                      sizeof(struct drm_connector *),
 -                                      GFP_KERNEL);
 +              connector_set = kmalloc_array(crtc_req->count_connectors,
 +                                            sizeof(struct drm_connector *),
 +                                            GFP_KERNEL);
                if (!connector_set) {
                        ret = -ENOMEM;
                        goto out;
@@@ -3064,7 -2961,6 +3076,7 @@@ int drm_mode_cursor2_ioctl(struct drm_d
                           void *data, struct drm_file *file_priv)
  {
        struct drm_mode_cursor2 *req = data;
 +
        return drm_mode_cursor_common(dev, req, file_priv);
  }
  
@@@ -3512,7 -3408,7 +3524,7 @@@ int drm_mode_dirtyfb_ioctl(struct drm_d
                        ret = -EINVAL;
                        goto out_err1;
                }
 -              clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 +              clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
                if (!clips) {
                        ret = -ENOMEM;
                        goto out_err1;
@@@ -3560,7 -3456,7 +3572,7 @@@ void drm_fb_release(struct drm_file *pr
  
        /*
         * When the file gets released that means no one else can access the fb
 -       * list any more, so no need to grab fpriv->fbs_lock. And we need to to
 +       * list any more, so no need to grab fpriv->fbs_lock. And we need to
         * avoid upsetting lockdep since the universal cursor code adds a
         * framebuffer while holding mutex locks.
         *
@@@ -3613,8 -3509,7 +3625,8 @@@ struct drm_property *drm_property_creat
        property->dev = dev;
  
        if (num_values) {
 -              property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
 +              property->values = kcalloc(num_values, sizeof(uint64_t),
 +                                         GFP_KERNEL);
                if (!property->values)
                        goto fail;
        }
@@@ -3920,11 -3815,9 +3932,11 @@@ void drm_object_attach_property(struct 
                return;
        }
  
 -      obj->properties->ids[count] = property->base.id;
 +      obj->properties->properties[count] = property;
        obj->properties->values[count] = init_val;
        obj->properties->count++;
 +      if (property->flags & DRM_MODE_PROP_ATOMIC)
 +              obj->properties->atomic_count++;
  }
  EXPORT_SYMBOL(drm_object_attach_property);
  
@@@ -3947,7 -3840,7 +3959,7 @@@ int drm_object_property_set_value(struc
        int i;
  
        for (i = 0; i < obj->properties->count; i++) {
 -              if (obj->properties->ids[i] == property->base.id) {
 +              if (obj->properties->properties[i] == property) {
                        obj->properties->values[i] = val;
                        return 0;
                }
@@@ -3976,16 -3869,8 +3988,16 @@@ int drm_object_property_get_value(struc
  {
        int i;
  
 +      /* read-only properties bypass atomic mechanism and still store
 +       * their value in obj->properties->values[].. mostly to avoid
 +       * having to deal w/ EDID and similar props in atomic paths:
 +       */
 +      if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
 +                      !(property->flags & DRM_MODE_PROP_IMMUTABLE))
 +              return drm_atomic_get_property(obj, property, val);
 +
        for (i = 0; i < obj->properties->count; i++) {
 -              if (obj->properties->ids[i] == property->base.id) {
 +              if (obj->properties->properties[i] == property) {
                        *val = obj->properties->values[i];
                        return 0;
                }
@@@ -4165,7 -4050,7 +4177,7 @@@ int drm_mode_getblob_ioctl(struct drm_d
  
        if (out_resp->length == blob->length) {
                blob_ptr = (void __user *)(unsigned long)out_resp->data;
 -              if (copy_to_user(blob_ptr, blob->data, blob->length)){
 +              if (copy_to_user(blob_ptr, blob->data, blob->length)) {
                        ret = -EFAULT;
                        goto done;
                }
@@@ -4209,52 -4094,6 +4221,52 @@@ int drm_mode_connector_set_path_propert
  }
  EXPORT_SYMBOL(drm_mode_connector_set_path_property);
  
 +/**
 + * drm_mode_connector_set_tile_property - set tile property on connector
 + * @connector: connector to set property on.
 + *
 + * This looks up the tile information for a connector, and creates a
 + * property for userspace to parse if it exists. The property is of
 + * the form of 8 integers using ':' as a separator.
 + *
 + * Returns:
 + * Zero on success, errno on failure.
 + */
 +int drm_mode_connector_set_tile_property(struct drm_connector *connector)
 +{
 +      struct drm_device *dev = connector->dev;
 +      int ret, size;
 +      char tile[256];
 +
 +      if (connector->tile_blob_ptr)
 +              drm_property_destroy_blob(dev, connector->tile_blob_ptr);
 +
 +      if (!connector->has_tile) {
 +              connector->tile_blob_ptr = NULL;
 +              ret = drm_object_property_set_value(&connector->base,
 +                                                  dev->mode_config.tile_property, 0);
 +              return ret;
 +      }
 +
 +      snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
 +               connector->tile_group->id, connector->tile_is_single_monitor,
 +               connector->num_h_tile, connector->num_v_tile,
 +               connector->tile_h_loc, connector->tile_v_loc,
 +               connector->tile_h_size, connector->tile_v_size);
 +      size = strlen(tile) + 1;
 +
 +      connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
 +                                                          size, tile);
 +      if (!connector->tile_blob_ptr)
 +              return -EINVAL;
 +
 +      ret = drm_object_property_set_value(&connector->base,
 +                                          dev->mode_config.tile_property,
 +                                          connector->tile_blob_ptr->base.id);
 +      return ret;
 +}
 +EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
 +
  /**
   * drm_mode_connector_update_edid_property - update the edid property of a connector
   * @connector: drm connector
@@@ -4301,38 -4140,25 +4313,38 @@@ int drm_mode_connector_update_edid_prop
  }
  EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
  
 -static bool drm_property_change_is_valid(struct drm_property *property,
 -                                       uint64_t value)
 +/* Some properties could refer to dynamic refcnt'd objects, or things that
 + * need special locking to handle lifetime issues (ie. to ensure the prop
 + * value doesn't become invalid part way through the property update due to
 + * race).  The value returned by reference via 'obj' should be passed back
 + * to drm_property_change_valid_put() after the property is set (and the
 + * object to which the property is attached has a chance to take its own
 + * reference).
 + */
 +bool drm_property_change_valid_get(struct drm_property *property,
 +                                       uint64_t value, struct drm_mode_object **ref)
  {
 +      int i;
 +
        if (property->flags & DRM_MODE_PROP_IMMUTABLE)
                return false;
  
 +      *ref = NULL;
 +
        if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
                if (value < property->values[0] || value > property->values[1])
                        return false;
                return true;
        } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
                int64_t svalue = U642I64(value);
 +
                if (svalue < U642I64(property->values[0]) ||
                                svalue > U642I64(property->values[1]))
                        return false;
                return true;
        } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
 -              int i;
                uint64_t valid_mask = 0;
 +
                for (i = 0; i < property->num_values; i++)
                        valid_mask |= (1ULL << property->values[i]);
                return !(value & ~valid_mask);
                /* Only the driver knows */
                return true;
        } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
 -              struct drm_mode_object *obj;
                /* a zero value for an object property translates to null: */
                if (value == 0)
                        return true;
 -              /*
 -               * NOTE: use _object_find() directly to bypass restriction on
 -               * looking up refcnt'd objects (ie. fb's).  For a refcnt'd
 -               * object this could race against object finalization, so it
 -               * simply tells us that the object *was* valid.  Which is good
 -               * enough.
 -               */
 -              obj = _object_find(property->dev, value, property->values[0]);
 -              return obj != NULL;
 +
 +              /* handle refcnt'd objects specially: */
 +              if (property->values[0] == DRM_MODE_OBJECT_FB) {
 +                      struct drm_framebuffer *fb;
 +                      fb = drm_framebuffer_lookup(property->dev, value);
 +                      if (fb) {
 +                              *ref = &fb->base;
 +                              return true;
 +                      } else {
 +                              return false;
 +                      }
 +              } else {
 +                      return _object_find(property->dev, value, property->values[0]) != NULL;
 +              }
        } else {
                int i;
                for (i = 0; i < property->num_values; i++)
                                return true;
                return false;
        }
 +
 +      for (i = 0; i < property->num_values; i++)
 +              if (property->values[i] == value)
 +                      return true;
 +      return false;
 +}
 +
 +void drm_property_change_valid_put(struct drm_property *property,
 +              struct drm_mode_object *ref)
 +{
 +      if (!ref)
 +              return;
 +
 +      if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
 +              if (property->values[0] == DRM_MODE_OBJECT_FB)
 +                      drm_framebuffer_unreference(obj_to_fb(ref));
 +      }
  }
  
  /**
@@@ -4498,6 -4303,11 +4510,6 @@@ int drm_mode_obj_get_properties_ioctl(s
        struct drm_mode_obj_get_properties *arg = data;
        struct drm_mode_object *obj;
        int ret = 0;
 -      int i;
 -      int copied = 0;
 -      int props_count = 0;
 -      uint32_t __user *props_ptr;
 -      uint64_t __user *prop_values_ptr;
  
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
                goto out;
        }
  
 -      props_count = obj->properties->count;
 +      ret = get_properties(obj, file_priv->atomic,
 +                      (uint32_t __user *)(unsigned long)(arg->props_ptr),
 +                      (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
 +                      &arg->count_props);
  
 -      /* This ioctl is called twice, once to determine how much space is
 -       * needed, and the 2nd time to fill it. */
 -      if ((arg->count_props >= props_count) && props_count) {
 -              copied = 0;
 -              props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
 -              prop_values_ptr = (uint64_t __user *)(unsigned long)
 -                                (arg->prop_values_ptr);
 -              for (i = 0; i < props_count; i++) {
 -                      if (put_user(obj->properties->ids[i],
 -                                   props_ptr + copied)) {
 -                              ret = -EFAULT;
 -                              goto out;
 -                      }
 -                      if (put_user(obj->properties->values[i],
 -                                   prop_values_ptr + copied)) {
 -                              ret = -EFAULT;
 -                              goto out;
 -                      }
 -                      copied++;
 -              }
 -      }
 -      arg->count_props = props_count;
  out:
        drm_modeset_unlock_all(dev);
        return ret;
@@@ -4547,8 -4376,8 +4559,8 @@@ int drm_mode_obj_set_property_ioctl(str
        struct drm_mode_object *arg_obj;
        struct drm_mode_object *prop_obj;
        struct drm_property *property;
 -      int ret = -EINVAL;
 -      int i;
 +      int i, ret = -EINVAL;
 +      struct drm_mode_object *ref;
  
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
                goto out;
  
        for (i = 0; i < arg_obj->properties->count; i++)
 -              if (arg_obj->properties->ids[i] == arg->prop_id)
 +              if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
                        break;
  
        if (i == arg_obj->properties->count)
        }
        property = obj_to_property(prop_obj);
  
 -      if (!drm_property_change_is_valid(property, arg->value))
 +      if (!drm_property_change_valid_get(property, arg->value, &ref))
                goto out;
  
        switch (arg_obj->type) {
                break;
        }
  
 +      drm_property_change_valid_put(property, ref);
 +
  out:
        drm_modeset_unlock_all(dev);
        return ret;
@@@ -4646,8 -4473,7 +4658,8 @@@ int drm_mode_crtc_set_gamma_size(struc
  {
        crtc->gamma_size = gamma_size;
  
 -      crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
 +      crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
 +                                  GFP_KERNEL);
        if (!crtc->gamma_store) {
                crtc->gamma_size = 0;
                return -ENOMEM;
@@@ -4862,23 -4688,23 +4874,23 @@@ int drm_mode_page_flip_ioctl(struct drm
        if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
                ret = -ENOMEM;
                spin_lock_irqsave(&dev->event_lock, flags);
 -              if (file_priv->event_space < sizeof e->event) {
 +              if (file_priv->event_space < sizeof(e->event)) {
                        spin_unlock_irqrestore(&dev->event_lock, flags);
                        goto out;
                }
 -              file_priv->event_space -= sizeof e->event;
 +              file_priv->event_space -= sizeof(e->event);
                spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -              e = kzalloc(sizeof *e, GFP_KERNEL);
 +              e = kzalloc(sizeof(*e), GFP_KERNEL);
                if (e == NULL) {
                        spin_lock_irqsave(&dev->event_lock, flags);
 -                      file_priv->event_space += sizeof e->event;
 +                      file_priv->event_space += sizeof(e->event);
                        spin_unlock_irqrestore(&dev->event_lock, flags);
                        goto out;
                }
  
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 -              e->event.base.length = sizeof e->event;
 +              e->event.base.length = sizeof(e->event);
                e->event.user_data = page_flip->user_data;
                e->base.event = &e->event.base;
                e->base.file_priv = file_priv;
        if (ret) {
                if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
                        spin_lock_irqsave(&dev->event_lock, flags);
 -                      file_priv->event_space += sizeof e->event;
 +                      file_priv->event_space += sizeof(e->event);
                        spin_unlock_irqrestore(&dev->event_lock, flags);
                        kfree(e);
                }
@@@ -5338,10 -5164,10 +5350,10 @@@ void drm_mode_config_init(struct drm_de
        INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
        INIT_LIST_HEAD(&dev->mode_config.plane_list);
        idr_init(&dev->mode_config.crtc_idr);
 +      idr_init(&dev->mode_config.tile_idr);
  
        drm_modeset_lock_all(dev);
 -      drm_mode_create_standard_connector_properties(dev);
 -      drm_mode_create_standard_plane_properties(dev);
 +      drm_mode_create_standard_properties(dev);
        drm_modeset_unlock_all(dev);
  
        /* Just to be sure */
@@@ -5425,7 -5251,6 +5437,7 @@@ void drm_mode_config_cleanup(struct drm
                crtc->funcs->destroy(crtc);
        }
  
 +      idr_destroy(&dev->mode_config.tile_idr);
        idr_destroy(&dev->mode_config.crtc_idr);
        drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
  }
@@@ -5448,100 -5273,3 +5460,100 @@@ struct drm_property *drm_mode_create_ro
                                           supported_rotations);
  }
  EXPORT_SYMBOL(drm_mode_create_rotation_property);
 +
 +/**
 + * DOC: Tile group
 + *
 + * Tile groups are used to represent tiled monitors with a unique
 + * integer identifier. Tiled monitors using DisplayID v1.3 have
 + * a unique 8-byte handle, we store this in a tile group, so we
 + * have a common identifier for all tiles in a monitor group.
 + */
 +static void drm_tile_group_free(struct kref *kref)
 +{
 +      struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
 +      struct drm_device *dev = tg->dev;
 +      mutex_lock(&dev->mode_config.idr_mutex);
 +      idr_remove(&dev->mode_config.tile_idr, tg->id);
 +      mutex_unlock(&dev->mode_config.idr_mutex);
 +      kfree(tg);
 +}
 +
 +/**
 + * drm_mode_put_tile_group - drop a reference to a tile group.
 + * @dev: DRM device
 + * @tg: tile group to drop reference to.
 + *
 + * drop reference to tile group and free if 0.
 + */
 +void drm_mode_put_tile_group(struct drm_device *dev,
 +                           struct drm_tile_group *tg)
 +{
 +      kref_put(&tg->refcount, drm_tile_group_free);
 +}
 +
 +/**
 + * drm_mode_get_tile_group - get a reference to an existing tile group
 + * @dev: DRM device
 + * @topology: 8 bytes, unique per monitor.
 + *
 + * Use the unique bytes to get a reference to an existing tile group.
 + *
 + * RETURNS:
 + * tile group or NULL if not found.
 + */
 +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 +                                             char topology[8])
 +{
 +      struct drm_tile_group *tg;
 +      int id;
 +      mutex_lock(&dev->mode_config.idr_mutex);
 +      idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
 +              if (!memcmp(tg->group_data, topology, 8)) {
 +                      if (!kref_get_unless_zero(&tg->refcount))
 +                              tg = NULL;
 +                      mutex_unlock(&dev->mode_config.idr_mutex);
 +                      return tg;
 +              }
 +      }
 +      mutex_unlock(&dev->mode_config.idr_mutex);
 +      return NULL;
 +}
 +
 +/**
 + * drm_mode_create_tile_group - create a tile group from a displayid description
 + * @dev: DRM device
 + * @topology: 8 bytes, unique per monitor.
 + *
 + * Create a tile group for the unique monitor, and get a unique
 + * identifier for the tile group.
 + *
 + * RETURNS:
 + * new tile group or error.
 + */
 +struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 +                                                char topology[8])
 +{
 +      struct drm_tile_group *tg;
 +      int ret;
 +
 +      tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 +      if (!tg)
 +              return ERR_PTR(-ENOMEM);
 +
 +      kref_init(&tg->refcount);
 +      memcpy(tg->group_data, topology, 8);
 +      tg->dev = dev;
 +
 +      mutex_lock(&dev->mode_config.idr_mutex);
 +      ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
 +      if (ret >= 0) {
 +              tg->id = ret;
 +      } else {
 +              kfree(tg);
 +              tg = ERR_PTR(ret);
 +      }
 +
 +      mutex_unlock(&dev->mode_config.idr_mutex);
 +      return tg;
 +}
@@@ -739,6 -739,8 +739,8 @@@ EXPORT_SYMBOL(drm_mode_vrefresh)
   * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
   *   buffers containing two eyes (only adjust the timings when needed, eg. for
   *   "frame packing" or "side by side full").
+  * - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
+  *   be performed for doublescan and vscan > 1 modes respectively.
   */
  void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
  {
                }
        }
  
-       if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
-               p->crtc_vdisplay *= 2;
-               p->crtc_vsync_start *= 2;
-               p->crtc_vsync_end *= 2;
-               p->crtc_vtotal *= 2;
+       if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
+               if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+                       p->crtc_vdisplay *= 2;
+                       p->crtc_vsync_start *= 2;
+                       p->crtc_vsync_end *= 2;
+                       p->crtc_vtotal *= 2;
+               }
        }
  
-       if (p->vscan > 1) {
-               p->crtc_vdisplay *= p->vscan;
-               p->crtc_vsync_start *= p->vscan;
-               p->crtc_vsync_end *= p->vscan;
-               p->crtc_vtotal *= p->vscan;
+       if (!(adjust_flags & CRTC_NO_VSCAN)) {
+               if (p->vscan > 1) {
+                       p->crtc_vdisplay *= p->vscan;
+                       p->crtc_vsync_start *= p->vscan;
+                       p->crtc_vsync_end *= p->vscan;
+                       p->crtc_vtotal *= p->vscan;
+               }
        }
  
        if (adjust_flags & CRTC_STEREO_DOUBLE) {
@@@ -905,41 -911,10 +911,41 @@@ bool drm_mode_equal_no_clocks_no_stereo
  }
  EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
  
 +/**
 + * drm_mode_validate_basic - make sure the mode is somewhat sane
 + * @mode: mode to check
 + *
 + * Check that the mode timings are at least somewhat reasonable.
 + * Any hardware specific limits are left up for each driver to check.
 + *
 + * Returns:
 + * The mode status
 + */
 +enum drm_mode_status
 +drm_mode_validate_basic(const struct drm_display_mode *mode)
 +{
 +      if (mode->clock == 0)
 +              return MODE_CLOCK_LOW;
 +
 +      if (mode->hdisplay == 0 ||
 +          mode->hsync_start < mode->hdisplay ||
 +          mode->hsync_end < mode->hsync_start ||
 +          mode->htotal < mode->hsync_end)
 +              return MODE_H_ILLEGAL;
 +
 +      if (mode->vdisplay == 0 ||
 +          mode->vsync_start < mode->vdisplay ||
 +          mode->vsync_end < mode->vsync_start ||
 +          mode->vtotal < mode->vsync_end)
 +              return MODE_V_ILLEGAL;
 +
 +      return MODE_OK;
 +}
 +EXPORT_SYMBOL(drm_mode_validate_basic);
 +
  /**
   * drm_mode_validate_size - make sure modes adhere to size constraints
 - * @dev: DRM device
 - * @mode_list: list of modes to check
 + * @mode: mode to check
   * @maxX: maximum width
   * @maxY: maximum height
   *
   * limitations of the DRM device/connector. If a mode is too big its status
   * member is updated with the appropriate validation failure code. The list
   * itself is not changed.
 + *
 + * Returns:
 + * The mode status
   */
 -void drm_mode_validate_size(struct drm_device *dev,
 -                          struct list_head *mode_list,
 -                          int maxX, int maxY)
 +enum drm_mode_status
 +drm_mode_validate_size(const struct drm_display_mode *mode,
 +                     int maxX, int maxY)
  {
 -      struct drm_display_mode *mode;
 +      if (maxX > 0 && mode->hdisplay > maxX)
 +              return MODE_VIRTUAL_X;
  
 -      list_for_each_entry(mode, mode_list, head) {
 -              if (maxX > 0 && mode->hdisplay > maxX)
 -                      mode->status = MODE_VIRTUAL_X;
 +      if (maxY > 0 && mode->vdisplay > maxY)
 +              return MODE_VIRTUAL_Y;
  
 -              if (maxY > 0 && mode->vdisplay > maxY)
 -                      mode->status = MODE_VIRTUAL_Y;
 -      }
 +      return MODE_OK;
  }
  EXPORT_SYMBOL(drm_mode_validate_size);
  
@@@ -706,12 -706,11 +706,12 @@@ static int i915_drm_resume(struct drm_d
                        dev_priv->display.hpd_irq_setup(dev);
                spin_unlock_irq(&dev_priv->irq_lock);
  
 -              intel_dp_mst_resume(dev);
                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);
  
 +              intel_dp_mst_resume(dev);
 +
                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
@@@ -811,8 -810,6 +811,8 @@@ int i915_reset(struct drm_device *dev
        if (!i915.reset)
                return 0;
  
 +      intel_reset_gt_powersave(dev);
 +
        mutex_lock(&dev->struct_mutex);
  
        i915_gem_reset(dev);
                return ret;
        }
  
+       intel_overlay_reset(dev_priv);
        /* Ok, now get things going again... */
  
        /*
                 * of re-init after reset.
                 */
                if (INTEL_INFO(dev)->gen > 5)
 -                      intel_reset_gt_powersave(dev);
 +                      intel_enable_gt_powersave(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
@@@ -1299,7 -1298,9 +1301,9 @@@ static int vlv_suspend_complete(struct 
        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;
-       vlv_save_gunit_s0ix_state(dev_priv);
+       if (!IS_CHERRYVIEW(dev_priv->dev))
+               vlv_save_gunit_s0ix_state(dev_priv);
  
        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
@@@ -1330,7 -1331,8 +1334,8 @@@ static int vlv_resume_prepare(struct dr
         */
        ret = vlv_force_gfx_clock(dev_priv, true);
  
-       vlv_restore_gunit_s0ix_state(dev_priv);
+       if (!IS_CHERRYVIEW(dev_priv->dev))
+               vlv_restore_gunit_s0ix_state(dev_priv);
  
        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
@@@ -1586,7 -1588,7 +1591,7 @@@ static struct drm_driver driver = 
        .gem_prime_import = i915_gem_prime_import,
  
        .dumb_create = i915_gem_dumb_create,
 -      .dumb_map_offset = i915_gem_dumb_map_offset,
 +      .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20141121"
+ #define DRIVER_DATE           "20141219"
  
  #undef WARN_ON
- #define WARN_ON(x)            WARN(x, "WARN_ON(" #x ")")
+ /* Many gcc seem to not see through this and fall over :( */
+ #if 0
+ #define WARN_ON(x) ({ \
+       bool __i915_warn_cond = (x); \
+       if (__builtin_constant_p(__i915_warn_cond)) \
+               BUILD_BUG_ON(__i915_warn_cond); \
+       WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+ #else
+ #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+ #endif
+ #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+                            (long) (x), __func__);
+ /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+  * which may not necessarily be a user visible problem.  This will either
+  * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+  * enable distros and users to tailor their preferred amount of i915 abrt
+  * spam.
+  */
+ #define I915_STATE_WARN(condition, format...) ({                      \
+       int __ret_warn_on = !!(condition);                              \
+       if (unlikely(__ret_warn_on)) {                                  \
+               if (i915.verbose_state_checks)                          \
+                       __WARN_printf(format);                          \
+               else                                                    \
+                       DRM_ERROR(format);                              \
+       }                                                               \
+       unlikely(__ret_warn_on);                                        \
+ })
+ #define I915_STATE_WARN_ON(condition) ({                              \
+       int __ret_warn_on = !!(condition);                              \
+       if (unlikely(__ret_warn_on)) {                                  \
+               if (i915.verbose_state_checks)                          \
+                       __WARN_printf("WARN_ON(" #condition ")\n");     \
+               else                                                    \
+                       DRM_ERROR("WARN_ON(" #condition ")\n");         \
+       }                                                               \
+       unlikely(__ret_warn_on);                                        \
+ })
  
  enum pipe {
        INVALID_PIPE = -1,
@@@ -924,7 -965,6 +965,7 @@@ struct i915_suspend_saved_registers 
        u32 savePIPEB_LINK_N1;
        u32 saveMCHBAR_RENDER_STANDBY;
        u32 savePCH_PORT_HOTPLUG;
 +      u16 saveGCDGMBUS;
  };
  
  struct vlv_s0ix_state {
@@@ -1130,6 -1170,11 +1171,11 @@@ struct intel_l3_parity 
        int which_slice;
  };
  
+ struct i915_gem_batch_pool {
+       struct drm_device *dev;
+       struct list_head cache_list;
+ };
  struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
         */
        struct list_head unbound_list;
  
+       /*
+        * A pool of objects to use as shadow copies of client batch buffers
+        * when the command parser is enabled. Prevents the client from
+        * modifying the batch contents after software parsing.
+        */
+       struct i915_gem_batch_pool batch_pool;
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
  
@@@ -1307,6 -1359,13 +1360,13 @@@ enum drrs_support_type 
        SEAMLESS_DRRS_SUPPORT = 2
  };
  
+ enum psr_lines_to_wait {
+       PSR_0_LINES_TO_WAIT = 0,
+       PSR_1_LINE_TO_WAIT,
+       PSR_4_LINES_TO_WAIT,
+       PSR_8_LINES_TO_WAIT
+ };
  struct intel_vbt_data {
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
        int edp_bpp;
        struct edp_power_seq edp_pps;
  
+       struct {
+               bool full_link;
+               bool require_aux_wakeup;
+               int idle_frames;
+               enum psr_lines_to_wait lines_to_wait;
+               int tp1_wakeup_time;
+               int tp2_tp3_wakeup_time;
+       } psr;
        struct {
                u16 pwm_freq_hz;
                bool present;
                bool active_low_pwm;
                u8 min_brightness;      /* min_brightness/255 of max */
+               u8 controller;          /* brightness controller number */
        } backlight;
  
        /* MIPI DSI */
@@@ -1772,6 -1841,8 +1842,8 @@@ struct drm_i915_private 
                void (*stop_ring)(struct intel_engine_cs *ring);
        } gt;
  
+       uint32_t request_uniq;
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
@@@ -1855,6 -1926,8 +1927,8 @@@ struct drm_i915_gem_object 
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
  
+       struct list_head batch_pool_list;
        /**
         * This is set if the object is on the active lists (has pending
   * rendering and so a non-zero seqno), and is not set if it is on
        void *dma_buf_vmapping;
        int vmapping_count;
  
-       struct intel_engine_cs *ring;
        /** Breadcrumb of last rendering to the buffer. */
-       uint32_t last_read_seqno;
-       uint32_t last_write_seqno;
+       struct drm_i915_gem_request *last_read_req;
+       struct drm_i915_gem_request *last_write_req;
        /** Breadcrumb of last fenced GPU access to the buffer. */
-       uint32_t last_fenced_seqno;
+       struct drm_i915_gem_request *last_fenced_req;
  
        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;
        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;
  
-       /** User space pin count and filp owning the pin */
-       unsigned long user_pin_count;
-       struct drm_file *pin_filp;
        union {
                /** for phy allocated objects */
                struct drm_dma_handle *phys_handle;
@@@ -1975,11 -2042,14 +2043,14 @@@ void i915_gem_track_fb(struct drm_i915_
   * The request queue allows us to note sequence numbers that have been emitted
   * and may be associated with active buffers to be retired.
   *
-  * By keeping this list, we can avoid having to do questionable
-  * sequence-number comparisons on buffer last_rendering_seqnos, and associate
-  * an emission time with seqnos for tracking how far ahead of the GPU we are.
+  * By keeping this list, we can avoid having to do questionable sequence
+  * number comparisons on buffer last_read|write_seqno. It also allows an
+  * emission time to be associated with the request for tracking how far ahead
+  * of the GPU the submission is.
   */
  struct drm_i915_gem_request {
+       struct kref ref;
        /** On Which ring this request was generated */
        struct intel_engine_cs *ring;
  
        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;
+       uint32_t uniq;
  };
  
+ void i915_gem_request_free(struct kref *req_ref);
+ static inline uint32_t
+ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+ {
+       return req ? req->seqno : 0;
+ }
+ static inline struct intel_engine_cs *
+ i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+ {
+       return req ? req->ring : NULL;
+ }
+ static inline void
+ i915_gem_request_reference(struct drm_i915_gem_request *req)
+ {
+       kref_get(&req->ref);
+ }
+ static inline void
+ i915_gem_request_unreference(struct drm_i915_gem_request *req)
+ {
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+       kref_put(&req->ref, i915_gem_request_free);
+ }
+ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+                                          struct drm_i915_gem_request *src)
+ {
+       if (src)
+               i915_gem_request_reference(src);
+       if (*pdst)
+               i915_gem_request_unreference(*pdst);
+       *pdst = src;
+ }
+ /*
+  * XXX: i915_gem_request_completed should be here but currently needs the
+  * definition of i915_seqno_passed() which is below. It will be moved in
+  * a later patch when the call to i915_seqno_passed() is obsoleted...
+  */
  struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;
        struct drm_file *file;
@@@ -2242,7 -2359,8 +2360,8 @@@ struct drm_i915_cmd_table 
  
  #define HAS_DDI(dev)          (INTEL_INFO(dev)->has_ddi)
  #define HAS_FPGA_DBG_UNCLAIMED(dev)   (INTEL_INFO(dev)->has_fpga_dbg)
- #define HAS_PSR(dev)          (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ #define HAS_PSR(dev)          (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+                                IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
  #define HAS_RUNTIME_PM(dev)   (IS_GEN6(dev) || IS_HASWELL(dev) || \
                                 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
  #define HAS_RC6(dev)          (INTEL_INFO(dev)->gen >= 6)
@@@ -2312,6 -2430,7 +2431,7 @@@ struct i915_params 
        bool disable_vtd_wa;
        int use_mmio_flip;
        bool mmio_debug;
+       bool verbose_state_checks;
  };
  extern struct i915_params i915 __read_mostly;
  
@@@ -2412,10 -2531,6 +2532,6 @@@ int i915_gem_execbuffer(struct drm_devi
                        struct drm_file *file_priv);
  int i915_gem_execbuffer2(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
- int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv);
- int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
  int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
  int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@@ -2460,10 -2575,23 +2576,23 @@@ void i915_gem_vma_destroy(struct i915_v
  #define PIN_GLOBAL 0x4
  #define PIN_OFFSET_BIAS 0x8
  #define PIN_OFFSET_MASK (~4095)
+ int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+                                         struct i915_address_space *vm,
+                                         uint32_t alignment,
+                                         uint64_t flags,
+                                         const struct i915_ggtt_view *view);
+ static inline
  int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
-                                    uint64_t flags);
+                                    uint64_t flags)
+ {
+       return i915_gem_object_pin_view(obj, vm, alignment, flags,
+                                               &i915_ggtt_view_normal);
+ }
+ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+                 u32 flags);
  int __must_check i915_vma_unbind(struct i915_vma *vma);
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
  void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@@ -2501,8 -2629,9 +2630,8 @@@ void i915_vma_move_to_active(struct i91
  int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
 -int i915_gem_dumb_map_offset(struct drm_file *file_priv,
 -                           struct drm_device *dev, uint32_t handle,
 -                           uint64_t *offset);
 +int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 +                    uint32_t handle, uint64_t *offset);
  /**
   * Returns true if seq1 is later than seq2.
   */
@@@ -2512,6 -2641,18 +2641,18 @@@ i915_seqno_passed(uint32_t seq1, uint32
        return (int32_t)(seq1 - seq2) >= 0;
  }
  
+ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+                                             bool lazy_coherency)
+ {
+       u32 seqno;
+       BUG_ON(req == NULL);
+       seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+       return i915_seqno_passed(seqno, req->seqno);
+ }
  int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
  int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
  int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@@ -2527,7 -2668,7 +2668,7 @@@ bool i915_gem_retire_requests(struct dr
  void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
  int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
- int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+ int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
  
  static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
  {
@@@ -2570,17 -2711,15 +2711,15 @@@ int __must_check i915_gpu_idle(struct d
  int __must_check i915_gem_suspend(struct drm_device *dev);
  int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
-                      struct drm_i915_gem_object *batch_obj,
-                      u32 *seqno);
- #define i915_add_request(ring, seqno) \
-       __i915_add_request(ring, NULL, NULL, seqno)
- int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+                      struct drm_i915_gem_object *batch_obj);
+ #define i915_add_request(ring) \
+       __i915_add_request(ring, NULL, NULL)
+ int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct drm_i915_file_private *file_priv);
- int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
-                                uint32_t seqno);
+ int __must_check i915_wait_request(struct drm_i915_gem_request *req);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
  int __must_check
  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@@ -2614,18 -2753,51 +2753,51 @@@ struct dma_buf *i915_gem_prime_export(s
  
  void i915_gem_restore_fences(struct drm_device *dev);
  
+ unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+                                      struct i915_address_space *vm,
+                                      enum i915_ggtt_view_type view);
+ static inline
  unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                                 struct i915_address_space *vm);
+                                 struct i915_address_space *vm)
+ {
+       return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ }
  bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+ bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+                            struct i915_address_space *vm,
+                            enum i915_ggtt_view_type view);
+ static inline
  bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm);
+                       struct i915_address_space *vm)
+ {
+       return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ }
  unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm);
+ struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+                                         struct i915_address_space *vm,
+                                         const struct i915_ggtt_view *view);
+ static inline
  struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm);
+                                    struct i915_address_space *vm)
+ {
+       return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
+ }
+ struct i915_vma *
+ i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+                                      struct i915_address_space *vm,
+                                      const struct i915_ggtt_view *view);
+ static inline
  struct i915_vma *
  i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm);
+                                 struct i915_address_space *vm)
+ {
+       return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
+                                               &i915_ggtt_view_normal);
+ }
  
  struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
  static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
@@@ -2807,6 -2979,13 +2979,13 @@@ void i915_destroy_error_state(struct dr
  void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
  const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
  
+ /* i915_gem_batch_pool.c */
+ void i915_gem_batch_pool_init(struct drm_device *dev,
+                             struct i915_gem_batch_pool *pool);
+ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+ struct drm_i915_gem_object*
+ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
  /* i915_cmd_parser.c */
  int i915_cmd_parser_get_version(void);
  int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@@ -2814,7 -2993,9 +2993,9 @@@ void i915_cmd_parser_fini_ring(struct i
  bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
  int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
+                   struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
+                   u32 batch_len,
                    bool is_master);
  
  /* i915_suspend.c */
@@@ -2894,9 -3075,6 +3075,6 @@@ extern void intel_modeset_setup_hw_stat
                                         bool force_restore);
  extern void i915_redisable_vga(struct drm_device *dev);
  extern void i915_redisable_vga_power_on(struct drm_device *dev);
- extern bool intel_fbc_enabled(struct drm_device *dev);
- extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
- extern void intel_disable_fbc(struct drm_device *dev);
  extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
  extern void intel_init_pch_refclk(struct drm_device *dev);
  extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@@ -3032,11 -3210,6 +3210,11 @@@ static inline unsigned long msecs_to_ji
        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
  }
  
 +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 +{
 +        return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
 +}
 +
  static inline unsigned long
  timespec_to_jiffies_timeout(const struct timespec *value)
  {
@@@ -3072,4 -3245,11 +3250,11 @@@ wait_remaining_ms_from_jiffies(unsigne
        }
  }
  
+ static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+                                     struct drm_i915_gem_request *req)
+ {
+       if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+               i915_gem_request_assign(&ring->trace_irq_req, req);
+ }
  #endif
@@@ -401,6 -401,7 +401,6 @@@ static in
  i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
 -              bool dumb,
                uint32_t *handle_p)
  {
        struct drm_i915_gem_object *obj;
        if (obj == NULL)
                return -ENOMEM;
  
 -      obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
@@@ -435,7 -437,7 +435,7 @@@ i915_gem_dumb_create(struct drm_file *f
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
 -                             args->size, true, &args->handle);
 +                             args->size, &args->handle);
  }
  
  /**
@@@ -448,7 -450,7 +448,7 @@@ i915_gem_create_ioctl(struct drm_devic
        struct drm_i915_gem_create *args = data;
  
        return i915_gem_create(file, dev,
 -                             args->size, false, &args->handle);
 +                             args->size, &args->handle);
  }
  
  static inline int
@@@ -1151,19 -1153,18 +1151,18 @@@ i915_gem_check_wedge(struct i915_gpu_er
  }
  
  /*
-  * Compare seqno against outstanding lazy request. Emit a request if they are
-  * equal.
+  * Compare arbitrary request against outstanding lazy request. Emit on match.
   */
  int
- i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+ i915_gem_check_olr(struct drm_i915_gem_request *req)
  {
        int ret;
  
-       BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
  
        ret = 0;
-       if (seqno == ring->outstanding_lazy_seqno)
-               ret = i915_add_request(ring, NULL);
+       if (req == req->ring->outstanding_lazy_request)
+               ret = i915_add_request(req->ring);
  
        return ret;
  }
@@@ -1188,10 -1189,9 +1187,9 @@@ static bool can_wait_boost(struct drm_i
  }
  
  /**
-  * __i915_wait_seqno - wait until execution of seqno has finished
-  * @ring: the ring expected to report seqno
-  * @seqno: duh!
-  * @reset_counter: reset sequence associated with the given seqno
+  * __i915_wait_request - wait until execution of request has finished
+  * @req: duh!
+  * @reset_counter: reset sequence associated with the given request
   * @interruptible: do an interruptible wait (normally yes)
   * @timeout: in - how long to wait (NULL forever); out - how much time remaining
   *
   * reset_counter _must_ be read before, and an appropriate smp_rmb must be
   * inserted.
   *
-  * Returns 0 if the seqno was found within the alloted time. Else returns the
+  * Returns 0 if the request was found within the alloted time. Else returns the
   * errno with remaining time filled in timeout argument.
   */
- int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct drm_i915_file_private *file_priv)
  {
+       struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
  
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
  
-       if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+       if (i915_gem_request_completed(req, true))
                return 0;
  
 -      timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 +      timeout_expire = timeout ?
 +              jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
  
        if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                return -ENODEV;
  
        /* Record current time in case interrupted by signal, or wedged */
-       trace_i915_gem_request_wait_begin(ring, seqno);
+       trace_i915_gem_request_wait_begin(req);
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
                        break;
                }
  
-               if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+               if (i915_gem_request_completed(req, false)) {
                        ret = 0;
                        break;
                }
                }
        }
        now = ktime_get_raw_ns();
-       trace_i915_gem_request_wait_end(ring, seqno);
+       trace_i915_gem_request_wait_end(req);
  
        if (!irq_test_in_progress)
                ring->irq_put(ring);
                s64 tres = *timeout - (now - before);
  
                *timeout = tres < 0 ? 0 : tres;
 +
 +              /*
 +               * Apparently ktime isn't accurate enough and occasionally has a
 +               * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
 +               * things up to make the test happy. We allow up to 1 jiffy.
 +               *
 +               * This is a regression from the timespec->ktime conversion.
 +               */
 +              if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
 +                      *timeout = 0;
        }
  
        return ret;
  }
  
  /**
-  * Waits for a sequence number to be signaled, and cleans up the
+  * Waits for a request to be signaled, and cleans up the
   * request and object lists appropriately for that event.
   */
  int
- i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+ i915_wait_request(struct drm_i915_gem_request *req)
  {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       bool interruptible = dev_priv->mm.interruptible;
+       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
+       bool interruptible;
        unsigned reset_counter;
        int ret;
  
+       BUG_ON(req == NULL);
+       dev = req->ring->dev;
+       dev_priv = dev->dev_private;
+       interruptible = dev_priv->mm.interruptible;
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-       BUG_ON(seqno == 0);
  
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;
  
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_olr(req);
        if (ret)
                return ret;
  
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-       return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
-                                NULL, NULL);
+       i915_gem_request_reference(req);
+       ret = __i915_wait_request(req, reset_counter,
+                                 interruptible, NULL, NULL);
+       i915_gem_request_unreference(req);
+       return ret;
  }
  
  static int
@@@ -1355,11 -1353,11 +1362,11 @@@ i915_gem_object_wait_rendering__tail(st
        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
-        * Note that the last_write_seqno is always the earlier of
-        * the two (read/write) seqno, so if we haved successfully waited,
+        * Note that the last_write_req is always the earlier of
+        * the two (read/write) requests, so if we have successfully waited,
         * we know we have passed the last write.
         */
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_write_req, NULL);
  
        return 0;
  }
@@@ -1372,15 -1370,14 +1379,14 @@@ static __must_check in
  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
  {
-       struct intel_engine_cs *ring = obj->ring;
-       u32 seqno;
+       struct drm_i915_gem_request *req;
        int ret;
  
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
  
-       ret = i915_wait_seqno(ring, seqno);
+       ret = i915_wait_request(req);
        if (ret)
                return ret;
  
@@@ -1395,33 -1392,33 +1401,33 @@@ i915_gem_object_wait_rendering__nonbloc
                                            struct drm_i915_file_private *file_priv,
                                            bool readonly)
  {
+       struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
-       u32 seqno;
        int ret;
  
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
  
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
  
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
  
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_olr(req);
        if (ret)
                return ret;
  
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
-       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
-                               file_priv);
+       ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
        if (ret)
                return ret;
  
@@@ -1838,10 -1835,10 +1844,10 @@@ static void i915_gem_object_free_mmap_o
        drm_gem_free_mmap_offset(&obj->base);
  }
  
 -static int
 +int
  i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
 -                uint32_t handle, bool dumb,
 +                uint32_t handle,
                  uint64_t *offset)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                goto unlock;
        }
  
 -      /*
 -       * We don't allow dumb mmaps on objects created using another
 -       * interface.
 -       */
 -      WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
 -                "Illegal dumb map of accelerated buffer.\n");
 -
        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
@@@ -1882,6 -1886,15 +1888,6 @@@ unlock
        return ret;
  }
  
 -int
 -i915_gem_dumb_map_offset(struct drm_file *file,
 -                       struct drm_device *dev,
 -                       uint32_t handle,
 -                       uint64_t *offset)
 -{
 -      return i915_gem_mmap_gtt(file, dev, handle, true, offset);
 -}
 -
  /**
   * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
   * @dev: DRM device
@@@ -1903,7 -1916,7 +1909,7 @@@ i915_gem_mmap_gtt_ioctl(struct drm_devi
  {
        struct drm_i915_gem_mmap_gtt *args = data;
  
 -      return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
 +      return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  }
  
  static inline int
@@@ -2250,14 -2263,18 +2256,18 @@@ static voi
  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
  {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req;
+       struct intel_engine_cs *old_ring;
  
        BUG_ON(ring == NULL);
-       if (obj->ring != ring && obj->last_write_seqno) {
-               /* Keep the seqno relative to the current ring */
-               obj->last_write_seqno = seqno;
+       req = intel_ring_get_request(ring);
+       old_ring = i915_gem_request_get_ring(obj->last_read_req);
+       if (old_ring != ring && obj->last_write_req) {
+               /* Keep the request relative to the current ring */
+               i915_gem_request_assign(&obj->last_write_req, req);
        }
-       obj->ring = ring;
  
        /* Add a reference if we're newly entering the active list. */
        if (!obj->active) {
  
        list_move_tail(&obj->ring_list, &ring->active_list);
  
-       obj->last_read_seqno = seqno;
+       i915_gem_request_assign(&obj->last_read_req, req);
  }
  
  void i915_vma_move_to_active(struct i915_vma *vma,
  static void
  i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *vm;
        struct i915_vma *vma;
  
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
  
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               vma = i915_gem_obj_to_vma(obj, vm);
-               if (vma && !list_empty(&vma->mm_list))
-                       list_move_tail(&vma->mm_list, &vm->inactive_list);
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (!list_empty(&vma->mm_list))
+                       list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
        }
  
        intel_fb_obj_flush(obj, true);
  
        list_del_init(&obj->ring_list);
-       obj->ring = NULL;
  
-       obj->last_read_seqno = 0;
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_read_req, NULL);
+       i915_gem_request_assign(&obj->last_write_req, NULL);
        obj->base.write_domain = 0;
  
-       obj->last_fenced_seqno = 0;
+       i915_gem_request_assign(&obj->last_fenced_req, NULL);
  
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
  static void
  i915_gem_object_retire(struct drm_i915_gem_object *obj)
  {
-       struct intel_engine_cs *ring = obj->ring;
-       if (ring == NULL)
+       if (obj->last_read_req == NULL)
                return;
  
-       if (i915_seqno_passed(ring->get_seqno(ring, true),
-                             obj->last_read_seqno))
+       if (i915_gem_request_completed(obj->last_read_req, true))
                i915_gem_object_move_to_inactive(obj);
  }
  
@@@ -2395,8 -2405,7 +2398,7 @@@ i915_gem_get_seqno(struct drm_device *d
  
  int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
-                      struct drm_i915_gem_object *obj,
-                      u32 *out_seqno)
+                      struct drm_i915_gem_object *obj)
  {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position, request_start;
        int ret;
  
-       request = ring->preallocated_lazy_request;
+       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
                return -ENOMEM;
  
                        return ret;
        }
  
-       request->seqno = intel_ring_get_seqno(ring);
-       request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;
  
                spin_unlock(&file_priv->mm.lock);
        }
  
-       trace_i915_gem_request_add(ring, request->seqno);
-       ring->outstanding_lazy_seqno = 0;
-       ring->preallocated_lazy_request = NULL;
+       trace_i915_gem_request_add(request);
+       ring->outstanding_lazy_request = NULL;
  
        i915_queue_hangcheck(ring->dev);
  
                           round_jiffies_up_relative(HZ));
        intel_mark_busy(dev_priv->dev);
  
-       if (out_seqno)
-               *out_seqno = request->seqno;
        return 0;
  }
  
@@@ -2562,33 -2566,39 +2559,39 @@@ static void i915_set_reset_status(struc
  
  static void i915_gem_free_request(struct drm_i915_gem_request *request)
  {
-       struct intel_context *ctx = request->ctx;
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);
  
+       i915_gem_request_unreference(request);
+ }
+ void i915_gem_request_free(struct kref *req_ref)
+ {
+       struct drm_i915_gem_request *req = container_of(req_ref,
+                                                typeof(*req), ref);
+       struct intel_context *ctx = req->ctx;
        if (ctx) {
                if (i915.enable_execlists) {
-                       struct intel_engine_cs *ring = request->ring;
+                       struct intel_engine_cs *ring = req->ring;
  
                        if (ctx != ring->default_context)
                                intel_lr_context_unpin(ring, ctx);
                }
                i915_gem_context_unreference(ctx);
        }
-       kfree(request);
+       kfree(req);
  }
  
  struct drm_i915_gem_request *
  i915_gem_find_active_request(struct intel_engine_cs *ring)
  {
        struct drm_i915_gem_request *request;
-       u32 completed_seqno;
-       completed_seqno = ring->get_seqno(ring, false);
  
        list_for_each_entry(request, &ring->request_list, list) {
-               if (i915_seqno_passed(completed_seqno, request->seqno))
+               if (i915_gem_request_completed(request, false))
                        continue;
  
                return request;
@@@ -2663,10 -2673,8 +2666,8 @@@ static void i915_gem_reset_ring_cleanup
                i915_gem_free_request(request);
        }
  
-       /* These may not have been flush before the reset, do so now */
-       kfree(ring->preallocated_lazy_request);
-       ring->preallocated_lazy_request = NULL;
-       ring->outstanding_lazy_seqno = 0;
+       /* This may not have been flushed before the reset, so clean it now */
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
  }
  
  void i915_gem_restore_fences(struct drm_device *dev)
@@@ -2718,15 -2726,11 +2719,11 @@@ void i915_gem_reset(struct drm_device *
  void
  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
  {
-       uint32_t seqno;
        if (list_empty(&ring->request_list))
                return;
  
        WARN_ON(i915_verify_lists(ring->dev));
  
-       seqno = ring->get_seqno(ring, true);
        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
                                      struct drm_i915_gem_object,
                                      ring_list);
  
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+               if (!i915_gem_request_completed(obj->last_read_req, true))
                        break;
  
                i915_gem_object_move_to_inactive(obj);
                                           struct drm_i915_gem_request,
                                           list);
  
-               if (!i915_seqno_passed(seqno, request->seqno))
+               if (!i915_gem_request_completed(request, true))
                        break;
  
-               trace_i915_gem_request_retire(ring, request->seqno);
+               trace_i915_gem_request_retire(request);
  
                /* This is one of the few common intersection points
                 * between legacy ringbuffer submission and execlists:
                i915_gem_free_request(request);
        }
  
-       if (unlikely(ring->trace_irq_seqno &&
-                    i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+       if (unlikely(ring->trace_irq_req &&
+                    i915_gem_request_completed(ring->trace_irq_req, true))) {
                ring->irq_put(ring);
-               ring->trace_irq_seqno = 0;
+               i915_gem_request_assign(&ring->trace_irq_req, NULL);
        }
  
        WARN_ON(i915_verify_lists(ring->dev));
@@@ -2854,14 -2858,17 +2851,17 @@@ i915_gem_idle_work_handler(struct work_
  static int
  i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  {
+       struct intel_engine_cs *ring;
        int ret;
  
        if (obj->active) {
-               ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+               ring = i915_gem_request_get_ring(obj->last_read_req);
+               ret = i915_gem_check_olr(obj->last_read_req);
                if (ret)
                        return ret;
  
-               i915_gem_retire_requests_ring(obj->ring);
+               i915_gem_retire_requests_ring(ring);
        }
  
        return 0;
@@@ -2895,9 -2902,8 +2895,8 @@@ i915_gem_wait_ioctl(struct drm_device *
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *ring = NULL;
+       struct drm_i915_gem_request *req;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret = 0;
  
        if (args->flags != 0)
        if (ret)
                goto out;
  
-       if (obj->active) {
-               seqno = obj->last_read_seqno;
-               ring = obj->ring;
-       }
+       if (!obj->active || !obj->last_read_req)
+               goto out;
  
-       if (seqno == 0)
-                goto out;
+       req = obj->last_read_req;
  
        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a timeout <=0 (like busy ioctl)
  
        drm_gem_object_unreference(&obj->base);
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
  
-       return __i915_wait_seqno(ring, seqno, reset_counter, true,
-                                &args->timeout_ns, file->driver_priv);
+       ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+                                 file->driver_priv);
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
  
  out:
        drm_gem_object_unreference(&obj->base);
  i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct intel_engine_cs *to)
  {
-       struct intel_engine_cs *from = obj->ring;
+       struct intel_engine_cs *from;
        u32 seqno;
        int ret, idx;
  
+       from = i915_gem_request_get_ring(obj->last_read_req);
        if (from == NULL || to == from)
                return 0;
  
  
        idx = intel_ring_sync_index(from, to);
  
-       seqno = obj->last_read_seqno;
+       seqno = i915_gem_request_get_seqno(obj->last_read_req);
        /* Optimization: Avoid semaphore sync when we are sure we already
         * waited for an object with higher seqno */
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;
  
-       ret = i915_gem_check_olr(obj->ring, seqno);
+       ret = i915_gem_check_olr(obj->last_read_req);
        if (ret)
                return ret;
  
-       trace_i915_gem_ring_sync_to(from, to, seqno);
+       trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
-               /* We use last_read_seqno because sync_to()
+               /* We use last_read_req because sync_to()
                 * might have just caused seqno wrap under
                 * the radar.
                 */
-               from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+               from->semaphore.sync_seqno[idx] =
+                               i915_gem_request_get_seqno(obj->last_read_req);
  
        return ret;
  }
@@@ -3048,10 -3059,8 +3052,8 @@@ int i915_vma_unbind(struct i915_vma *vm
         * cause memory corruption through use-after-free.
         */
  
-       /* Throw away the active reference before moving to the unbound list */
-       i915_gem_object_retire(obj);
-       if (i915_is_ggtt(vma->vm)) {
+       if (i915_is_ggtt(vma->vm) &&
+           vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
                i915_gem_object_finish_gtt(obj);
  
                /* release the fence reg _after_ flushing */
        vma->unbind_vma(vma);
  
        list_del_init(&vma->mm_list);
-       if (i915_is_ggtt(vma->vm))
-               obj->map_and_fenceable = false;
+       if (i915_is_ggtt(vma->vm)) {
+               if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+                       obj->map_and_fenceable = false;
+               } else if (vma->ggtt_view.pages) {
+                       sg_free_table(vma->ggtt_view.pages);
+                       kfree(vma->ggtt_view.pages);
+                       vma->ggtt_view.pages = NULL;
+               }
+       }
  
        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
        if (list_empty(&obj->vma_list)) {
+               /* Throw away the active reference before
+                * moving to the unbound list. */
+               i915_gem_object_retire(obj);
                i915_gem_gtt_finish_object(obj);
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        }
@@@ -3257,17 -3277,12 +3270,12 @@@ static void i915_gem_write_fence(struc
             "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
             obj->stride, obj->tiling_mode);
  
-       switch (INTEL_INFO(dev)->gen) {
-       case 9:
-       case 8:
-       case 7:
-       case 6:
-       case 5:
-       case 4: i965_write_fence_reg(dev, reg, obj); break;
-       case 3: i915_write_fence_reg(dev, reg, obj); break;
-       case 2: i830_write_fence_reg(dev, reg, obj); break;
-       default: BUG();
-       }
+       if (IS_GEN2(dev))
+               i830_write_fence_reg(dev, reg, obj);
+       else if (IS_GEN3(dev))
+               i915_write_fence_reg(dev, reg, obj);
+       else if (INTEL_INFO(dev)->gen >= 4)
+               i965_write_fence_reg(dev, reg, obj);
  
        /* And similarly be paranoid that no direct access to this region
         * is reordered to before the fence is installed.
@@@ -3306,12 -3321,12 +3314,12 @@@ static void i915_gem_object_update_fenc
  static int
  i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  {
-       if (obj->last_fenced_seqno) {
-               int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+       if (obj->last_fenced_req) {
+               int ret = i915_wait_request(obj->last_fenced_req);
                if (ret)
                        return ret;
  
-               obj->last_fenced_seqno = 0;
+               i915_gem_request_assign(&obj->last_fenced_req, NULL);
        }
  
        return 0;
@@@ -3484,7 -3499,8 +3492,8 @@@ static struct i915_vma 
  i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
-                          uint64_t flags)
+                          uint64_t flags,
+                          const struct i915_ggtt_view *view)
  {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        i915_gem_object_pin_pages(obj);
  
-       vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+       vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
        if (IS_ERR(vma))
                goto err_unpin;
  
@@@ -3564,15 -3580,19 +3573,19 @@@ search_free
        if (ret)
                goto err_remove_node;
  
+       trace_i915_vma_bind(vma, flags);
+       ret = i915_vma_bind(vma, obj->cache_level,
+                           flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+       if (ret)
+               goto err_finish_gtt;
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
  
-       trace_i915_vma_bind(vma, flags);
-       vma->bind_vma(vma, obj->cache_level,
-                     flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
        return vma;
  
+ err_finish_gtt:
+       i915_gem_gtt_finish_object(obj);
  err_remove_node:
        drm_mm_remove_node(&vma->node);
  err_free_vma:
@@@ -3775,9 -3795,12 +3788,12 @@@ int i915_gem_object_set_cache_level(str
                }
  
                list_for_each_entry(vma, &obj->vma_list, vma_link)
-                       if (drm_mm_node_allocated(&vma->node))
-                               vma->bind_vma(vma, cache_level,
-                                               vma->bound & GLOBAL_BIND);
+                       if (drm_mm_node_allocated(&vma->node)) {
+                               ret = i915_vma_bind(vma, cache_level,
+                                                   vma->bound & GLOBAL_BIND);
+                               if (ret)
+                                       return ret;
+                       }
        }
  
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@@ -3896,18 -3919,14 +3912,14 @@@ static bool is_pin_display(struct drm_i
        if (!vma)
                return false;
  
-       /* There are 3 sources that pin objects:
+       /* There are 2 sources that pin objects:
         *   1. The display engine (scanouts, sprites, cursors);
         *   2. Reservations for execbuffer;
-        *   3. The user.
         *
         * We can ignore reservations as we hold the struct_mutex and
-        * are only called outside of the reservation path.  The user
-        * can only increment pin_count once, and so if after
-        * subtracting the potential reference by the user, any pin_count
-        * remains, it must be due to another use by the display engine.
+        * are only called outside of the reservation path.
         */
-       return vma->pin_count - !!obj->user_pin_count;
+       return vma->pin_count;
  }
  
  /*
@@@ -3924,7 -3943,7 +3936,7 @@@ i915_gem_object_pin_to_display_plane(st
        bool was_pin_display;
        int ret;
  
-       if (pipelined != obj->ring) {
+       if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
                ret = i915_gem_object_sync(obj, pipelined);
                if (ret)
                        return ret;
@@@ -4076,10 -4095,8 +4088,8 @@@ i915_gem_ring_throttle(struct drm_devic
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-       struct drm_i915_gem_request *request;
-       struct intel_engine_cs *ring = NULL;
+       struct drm_i915_gem_request *request, *target = NULL;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret;
  
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
  
-               ring = request->ring;
-               seqno = request->seqno;
+               target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       if (target)
+               i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
  
-       if (seqno == 0)
+       if (target == NULL)
                return 0;
  
-       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(target);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
  }
  
@@@ -4131,10 -4153,11 +4146,11 @@@ i915_vma_misplaced(struct i915_vma *vma
  }
  
  int
- i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm,
-                   uint32_t alignment,
-                   uint64_t flags)
+ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+                        struct i915_address_space *vm,
+                        uint32_t alignment,
+                        uint64_t flags,
+                        const struct i915_ggtt_view *view)
  {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
        if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
                return -EINVAL;
  
-       vma = i915_gem_obj_to_vma(obj, vm);
+       vma = i915_gem_obj_to_vma_view(obj, vm, view);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            i915_gem_obj_offset(obj, vm), alignment,
+                            i915_gem_obj_offset_view(obj, vm, view->type),
+                            alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
  
        bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-               vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+               vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+                                                flags, view);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
  
-       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
-               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+               ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+               if (ret)
+                       return ret;
+       }
  
        if ((bound ^ vma->bound) & GLOBAL_BIND) {
                bool mappable, fenceable;
@@@ -4250,102 -4278,6 +4271,6 @@@ i915_gem_object_unpin_fence(struct drm_
        }
  }
  
- int
- i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-                  struct drm_file *file)
- {
-       struct drm_i915_gem_pin *args = data;
-       struct drm_i915_gem_object *obj;
-       int ret;
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
-       if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_DEBUG("Attempting to pin a purgeable buffer\n");
-               ret = -EFAULT;
-               goto out;
-       }
-       if (obj->pin_filp != NULL && obj->pin_filp != file) {
-               DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               ret = -EINVAL;
-               goto out;
-       }
-       if (obj->user_pin_count == ULONG_MAX) {
-               ret = -EBUSY;
-               goto out;
-       }
-       if (obj->user_pin_count == 0) {
-               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
-               if (ret)
-                       goto out;
-       }
-       obj->user_pin_count++;
-       obj->pin_filp = file;
-       args->offset = i915_gem_obj_ggtt_offset(obj);
- out:
-       drm_gem_object_unreference(&obj->base);
- unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
- int
- i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-                    struct drm_file *file)
- {
-       struct drm_i915_gem_pin *args = data;
-       struct drm_i915_gem_object *obj;
-       int ret;
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
-       if (obj->pin_filp != file) {
-               DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               ret = -EINVAL;
-               goto out;
-       }
-       obj->user_pin_count--;
-       if (obj->user_pin_count == 0) {
-               obj->pin_filp = NULL;
-               i915_gem_object_ggtt_unpin(obj);
-       }
- out:
-       drm_gem_object_unreference(&obj->base);
- unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
  int
  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
        ret = i915_gem_object_flush_active(obj);
  
        args->busy = obj->active;
-       if (obj->ring) {
+       if (obj->last_read_req) {
+               struct intel_engine_cs *ring;
                BUILD_BUG_ON(I915_NUM_RINGS > 16);
-               args->busy |= intel_ring_flag(obj->ring) << 16;
+               ring = i915_gem_request_get_ring(obj->last_read_req);
+               args->busy |= intel_ring_flag(ring) << 16;
        }
  
        drm_gem_object_unreference(&obj->base);
@@@ -4454,6 -4388,7 +4381,7 @@@ void i915_gem_object_init(struct drm_i9
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
+       INIT_LIST_HEAD(&obj->batch_pool_list);
  
        obj->ops = ops;
  
@@@ -4609,12 -4544,13 +4537,13 @@@ void i915_gem_free_object(struct drm_ge
        intel_runtime_pm_put(dev_priv);
  }
  
- struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm)
+ struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+                                         struct i915_address_space *vm,
+                                         const struct i915_ggtt_view *view)
  {
        struct i915_vma *vma;
        list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == vm)
+               if (vma->vm == vm && vma->ggtt_view.type == view->type)
                        return vma;
  
        return NULL;
@@@ -4674,6 -4610,11 +4603,11 @@@ i915_gem_suspend(struct drm_device *dev
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
        flush_delayed_work(&dev_priv->mm.idle_work);
  
+       /* Assert that we sucessfully flushed all the work and
+        * reset the GPU back to its idle, low power state.
+        */
+       WARN_ON(dev_priv->mm.busy);
        return 0;
  
  err:
@@@ -4785,14 -4726,6 +4719,6 @@@ int i915_gem_init_rings(struct drm_devi
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       /*
-        * At least 830 can leave some of the unused rings
-        * "active" (ie. head != tail) after resume which
-        * will prevent c3 entry. Makes sure all unused rings
-        * are totally idle.
-        */
-       init_unused_rings(dev);
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@@ -4845,6 -4778,7 +4771,7 @@@ in
  i915_gem_init_hw(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
        int ret, i;
  
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
  
        i915_gem_init_swizzling(dev);
  
-       ret = dev_priv->gt.init_rings(dev);
-       if (ret)
-               return ret;
+       /*
+        * At least 830 can leave some of the unused rings
+        * "active" (ie. head != tail) after resume which
+        * will prevent c3 entry. Makes sure all unused rings
+        * are totally idle.
+        */
+       init_unused_rings(dev);
+       for_each_ring(ring, dev_priv, i) {
+               ret = ring->init_hw(ring);
+               if (ret)
+                       return ret;
+       }
  
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
@@@ -4933,18 -4877,18 +4870,18 @@@ int i915_gem_init(struct drm_device *de
        }
  
        ret = i915_gem_init_userptr(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret)
+               goto out_unlock;
  
        i915_gem_init_global_gtt(dev);
  
        ret = i915_gem_context_init(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret)
+               goto out_unlock;
+       ret = dev_priv->gt.init_rings(dev);
+       if (ret)
+               goto out_unlock;
  
        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                ret = 0;
        }
+ out_unlock:
        mutex_unlock(&dev->struct_mutex);
  
        return ret;
@@@ -5056,6 -5002,8 +4995,8 @@@ i915_gem_load(struct drm_device *dev
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);
  
+       i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
        mutex_init(&dev_priv->fb_tracking.lock);
  }
  
@@@ -5216,8 -5164,9 +5157,9 @@@ i915_gem_shrinker_count(struct shrinke
  }
  
  /* All the new VM stuff */
- unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                                 struct i915_address_space *vm)
+ unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+                                      struct i915_address_space *vm,
+                                      enum i915_ggtt_view_type view)
  {
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
  
        list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (vma->vm == vm)
+               if (vma->vm == vm && vma->ggtt_view.type == view)
                        return vma->node.start;
  
        }
        return -1;
  }
  
- bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm)
+ bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+                            struct i915_address_space *vm,
+                            enum i915_ggtt_view_type view)
  {
        struct i915_vma *vma;
  
        list_for_each_entry(vma, &o->vma_list, vma_link)
-               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+               if (vma->vm == vm &&
+                   vma->ggtt_view.type == view &&
+                   drm_mm_node_allocated(&vma->node))
                        return true;
  
        return false;
@@@ -5372,11 -5324,13 +5317,13 @@@ i915_gem_shrinker_oom(struct notifier_b
  
  struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
  {
+       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
        struct i915_vma *vma;
  
-       vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (vma->vm != i915_obj_to_ggtt(obj))
-               return NULL;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == ggtt &&
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+                       return vma;
  
-       return vma;
+       return NULL;
  }
@@@ -408,14 -408,25 +408,25 @@@ int i915_gem_context_enable(struct drm_
  
        BUG_ON(!dev_priv->ring[RCS].default_context);
  
-       if (i915.enable_execlists)
-               return 0;
+       if (i915.enable_execlists) {
+               for_each_ring(ring, dev_priv, i) {
+                       if (ring->init_context) {
+                               ret = ring->init_context(ring,
+                                               ring->default_context);
+                               if (ret) {
+                                       DRM_ERROR("ring init context: %d\n",
+                                                       ret);
+                                       return ret;
+                               }
+                       }
+               }
  
-       for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, ring->default_context);
-               if (ret)
-                       return ret;
-       }
+       } else
+               for_each_ring(ring, dev_priv, i) {
+                       ret = i915_switch_context(ring, ring->default_context);
+                       if (ret)
+                               return ret;
+               }
  
        return 0;
  }
@@@ -473,12 -484,7 +484,12 @@@ mi_set_context(struct intel_engine_cs *
               u32 hw_flags)
  {
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
 -      int ret;
 +      const int num_rings =
 +              /* Use an extended w/a on ivb+ if signalling from other rings */
 +              i915_semaphore_is_enabled(ring->dev) ?
 +              hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
 +              0;
 +      int len, i, ret;
  
        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
        if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
  
 -      ret = intel_ring_begin(ring, 6);
 +
 +      len = 4;
 +      if (INTEL_INFO(ring->dev)->gen >= 7)
 +              len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 +
 +      ret = intel_ring_begin(ring, len);
        if (ret)
                return ret;
  
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 -      if (INTEL_INFO(ring->dev)->gen >= 7)
 +      if (INTEL_INFO(ring->dev)->gen >= 7) {
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 -      else
 -              intel_ring_emit(ring, MI_NOOP);
 +              if (num_rings) {
 +                      struct intel_engine_cs *signaller;
 +
 +                      intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 +                      for_each_ring(signaller, to_i915(ring->dev), i) {
 +                              if (signaller == ring)
 +                                      continue;
 +
 +                              intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
 +                              intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 +                      }
 +              }
 +      }
  
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
         */
        intel_ring_emit(ring, MI_NOOP);
  
 -      if (INTEL_INFO(ring->dev)->gen >= 7)
 +      if (INTEL_INFO(ring->dev)->gen >= 7) {
 +              if (num_rings) {
 +                      struct intel_engine_cs *signaller;
 +
 +                      intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
 +                      for_each_ring(signaller, to_i915(ring->dev), i) {
 +                              if (signaller == ring)
 +                                      continue;
 +
 +                              intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
 +                              intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 +                      }
 +              }
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 -      else
 -              intel_ring_emit(ring, MI_NOOP);
 +      }
  
        intel_ring_advance(ring);
  
@@@ -611,9 -590,14 +622,14 @@@ static int do_switch(struct intel_engin
                goto unpin_out;
  
        vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-       if (!(vma->bound & GLOBAL_BIND))
-               vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
-                               GLOBAL_BIND);
+       if (!(vma->bound & GLOBAL_BIND)) {
+               ret = i915_vma_bind(vma,
+                                   to->legacy_hw_ctx.rcs_state->cache_level,
+                                   GLOBAL_BIND);
+               /* This shouldn't ever fail. */
+               if (WARN_ONCE(ret, "GGTT context bind failed!"))
+                       goto unpin_out;
+       }
  
        if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                hw_flags |= MI_RESTORE_INHIBIT;
                 * swapped, but there is no way to do that yet.
                 */
                from->legacy_hw_ctx.rcs_state->dirty = 1;
-               BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+               BUG_ON(i915_gem_request_get_ring(
+                       from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
  
                /* obj is kept alive until the next request by its active ref */
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@@ -671,10 -656,6 +688,6 @@@ done
                        if (ret)
                                DRM_ERROR("ring init context: %d\n", ret);
                }
-               ret = i915_gem_render_state_init(ring);
-               if (ret)
-                       DRM_ERROR("init render state: %d\n", ret);
        }
  
        return 0;
@@@ -37,6 -37,7 +37,7 @@@
  #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
  #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
  #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+ #define  __EXEC_OBJECT_PURGEABLE (1<<27)
  
  #define BATCH_OFFSET_BIAS (256*1024)
  
@@@ -121,6 -122,9 +122,6 @@@ eb_lookup_vmas(struct eb_vmas *eb
                        goto err;
                }
  
 -              WARN_ONCE(obj->base.dumb,
 -                        "GPU use of dumb buffer is illegal.\n");
 -
                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
@@@ -223,7 -227,12 +224,12 @@@ i915_gem_execbuffer_unreserve_vma(struc
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;
  
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+       if (entry->flags & __EXEC_OBJECT_PURGEABLE)
+               obj->madv = I915_MADV_DONTNEED;
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
+                         __EXEC_OBJECT_HAS_PIN |
+                         __EXEC_OBJECT_PURGEABLE);
  }
  
  static void eb_destroy(struct eb_vmas *eb)
@@@ -357,9 -366,12 +363,12 @@@ i915_gem_execbuffer_relocate_entry(stru
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-           !(target_vma->bound & GLOBAL_BIND)))
-               target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
-                               GLOBAL_BIND);
+           !(target_vma->bound & GLOBAL_BIND))) {
+               ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+                                   GLOBAL_BIND);
+               if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+                       return ret;
+       }
  
        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@@ -943,7 -955,7 +952,7 @@@ voi
  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
  {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req = intel_ring_get_request(ring);
        struct i915_vma *vma;
  
        list_for_each_entry(vma, vmas, exec_list) {
                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_write_req, req);
  
                        intel_fb_obj_invalidate(obj, ring);
  
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       obj->last_fenced_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@@ -990,7 -1002,7 +999,7 @@@ i915_gem_execbuffer_retire_commands(str
        ring->gpu_caches_dirty = true;
  
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)__i915_add_request(ring, file, obj, NULL);
+       (void)__i915_add_request(ring, file, obj);
  }
  
  static int
@@@ -1060,6 -1072,65 +1069,65 @@@ i915_emit_box(struct intel_engine_cs *r
        return 0;
  }
  
+ static struct drm_i915_gem_object*
+ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+                         struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+                         struct eb_vmas *eb,
+                         struct drm_i915_gem_object *batch_obj,
+                         u32 batch_start_offset,
+                         u32 batch_len,
+                         bool is_master,
+                         u32 *flags)
+ {
+       struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+       struct drm_i915_gem_object *shadow_batch_obj;
+       int ret;
+       shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+                                                  batch_obj->base.size);
+       if (IS_ERR(shadow_batch_obj))
+               return shadow_batch_obj;
+       ret = i915_parse_cmds(ring,
+                             batch_obj,
+                             shadow_batch_obj,
+                             batch_start_offset,
+                             batch_len,
+                             is_master);
+       if (ret) {
+               if (ret == -EACCES)
+                       return batch_obj;
+       } else {
+               struct i915_vma *vma;
+               memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+               vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+               vma->exec_entry = shadow_exec_entry;
+               vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+               drm_gem_object_reference(&shadow_batch_obj->base);
+               list_add_tail(&vma->exec_list, &eb->vmas);
+               shadow_batch_obj->base.pending_read_domains =
+                       batch_obj->base.pending_read_domains;
+               /*
+                * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+                * bit from MI_BATCH_BUFFER_START commands issued in the
+                * dispatch_execbuffer implementations. We specifically
+                * don't want that set when the command parser is
+                * enabled.
+                *
+                * FIXME: with aliasing ppgtt, buffers that should only
+                * be in ggtt still end up in the aliasing ppgtt. remove
+                * this check when that is fixed.
+                */
+               if (USES_FULL_PPGTT(dev))
+                       *flags |= I915_DISPATCH_SECURE;
+       }
+       return ret ? ERR_PTR(ret) : shadow_batch_obj;
+ }
  
  int
  i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                        return ret;
        }
  
-       trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
  
        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@@ -1277,6 -1348,7 +1345,7 @@@ i915_gem_do_execbuffer(struct drm_devic
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
+       struct drm_i915_gem_exec_object2 shadow_exec_entry;
        struct intel_engine_cs *ring;
        struct intel_context *ctx;
        struct i915_address_space *vm;
                ret = -EINVAL;
                goto err;
        }
-       batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  
        if (i915_needs_cmd_parser(ring)) {
-               ret = i915_parse_cmds(ring,
-                                     batch_obj,
-                                     args->batch_start_offset,
-                                     file->is_master);
-               if (ret) {
-                       if (ret != -EACCES)
-                               goto err;
-               } else {
-                       /*
-                        * XXX: Actually do this when enabling batch copy...
-                        *
-                        * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-                        * from MI_BATCH_BUFFER_START commands issued in the
-                        * dispatch_execbuffer implementations. We specifically don't
-                        * want that set when the command parser is enabled.
-                        */
+               batch_obj = i915_gem_execbuffer_parse(ring,
+                                                     &shadow_exec_entry,
+                                                     eb,
+                                                     batch_obj,
+                                                     args->batch_start_offset,
+                                                     args->batch_len,
+                                                     file->is_master,
+                                                     &flags);
+               if (IS_ERR(batch_obj)) {
+                       ret = PTR_ERR(batch_obj);
+                       goto err;
                }
        }
  
+       batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
@@@ -183,6 -183,8 +183,8 @@@ static void ilk_update_gt_irq(struct dr
  {
        assert_spin_locked(&dev_priv->irq_lock);
  
+       WARN_ON(enabled_irq_mask & ~interrupt_mask);
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
  
@@@ -229,6 -231,8 +231,8 @@@ static void snb_update_pm_irq(struct dr
  {
        uint32_t new_val;
  
+       WARN_ON(enabled_irq_mask & ~interrupt_mask);
        assert_spin_locked(&dev_priv->irq_lock);
  
        new_val = dev_priv->pm_irq_mask;
@@@ -281,14 -285,10 +285,14 @@@ void gen6_enable_rps_interrupts(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        spin_lock_irq(&dev_priv->irq_lock);
 +
        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
 +      I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
 +                              dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 +
        spin_unlock_irq(&dev_priv->irq_lock);
  }
  
@@@ -332,6 -332,8 +336,8 @@@ void ibx_display_interrupt_update(struc
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);
  
+       WARN_ON(enabled_irq_mask & ~interrupt_mask);
        assert_spin_locked(&dev_priv->irq_lock);
  
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@@ -1017,7 -1019,7 +1023,7 @@@ static void notify_ring(struct drm_devi
        if (!intel_ring_initialized(ring))
                return;
  
-       trace_i915_gem_request_complete(ring);
+       trace_i915_gem_request_notify(ring);
  
        wake_up_all(&ring->irq_queue);
  }
@@@ -1383,14 -1385,14 +1389,14 @@@ static irqreturn_t gen8_gt_irq_handler(
                        if (rcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_execlists_handle_ctx_events(ring);
+                               intel_lrc_irq_handler(ring);
  
                        bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
                        ring = &dev_priv->ring[BCS];
                        if (bcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_execlists_handle_ctx_events(ring);
+                               intel_lrc_irq_handler(ring);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_execlists_handle_ctx_events(ring);
+                               intel_lrc_irq_handler(ring);
  
                        vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
                        ring = &dev_priv->ring[VCS2];
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_execlists_handle_ctx_events(ring);
+                               intel_lrc_irq_handler(ring);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, ring);
                        if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_execlists_handle_ctx_events(ring);
+                               intel_lrc_irq_handler(ring);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }
@@@ -2753,18 -2755,18 +2759,18 @@@ static void gen8_disable_vblank(struct 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
- static u32
- ring_last_seqno(struct intel_engine_cs *ring)
+ static struct drm_i915_gem_request *
+ ring_last_request(struct intel_engine_cs *ring)
  {
        return list_entry(ring->request_list.prev,
-                         struct drm_i915_gem_request, list)->seqno;
+                         struct drm_i915_gem_request, list);
  }
  
  static bool
- ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ ring_idle(struct intel_engine_cs *ring)
  {
        return (list_empty(&ring->request_list) ||
-               i915_seqno_passed(seqno, ring_last_seqno(ring)));
+               i915_gem_request_completed(ring_last_request(ring), false));
  }
  
  static bool
@@@ -2984,7 -2986,7 +2990,7 @@@ static void i915_hangcheck_elapsed(unsi
                acthd = intel_ring_get_active_head(ring);
  
                if (ring->hangcheck.seqno == seqno) {
-                       if (ring_idle(ring, seqno)) {
+                       if (ring_idle(ring)) {
                                ring->hangcheck.action = HANGCHECK_IDLE;
  
                                if (waitqueue_active(&ring->irq_queue)) {
@@@ -3311,10 -3313,8 +3317,10 @@@ static void gen5_gt_irq_postinstall(str
        GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
  
        if (INTEL_INFO(dev)->gen >= 6) {
 -              pm_irqs |= dev_priv->pm_rps_events;
 -
 +              /*
 +               * RPS interrupts will get enabled/disabled on demand when RPS
 +               * itself is enabled/disabled.
 +               */
                if (HAS_VEBOX(dev))
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
  
@@@ -3526,11 -3526,7 +3532,11 @@@ static void gen8_gt_irq_postinstall(str
        dev_priv->pm_irq_mask = 0xffffffff;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
 -      GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
 +      /*
 +       * RPS interrupts will get enabled/disabled on demand when RPS itself
 +       * is enabled/disabled.
 +       */
 +      GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
  }
  
@@@ -3619,7 -3615,7 +3625,7 @@@ static void vlv_display_irq_uninstall(s
  
        vlv_display_irq_reset(dev_priv);
  
 -      dev_priv->irq_mask = 0;
 +      dev_priv->irq_mask = ~0;
  }
  
  static void valleyview_irq_uninstall(struct drm_device *dev)
  #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
  #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
                               (pipe) == PIPE_B ? (b) : (c))
+ #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+                              (port) == PORT_B ? (b) : (c))
  
 -#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 -#define _MASKED_BIT_DISABLE(a) ((a) << 16)
 +#define _MASKED_FIELD(mask, value) ({                                    \
 +      if (__builtin_constant_p(mask))                                    \
 +              BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
 +      if (__builtin_constant_p(value))                                   \
 +              BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
 +      if (__builtin_constant_p(mask) && __builtin_constant_p(value))     \
 +              BUILD_BUG_ON_MSG((value) & ~(mask),                        \
 +                               "Incorrect value for mask");              \
 +      (mask) << 16 | (value); })
 +#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
 +#define _MASKED_BIT_DISABLE(a)        (_MASKED_FIELD((a), 0))
 +
 +
  
  /* PCI config space */
  
@@@ -85,7 -76,6 +87,7 @@@
  #define   I915_GC_RENDER_CLOCK_166_MHZ        (0 << 0)
  #define   I915_GC_RENDER_CLOCK_200_MHZ        (1 << 0)
  #define   I915_GC_RENDER_CLOCK_333_MHZ        (4 << 0)
 +#define GCDGMBUS 0xcc
  #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
  
  
  #define INSTR_SUBCLIENT_SHIFT   27
  #define INSTR_SUBCLIENT_MASK    0x18000000
  #define   INSTR_MEDIA_SUBCLIENT 0x2
+ #define INSTR_26_TO_24_MASK   0x7000000
+ #define   INSTR_26_TO_24_SHIFT        24
  
  /*
   * Memory interface instructions used by the kernel
  #define MI_BATCH_BUFFER_END   MI_INSTR(0x0a, 0)
  #define MI_SUSPEND_FLUSH      MI_INSTR(0x0b, 0)
  #define   MI_SUSPEND_FLUSH_EN (1<<0)
+ #define MI_SET_APPID          MI_INSTR(0x0e, 0)
  #define MI_OVERLAY_FLIP               MI_INSTR(0x11, 0)
  #define   MI_OVERLAY_CONTINUE (0x0<<21)
  #define   MI_OVERLAY_ON               (0x1<<21)
  #define   MI_SEMAPHORE_POLL           (1<<15)
  #define   MI_SEMAPHORE_SAD_GTE_SDD    (1<<12)
  #define MI_STORE_DWORD_IMM    MI_INSTR(0x20, 1)
- #define MI_STORE_DWORD_IMM_GEN8       MI_INSTR(0x20, 2)
- #define   MI_MEM_VIRTUAL      (1 << 22) /* 965+ only */
+ #define MI_STORE_DWORD_IMM_GEN4       MI_INSTR(0x20, 2)
+ #define   MI_MEM_VIRTUAL      (1 << 22) /* 945,g33,965 */
+ #define   MI_USE_GGTT         (1 << 22) /* g4x+ */
  #define MI_STORE_DWORD_INDEX  MI_INSTR(0x21, 1)
  #define   MI_STORE_DWORD_INDEX_SHIFT 2
  /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
  #define   PIPE_CONTROL_STORE_DATA_INDEX                       (1<<21)
  #define   PIPE_CONTROL_CS_STALL                               (1<<20)
  #define   PIPE_CONTROL_TLB_INVALIDATE                 (1<<18)
 +#define   PIPE_CONTROL_MEDIA_STATE_CLEAR              (1<<16)
  #define   PIPE_CONTROL_QW_WRITE                               (1<<14)
  #define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
  #define   PIPE_CONTROL_DEPTH_STALL                    (1<<13)
   */
  #define BCS_SWCTRL 0x22200
  
- #define HS_INVOCATION_COUNT 0x2300
- #define DS_INVOCATION_COUNT 0x2308
- #define IA_VERTICES_COUNT   0x2310
- #define IA_PRIMITIVES_COUNT 0x2318
- #define VS_INVOCATION_COUNT 0x2320
- #define GS_INVOCATION_COUNT 0x2328
- #define GS_PRIMITIVES_COUNT 0x2330
- #define CL_INVOCATION_COUNT 0x2338
- #define CL_PRIMITIVES_COUNT 0x2340
- #define PS_INVOCATION_COUNT 0x2348
- #define PS_DEPTH_COUNT      0x2350
+ #define GPGPU_THREADS_DISPATCHED        0x2290
+ #define HS_INVOCATION_COUNT             0x2300
+ #define DS_INVOCATION_COUNT             0x2308
+ #define IA_VERTICES_COUNT               0x2310
+ #define IA_PRIMITIVES_COUNT             0x2318
+ #define VS_INVOCATION_COUNT             0x2320
+ #define GS_INVOCATION_COUNT             0x2328
+ #define GS_PRIMITIVES_COUNT             0x2330
+ #define CL_INVOCATION_COUNT             0x2338
+ #define CL_PRIMITIVES_COUNT             0x2340
+ #define PS_INVOCATION_COUNT             0x2348
+ #define PS_DEPTH_COUNT                  0x2350
  
  /* There are the 4 64-bit counter registers, one for each stream output */
  #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
@@@ -1129,7 -1123,6 +1136,7 @@@ enum punit_power_well 
  #define GEN6_VERSYNC  (RING_SYNC_1(VEBOX_RING_BASE))
  #define GEN6_VEVSYNC  (RING_SYNC_2(VEBOX_RING_BASE))
  #define GEN6_NOSYNC 0
 +#define RING_PSMI_CTL(base)   ((base)+0x50)
  #define RING_MAX_IDLE(base)   ((base)+0x54)
  #define RING_HWS_PGA(base)    ((base)+0x80)
  #define RING_HWS_PGA_GEN6(base)       ((base)+0x2080)
  #define   GEN6_WIZ_HASHING_8x8                                GEN6_WIZ_HASHING(0, 0)
  #define   GEN6_WIZ_HASHING_8x4                                GEN6_WIZ_HASHING(0, 1)
  #define   GEN6_WIZ_HASHING_16x4                               GEN6_WIZ_HASHING(1, 0)
 -#define   GEN6_WIZ_HASHING_MASK                               (GEN6_WIZ_HASHING(1, 1) << 16)
 +#define   GEN6_WIZ_HASHING_MASK                               GEN6_WIZ_HASHING(1, 1)
  #define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE           (1 << 5)
  
  #define GFX_MODE      0x02520
  #define   GEN6_BLITTER_FBC_NOTIFY                     (1<<3)
  
  #define GEN6_RC_SLEEP_PSMI_CONTROL    0x2050
 +#define   GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
  #define   GEN8_RC_SEMA_IDLE_MSG_DISABLE       (1 << 12)
  #define   GEN8_FF_DOP_CLOCK_GATE_DISABLE      (1<<10)
  
  #define I915_ISP_INTERRUPT                            (1<<22)
  #define I915_LPE_PIPE_B_INTERRUPT                     (1<<21)
  #define I915_LPE_PIPE_A_INTERRUPT                     (1<<20)
- #define I915_MIPIB_INTERRUPT                          (1<<19)
+ #define I915_MIPIC_INTERRUPT                          (1<<19)
  #define I915_MIPIA_INTERRUPT                          (1<<18)
  #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT            (1<<18)
  #define I915_DISPLAY_PORT_INTERRUPT                   (1<<17)
  #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
  #define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
  
+ /* VLV eDP PSR registers */
+ #define _PSRCTLA                              (VLV_DISPLAY_BASE + 0x60090)
+ #define _PSRCTLB                              (VLV_DISPLAY_BASE + 0x61090)
+ #define  VLV_EDP_PSR_ENABLE                   (1<<0)
+ #define  VLV_EDP_PSR_RESET                    (1<<1)
+ #define  VLV_EDP_PSR_MODE_MASK                        (7<<2)
+ #define  VLV_EDP_PSR_MODE_HW_TIMER            (1<<3)
+ #define  VLV_EDP_PSR_MODE_SW_TIMER            (1<<2)
+ #define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE      (1<<7)
+ #define  VLV_EDP_PSR_ACTIVE_ENTRY             (1<<8)
+ #define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE    (1<<9)
+ #define  VLV_EDP_PSR_DBL_FRAME                        (1<<10)
+ #define  VLV_EDP_PSR_FRAME_COUNT_MASK         (0xff<<16)
+ #define  VLV_EDP_PSR_IDLE_FRAME_SHIFT         16
+ #define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+ #define _VSCSDPA                      (VLV_DISPLAY_BASE + 0x600a0)
+ #define _VSCSDPB                      (VLV_DISPLAY_BASE + 0x610a0)
+ #define  VLV_EDP_PSR_SDP_FREQ_MASK    (3<<30)
+ #define  VLV_EDP_PSR_SDP_FREQ_ONCE    (1<<31)
+ #define  VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+ #define VLV_VSCSDP(pipe)      _PIPE(pipe, _VSCSDPA, _VSCSDPB)
+ #define _PSRSTATA                     (VLV_DISPLAY_BASE + 0x60094)
+ #define _PSRSTATB                     (VLV_DISPLAY_BASE + 0x61094)
+ #define  VLV_EDP_PSR_LAST_STATE_MASK  (7<<3)
+ #define  VLV_EDP_PSR_CURR_STATE_MASK  7
+ #define  VLV_EDP_PSR_DISABLED         (0<<0)
+ #define  VLV_EDP_PSR_INACTIVE         (1<<0)
+ #define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE       (2<<0)
+ #define  VLV_EDP_PSR_ACTIVE_NORFB_UP  (3<<0)
+ #define  VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
+ #define  VLV_EDP_PSR_EXIT             (5<<0)
+ #define  VLV_EDP_PSR_IN_TRANS         (1<<7)
+ #define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
  /* HSW+ eDP PSR registers */
  #define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
  #define EDP_PSR_CTL(dev)                      (EDP_PSR_BASE(dev) + 0)
  #define   DC_BALANCE_RESET                    (1 << 25)
  #define PORT_DFT2_G4X         (dev_priv->info.display_mmio_offset + 0x61154)
  #define   DC_BALANCE_RESET_VLV                        (1 << 31)
- #define   PIPE_SCRAMBLE_RESET_MASK            (0x3 << 0)
+ #define   PIPE_SCRAMBLE_RESET_MASK            ((1 << 14) | (0x3 << 0))
+ #define   PIPE_C_SCRAMBLE_RESET                       (1 << 14) /* chv */
  #define   PIPE_B_SCRAMBLE_RESET                       (1 << 1)
  #define   PIPE_A_SCRAMBLE_RESET                       (1 << 0)
  
  #define GEN8_PMINTR_REDIRECT_TO_NON_DISP      (1<<31)
  #define VLV_PWRDWNUPCTL                               0xA294
  
+ #define VLV_CHICKEN_3                         (VLV_DISPLAY_BASE + 0x7040C)
+ #define  PIXEL_OVERLAP_CNT_MASK                       (3 << 30)
+ #define  PIXEL_OVERLAP_CNT_SHIFT              30
  #define GEN6_PMISR                            0x44020
  #define GEN6_PMIMR                            0x44024 /* rps_lock */
  #define GEN6_PMIIR                            0x44028
  #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
  #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
  
- /* VLV MIPI registers */
+ /* MIPI DSI registers */
+ #define _MIPI_PORT(port, a, c)        _PORT3(port, a, 0, c)   /* ports A and C only */
  
  #define _MIPIA_PORT_CTRL                      (VLV_DISPLAY_BASE + 0x61190)
- #define _MIPIB_PORT_CTRL                      (VLV_DISPLAY_BASE + 0x61700)
- #define MIPI_PORT_CTRL(tc)            _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
-                                               _MIPIB_PORT_CTRL)
- #define  DPI_ENABLE                                   (1 << 31) /* A + B */
+ #define _MIPIC_PORT_CTRL                      (VLV_DISPLAY_BASE + 0x61700)
+ #define MIPI_PORT_CTRL(port)  _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+ #define  DPI_ENABLE                                   (1 << 31) /* A + C */
  #define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT            27
  #define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK             (0xf << 27)
+ #define  DUAL_LINK_MODE_SHIFT                         26
  #define  DUAL_LINK_MODE_MASK                          (1 << 26)
  #define  DUAL_LINK_MODE_FRONT_BACK                    (0 << 26)
  #define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE             (1 << 26)
- #define  DITHERING_ENABLE                             (1 << 25) /* A + B */
+ #define  DITHERING_ENABLE                             (1 << 25) /* A + C */
  #define  FLOPPED_HSTX                                 (1 << 23)
  #define  DE_INVERT                                    (1 << 19) /* XXX */
  #define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT              18
  #define  MIPIA_FLISDSI_DELAY_COUNT_MASK                       (0xf << 18)
  #define  AFE_LATCHOUT                                 (1 << 17)
  #define  LP_OUTPUT_HOLD                                       (1 << 16)
- #define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT         15
- #define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK          (1 << 15)
- #define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT            11
- #define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK             (0xf << 11)
+ #define  MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT         15
+ #define  MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK          (1 << 15)
+ #define  MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT            11
+ #define  MIPIC_MIPI4DPHY_DELAY_COUNT_MASK             (0xf << 11)
  #define  CSB_SHIFT                                    9
  #define  CSB_MASK                                     (3 << 9)
  #define  CSB_20MHZ                                    (0 << 9)
  #define  BANDGAP_MASK                                 (1 << 8)
  #define  BANDGAP_PNW_CIRCUIT                          (0 << 8)
  #define  BANDGAP_LNC_CIRCUIT                          (1 << 8)
- #define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT          5
- #define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK           (7 << 5)
- #define  TEARING_EFFECT_DELAY                         (1 << 4) /* A + B */
- #define  TEARING_EFFECT_SHIFT                         2 /* A + B */
+ #define  MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT          5
+ #define  MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK           (7 << 5)
+ #define  TEARING_EFFECT_DELAY                         (1 << 4) /* A + C */
+ #define  TEARING_EFFECT_SHIFT                         2 /* A + C */
  #define  TEARING_EFFECT_MASK                          (3 << 2)
  #define  TEARING_EFFECT_OFF                           (0 << 2)
  #define  TEARING_EFFECT_DSI                           (1 << 2)
  #define  LANE_CONFIGURATION_DUAL_LINK_B                       (2 << 0)
  
  #define _MIPIA_TEARING_CTRL                   (VLV_DISPLAY_BASE + 0x61194)
- #define _MIPIB_TEARING_CTRL                   (VLV_DISPLAY_BASE + 0x61704)
- #define MIPI_TEARING_CTRL(tc)                 _TRANSCODER(tc, \
-                               _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+ #define _MIPIC_TEARING_CTRL                   (VLV_DISPLAY_BASE + 0x61704)
+ #define MIPI_TEARING_CTRL(port)                       _MIPI_PORT(port, \
+                               _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
  #define  TEARING_EFFECT_DELAY_SHIFT                   0
  #define  TEARING_EFFECT_DELAY_MASK                    (0xffff << 0)
  
  /* MIPI DSI Controller and D-PHY registers */
  
  #define _MIPIA_DEVICE_READY           (dev_priv->mipi_mmio_base + 0xb000)
- #define _MIPIB_DEVICE_READY           (dev_priv->mipi_mmio_base + 0xb800)
- #define MIPI_DEVICE_READY(tc)         _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
-                                               _MIPIB_DEVICE_READY)
+ #define _MIPIC_DEVICE_READY           (dev_priv->mipi_mmio_base + 0xb800)
+ #define MIPI_DEVICE_READY(port)               _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
+                                               _MIPIC_DEVICE_READY)
  #define  BUS_POSSESSION                                       (1 << 3) /* set to give bus to receiver */
  #define  ULPS_STATE_MASK                              (3 << 1)
  #define  ULPS_STATE_ENTER                             (2 << 1)
  #define  DEVICE_READY                                 (1 << 0)
  
  #define _MIPIA_INTR_STAT              (dev_priv->mipi_mmio_base + 0xb004)
- #define _MIPIB_INTR_STAT              (dev_priv->mipi_mmio_base + 0xb804)
- #define MIPI_INTR_STAT(tc)            _TRANSCODER(tc, _MIPIA_INTR_STAT, \
-                                       _MIPIB_INTR_STAT)
+ #define _MIPIC_INTR_STAT              (dev_priv->mipi_mmio_base + 0xb804)
+ #define MIPI_INTR_STAT(port)          _MIPI_PORT(port, _MIPIA_INTR_STAT, \
+                                       _MIPIC_INTR_STAT)
  #define _MIPIA_INTR_EN                        (dev_priv->mipi_mmio_base + 0xb008)
- #define _MIPIB_INTR_EN                        (dev_priv->mipi_mmio_base + 0xb808)
- #define MIPI_INTR_EN(tc)              _TRANSCODER(tc, _MIPIA_INTR_EN, \
-                                       _MIPIB_INTR_EN)
+ #define _MIPIC_INTR_EN                        (dev_priv->mipi_mmio_base + 0xb808)
+ #define MIPI_INTR_EN(port)            _MIPI_PORT(port, _MIPIA_INTR_EN, \
+                                       _MIPIC_INTR_EN)
  #define  TEARING_EFFECT                                       (1 << 31)
  #define  SPL_PKT_SENT_INTERRUPT                               (1 << 30)
  #define  GEN_READ_DATA_AVAIL                          (1 << 29)
  #define  RXSOT_ERROR                                  (1 << 0)
  
  #define _MIPIA_DSI_FUNC_PRG           (dev_priv->mipi_mmio_base + 0xb00c)
- #define _MIPIB_DSI_FUNC_PRG           (dev_priv->mipi_mmio_base + 0xb80c)
- #define MIPI_DSI_FUNC_PRG(tc)         _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
-                                               _MIPIB_DSI_FUNC_PRG)
+ #define _MIPIC_DSI_FUNC_PRG           (dev_priv->mipi_mmio_base + 0xb80c)
+ #define MIPI_DSI_FUNC_PRG(port)               _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
+                                               _MIPIC_DSI_FUNC_PRG)
  #define  CMD_MODE_DATA_WIDTH_MASK                     (7 << 13)
  #define  CMD_MODE_NOT_SUPPORTED                               (0 << 13)
  #define  CMD_MODE_DATA_WIDTH_16_BIT                   (1 << 13)
  #define  DATA_LANES_PRG_REG_MASK                      (7 << 0)
  
  #define _MIPIA_HS_TX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb010)
- #define _MIPIB_HS_TX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb810)
- #define MIPI_HS_TX_TIMEOUT(tc)        _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
-                                       _MIPIB_HS_TX_TIMEOUT)
+ #define _MIPIC_HS_TX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb810)
+ #define MIPI_HS_TX_TIMEOUT(port)      _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
+                                       _MIPIC_HS_TX_TIMEOUT)
  #define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK           0xffffff
  
  #define _MIPIA_LP_RX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb014)
- #define _MIPIB_LP_RX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb814)
- #define MIPI_LP_RX_TIMEOUT(tc)        _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
-                                       _MIPIB_LP_RX_TIMEOUT)
+ #define _MIPIC_LP_RX_TIMEOUT          (dev_priv->mipi_mmio_base + 0xb814)
+ #define MIPI_LP_RX_TIMEOUT(port)      _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
+                                       _MIPIC_LP_RX_TIMEOUT)
  #define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK            0xffffff
  
  #define _MIPIA_TURN_AROUND_TIMEOUT    (dev_priv->mipi_mmio_base + 0xb018)
- #define _MIPIB_TURN_AROUND_TIMEOUT    (dev_priv->mipi_mmio_base + 0xb818)
- #define MIPI_TURN_AROUND_TIMEOUT(tc)  _TRANSCODER(tc, \
-                       _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+ #define _MIPIC_TURN_AROUND_TIMEOUT    (dev_priv->mipi_mmio_base + 0xb818)
+ #define MIPI_TURN_AROUND_TIMEOUT(port)        _MIPI_PORT(port, \
+                       _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
  #define  TURN_AROUND_TIMEOUT_MASK                     0x3f
  
  #define _MIPIA_DEVICE_RESET_TIMER     (dev_priv->mipi_mmio_base + 0xb01c)
- #define _MIPIB_DEVICE_RESET_TIMER     (dev_priv->mipi_mmio_base + 0xb81c)
- #define MIPI_DEVICE_RESET_TIMER(tc)   _TRANSCODER(tc, \
-                       _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+ #define _MIPIC_DEVICE_RESET_TIMER     (dev_priv->mipi_mmio_base + 0xb81c)
+ #define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
+                       _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
  #define  DEVICE_RESET_TIMER_MASK                      0xffff
  
  #define _MIPIA_DPI_RESOLUTION         (dev_priv->mipi_mmio_base + 0xb020)
- #define _MIPIB_DPI_RESOLUTION         (dev_priv->mipi_mmio_base + 0xb820)
- #define MIPI_DPI_RESOLUTION(tc)       _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
-                                       _MIPIB_DPI_RESOLUTION)
+ #define _MIPIC_DPI_RESOLUTION         (dev_priv->mipi_mmio_base + 0xb820)
+ #define MIPI_DPI_RESOLUTION(port)     _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
+                                       _MIPIC_DPI_RESOLUTION)
  #define  VERTICAL_ADDRESS_SHIFT                               16
  #define  VERTICAL_ADDRESS_MASK                                (0xffff << 16)
  #define  HORIZONTAL_ADDRESS_SHIFT                     0
  #define  HORIZONTAL_ADDRESS_MASK                      0xffff
  
  #define _MIPIA_DBI_FIFO_THROTTLE      (dev_priv->mipi_mmio_base + 0xb024)
- #define _MIPIB_DBI_FIFO_THROTTLE      (dev_priv->mipi_mmio_base + 0xb824)
- #define MIPI_DBI_FIFO_THROTTLE(tc)    _TRANSCODER(tc, \
-                       _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+ #define _MIPIC_DBI_FIFO_THROTTLE      (dev_priv->mipi_mmio_base + 0xb824)
+ #define MIPI_DBI_FIFO_THROTTLE(port)  _MIPI_PORT(port, \
+                       _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
  #define  DBI_FIFO_EMPTY_HALF                          (0 << 0)
  #define  DBI_FIFO_EMPTY_QUARTER                               (1 << 0)
  #define  DBI_FIFO_EMPTY_7_LOCATIONS                   (2 << 0)
  
  /* regs below are bits 15:0 */
  #define _MIPIA_HSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb028)
- #define _MIPIB_HSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb828)
- #define MIPI_HSYNC_PADDING_COUNT(tc)  _TRANSCODER(tc, \
-                       _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+ #define _MIPIC_HSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb828)
+ #define MIPI_HSYNC_PADDING_COUNT(port)        _MIPI_PORT(port, \
+                       _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
  
  #define _MIPIA_HBP_COUNT              (dev_priv->mipi_mmio_base + 0xb02c)
- #define _MIPIB_HBP_COUNT              (dev_priv->mipi_mmio_base + 0xb82c)
- #define MIPI_HBP_COUNT(tc)            _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
-                                       _MIPIB_HBP_COUNT)
+ #define _MIPIC_HBP_COUNT              (dev_priv->mipi_mmio_base + 0xb82c)
+ #define MIPI_HBP_COUNT(port)          _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
+                                       _MIPIC_HBP_COUNT)
  
  #define _MIPIA_HFP_COUNT              (dev_priv->mipi_mmio_base + 0xb030)
- #define _MIPIB_HFP_COUNT              (dev_priv->mipi_mmio_base + 0xb830)
- #define MIPI_HFP_COUNT(tc)            _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
-                                       _MIPIB_HFP_COUNT)
+ #define _MIPIC_HFP_COUNT              (dev_priv->mipi_mmio_base + 0xb830)
+ #define MIPI_HFP_COUNT(port)          _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
+                                       _MIPIC_HFP_COUNT)
  
  #define _MIPIA_HACTIVE_AREA_COUNT     (dev_priv->mipi_mmio_base + 0xb034)
- #define _MIPIB_HACTIVE_AREA_COUNT     (dev_priv->mipi_mmio_base + 0xb834)
- #define MIPI_HACTIVE_AREA_COUNT(tc)   _TRANSCODER(tc, \
-                       _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+ #define _MIPIC_HACTIVE_AREA_COUNT     (dev_priv->mipi_mmio_base + 0xb834)
+ #define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
+                       _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
  
  #define _MIPIA_VSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb038)
- #define _MIPIB_VSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb838)
- #define MIPI_VSYNC_PADDING_COUNT(tc)  _TRANSCODER(tc, \
-                       _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+ #define _MIPIC_VSYNC_PADDING_COUNT    (dev_priv->mipi_mmio_base + 0xb838)
+ #define MIPI_VSYNC_PADDING_COUNT(port)        _MIPI_PORT(port, \
+                       _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
  
  #define _MIPIA_VBP_COUNT              (dev_priv->mipi_mmio_base + 0xb03c)
- #define _MIPIB_VBP_COUNT              (dev_priv->mipi_mmio_base + 0xb83c)
- #define MIPI_VBP_COUNT(tc)            _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
-                                       _MIPIB_VBP_COUNT)
+ #define _MIPIC_VBP_COUNT              (dev_priv->mipi_mmio_base + 0xb83c)
+ #define MIPI_VBP_COUNT(port)          _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
+                                       _MIPIC_VBP_COUNT)
  
  #define _MIPIA_VFP_COUNT              (dev_priv->mipi_mmio_base + 0xb040)
- #define _MIPIB_VFP_COUNT              (dev_priv->mipi_mmio_base + 0xb840)
- #define MIPI_VFP_COUNT(tc)            _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
-                                       _MIPIB_VFP_COUNT)
+ #define _MIPIC_VFP_COUNT              (dev_priv->mipi_mmio_base + 0xb840)
+ #define MIPI_VFP_COUNT(port)          _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
+                                       _MIPIC_VFP_COUNT)
  
  #define _MIPIA_HIGH_LOW_SWITCH_COUNT  (dev_priv->mipi_mmio_base + 0xb044)
- #define _MIPIB_HIGH_LOW_SWITCH_COUNT  (dev_priv->mipi_mmio_base + 0xb844)
- #define MIPI_HIGH_LOW_SWITCH_COUNT(tc)        _TRANSCODER(tc, \
-               _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+ #define _MIPIC_HIGH_LOW_SWITCH_COUNT  (dev_priv->mipi_mmio_base + 0xb844)
+ #define MIPI_HIGH_LOW_SWITCH_COUNT(port)      _MIPI_PORT(port,        \
+               _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
  
  /* regs above are bits 15:0 */
  
  #define _MIPIA_DPI_CONTROL            (dev_priv->mipi_mmio_base + 0xb048)
- #define _MIPIB_DPI_CONTROL            (dev_priv->mipi_mmio_base + 0xb848)
- #define MIPI_DPI_CONTROL(tc)          _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
-                                       _MIPIB_DPI_CONTROL)
+ #define _MIPIC_DPI_CONTROL            (dev_priv->mipi_mmio_base + 0xb848)
+ #define MIPI_DPI_CONTROL(port)                _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
+                                       _MIPIC_DPI_CONTROL)
  #define  DPI_LP_MODE                                  (1 << 6)
  #define  BACKLIGHT_OFF                                        (1 << 5)
  #define  BACKLIGHT_ON                                 (1 << 4)
  #define  SHUTDOWN                                     (1 << 0)
  
  #define _MIPIA_DPI_DATA                       (dev_priv->mipi_mmio_base + 0xb04c)
- #define _MIPIB_DPI_DATA                       (dev_priv->mipi_mmio_base + 0xb84c)
- #define MIPI_DPI_DATA(tc)             _TRANSCODER(tc, _MIPIA_DPI_DATA, \
-                                       _MIPIB_DPI_DATA)
+ #define _MIPIC_DPI_DATA                       (dev_priv->mipi_mmio_base + 0xb84c)
+ #define MIPI_DPI_DATA(port)           _MIPI_PORT(port, _MIPIA_DPI_DATA, \
+                                       _MIPIC_DPI_DATA)
  #define  COMMAND_BYTE_SHIFT                           0
  #define  COMMAND_BYTE_MASK                            (0x3f << 0)
  
  #define _MIPIA_INIT_COUNT             (dev_priv->mipi_mmio_base + 0xb050)
- #define _MIPIB_INIT_COUNT             (dev_priv->mipi_mmio_base + 0xb850)
- #define MIPI_INIT_COUNT(tc)           _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
-                                       _MIPIB_INIT_COUNT)
+ #define _MIPIC_INIT_COUNT             (dev_priv->mipi_mmio_base + 0xb850)
+ #define MIPI_INIT_COUNT(port)         _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
+                                       _MIPIC_INIT_COUNT)
  #define  MASTER_INIT_TIMER_SHIFT                      0
  #define  MASTER_INIT_TIMER_MASK                               (0xffff << 0)
  
  #define _MIPIA_MAX_RETURN_PKT_SIZE    (dev_priv->mipi_mmio_base + 0xb054)
- #define _MIPIB_MAX_RETURN_PKT_SIZE    (dev_priv->mipi_mmio_base + 0xb854)
- #define MIPI_MAX_RETURN_PKT_SIZE(tc)  _TRANSCODER(tc, \
-                       _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+ #define _MIPIC_MAX_RETURN_PKT_SIZE    (dev_priv->mipi_mmio_base + 0xb854)
+ #define MIPI_MAX_RETURN_PKT_SIZE(port)        _MIPI_PORT(port, \
+                       _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
  #define  MAX_RETURN_PKT_SIZE_SHIFT                    0
  #define  MAX_RETURN_PKT_SIZE_MASK                     (0x3ff << 0)
  
  #define _MIPIA_VIDEO_MODE_FORMAT      (dev_priv->mipi_mmio_base + 0xb058)
- #define _MIPIB_VIDEO_MODE_FORMAT      (dev_priv->mipi_mmio_base + 0xb858)
- #define MIPI_VIDEO_MODE_FORMAT(tc)    _TRANSCODER(tc, \
-                       _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+ #define _MIPIC_VIDEO_MODE_FORMAT      (dev_priv->mipi_mmio_base + 0xb858)
+ #define MIPI_VIDEO_MODE_FORMAT(port)  _MIPI_PORT(port, \
+                       _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
  #define  RANDOM_DPI_DISPLAY_RESOLUTION                        (1 << 4)
  #define  DISABLE_VIDEO_BTA                            (1 << 3)
  #define  IP_TG_CONFIG                                 (1 << 2)
  #define  VIDEO_MODE_BURST                             (3 << 0)
  
  #define _MIPIA_EOT_DISABLE            (dev_priv->mipi_mmio_base + 0xb05c)
- #define _MIPIB_EOT_DISABLE            (dev_priv->mipi_mmio_base + 0xb85c)
- #define MIPI_EOT_DISABLE(tc)          _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
-                                       _MIPIB_EOT_DISABLE)
+ #define _MIPIC_EOT_DISABLE            (dev_priv->mipi_mmio_base + 0xb85c)
+ #define MIPI_EOT_DISABLE(port)                _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
+                                       _MIPIC_EOT_DISABLE)
  #define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE         (1 << 7)
  #define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE         (1 << 6)
  #define  LOW_CONTENTION_RECOVERY_DISABLE              (1 << 5)
  #define  EOT_DISABLE                                  (1 << 0)
  
  #define _MIPIA_LP_BYTECLK             (dev_priv->mipi_mmio_base + 0xb060)
- #define _MIPIB_LP_BYTECLK             (dev_priv->mipi_mmio_base + 0xb860)
- #define MIPI_LP_BYTECLK(tc)           _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
-                                       _MIPIB_LP_BYTECLK)
+ #define _MIPIC_LP_BYTECLK             (dev_priv->mipi_mmio_base + 0xb860)
+ #define MIPI_LP_BYTECLK(port)         _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
+                                       _MIPIC_LP_BYTECLK)
  #define  LP_BYTECLK_SHIFT                             0
  #define  LP_BYTECLK_MASK                              (0xffff << 0)
  
  /* bits 31:0 */
  #define _MIPIA_LP_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb064)
- #define _MIPIB_LP_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb864)
- #define MIPI_LP_GEN_DATA(tc)          _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
-                                       _MIPIB_LP_GEN_DATA)
+ #define _MIPIC_LP_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb864)
+ #define MIPI_LP_GEN_DATA(port)                _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
+                                       _MIPIC_LP_GEN_DATA)
  
  /* bits 31:0 */
  #define _MIPIA_HS_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb068)
- #define _MIPIB_HS_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb868)
- #define MIPI_HS_GEN_DATA(tc)          _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
-                                       _MIPIB_HS_GEN_DATA)
+ #define _MIPIC_HS_GEN_DATA            (dev_priv->mipi_mmio_base + 0xb868)
+ #define MIPI_HS_GEN_DATA(port)                _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
+                                       _MIPIC_HS_GEN_DATA)
  
  #define _MIPIA_LP_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb06c)
- #define _MIPIB_LP_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb86c)
- #define MIPI_LP_GEN_CTRL(tc)          _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
-                                       _MIPIB_LP_GEN_CTRL)
+ #define _MIPIC_LP_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb86c)
+ #define MIPI_LP_GEN_CTRL(port)                _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
+                                       _MIPIC_LP_GEN_CTRL)
  #define _MIPIA_HS_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb070)
- #define _MIPIB_HS_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb870)
- #define MIPI_HS_GEN_CTRL(tc)          _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
-                                       _MIPIB_HS_GEN_CTRL)
+ #define _MIPIC_HS_GEN_CTRL            (dev_priv->mipi_mmio_base + 0xb870)
+ #define MIPI_HS_GEN_CTRL(port)                _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
+                                       _MIPIC_HS_GEN_CTRL)
  #define  LONG_PACKET_WORD_COUNT_SHIFT                 8
  #define  LONG_PACKET_WORD_COUNT_MASK                  (0xffff << 8)
  #define  SHORT_PACKET_PARAM_SHIFT                     8
  /* data type values, see include/video/mipi_display.h */
  
  #define _MIPIA_GEN_FIFO_STAT          (dev_priv->mipi_mmio_base + 0xb074)
- #define _MIPIB_GEN_FIFO_STAT          (dev_priv->mipi_mmio_base + 0xb874)
- #define MIPI_GEN_FIFO_STAT(tc)        _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
-                                       _MIPIB_GEN_FIFO_STAT)
+ #define _MIPIC_GEN_FIFO_STAT          (dev_priv->mipi_mmio_base + 0xb874)
+ #define MIPI_GEN_FIFO_STAT(port)      _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
+                                       _MIPIC_GEN_FIFO_STAT)
  #define  DPI_FIFO_EMPTY                                       (1 << 28)
  #define  DBI_FIFO_EMPTY                                       (1 << 27)
  #define  LP_CTRL_FIFO_EMPTY                           (1 << 26)
  #define  HS_DATA_FIFO_FULL                            (1 << 0)
  
  #define _MIPIA_HS_LS_DBI_ENABLE               (dev_priv->mipi_mmio_base + 0xb078)
- #define _MIPIB_HS_LS_DBI_ENABLE               (dev_priv->mipi_mmio_base + 0xb878)
- #define MIPI_HS_LP_DBI_ENABLE(tc)     _TRANSCODER(tc, \
-                       _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+ #define _MIPIC_HS_LS_DBI_ENABLE               (dev_priv->mipi_mmio_base + 0xb878)
+ #define MIPI_HS_LP_DBI_ENABLE(port)   _MIPI_PORT(port, \
+                       _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
  #define  DBI_HS_LP_MODE_MASK                          (1 << 0)
  #define  DBI_LP_MODE                                  (1 << 0)
  #define  DBI_HS_MODE                                  (0 << 0)
  
  #define _MIPIA_DPHY_PARAM             (dev_priv->mipi_mmio_base + 0xb080)
- #define _MIPIB_DPHY_PARAM             (dev_priv->mipi_mmio_base + 0xb880)
- #define MIPI_DPHY_PARAM(tc)           _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
-                                       _MIPIB_DPHY_PARAM)
+ #define _MIPIC_DPHY_PARAM             (dev_priv->mipi_mmio_base + 0xb880)
+ #define MIPI_DPHY_PARAM(port)         _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
+                                       _MIPIC_DPHY_PARAM)
  #define  EXIT_ZERO_COUNT_SHIFT                                24
  #define  EXIT_ZERO_COUNT_MASK                         (0x3f << 24)
  #define  TRAIL_COUNT_SHIFT                            16
  
  /* bits 31:0 */
  #define _MIPIA_DBI_BW_CTRL            (dev_priv->mipi_mmio_base + 0xb084)
- #define _MIPIB_DBI_BW_CTRL            (dev_priv->mipi_mmio_base + 0xb884)
- #define MIPI_DBI_BW_CTRL(tc)          _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
-                                       _MIPIB_DBI_BW_CTRL)
+ #define _MIPIC_DBI_BW_CTRL            (dev_priv->mipi_mmio_base + 0xb884)
+ #define MIPI_DBI_BW_CTRL(port)                _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
+                                       _MIPIC_DBI_BW_CTRL)
  
  #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT               (dev_priv->mipi_mmio_base \
                                                        + 0xb088)
- #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT               (dev_priv->mipi_mmio_base \
+ #define _MIPIC_CLK_LANE_SWITCH_TIME_CNT               (dev_priv->mipi_mmio_base \
                                                        + 0xb888)
- #define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc)     _TRANSCODER(tc, \
-       _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+ #define MIPI_CLK_LANE_SWITCH_TIME_CNT(port)   _MIPI_PORT(port, \
+       _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
  #define  LP_HS_SSW_CNT_SHIFT                          16
  #define  LP_HS_SSW_CNT_MASK                           (0xffff << 16)
  #define  HS_LP_PWR_SW_CNT_SHIFT                               0
  #define  HS_LP_PWR_SW_CNT_MASK                                (0xffff << 0)
  
  #define _MIPIA_STOP_STATE_STALL               (dev_priv->mipi_mmio_base + 0xb08c)
- #define _MIPIB_STOP_STATE_STALL               (dev_priv->mipi_mmio_base + 0xb88c)
- #define MIPI_STOP_STATE_STALL(tc)     _TRANSCODER(tc, \
-                       _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+ #define _MIPIC_STOP_STATE_STALL               (dev_priv->mipi_mmio_base + 0xb88c)
+ #define MIPI_STOP_STATE_STALL(port)   _MIPI_PORT(port, \
+                       _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
  #define  STOP_STATE_STALL_COUNTER_SHIFT                       0
  #define  STOP_STATE_STALL_COUNTER_MASK                        (0xff << 0)
  
  #define _MIPIA_INTR_STAT_REG_1                (dev_priv->mipi_mmio_base + 0xb090)
- #define _MIPIB_INTR_STAT_REG_1                (dev_priv->mipi_mmio_base + 0xb890)
- #define MIPI_INTR_STAT_REG_1(tc)      _TRANSCODER(tc, \
-                               _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+ #define _MIPIC_INTR_STAT_REG_1                (dev_priv->mipi_mmio_base + 0xb890)
+ #define MIPI_INTR_STAT_REG_1(port)    _MIPI_PORT(port, \
+                               _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
  #define _MIPIA_INTR_EN_REG_1          (dev_priv->mipi_mmio_base + 0xb094)
- #define _MIPIB_INTR_EN_REG_1          (dev_priv->mipi_mmio_base + 0xb894)
- #define MIPI_INTR_EN_REG_1(tc)        _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
-                                       _MIPIB_INTR_EN_REG_1)
+ #define _MIPIC_INTR_EN_REG_1          (dev_priv->mipi_mmio_base + 0xb894)
+ #define MIPI_INTR_EN_REG_1(port)      _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
+                                       _MIPIC_INTR_EN_REG_1)
  #define  RX_CONTENTION_DETECTED                               (1 << 0)
  
  /* XXX: only pipe A ?!? */
  /* MIPI adapter registers */
  
  #define _MIPIA_CTRL                   (dev_priv->mipi_mmio_base + 0xb104)
- #define _MIPIB_CTRL                   (dev_priv->mipi_mmio_base + 0xb904)
- #define MIPI_CTRL(tc)                 _TRANSCODER(tc, _MIPIA_CTRL, \
-                                       _MIPIB_CTRL)
+ #define _MIPIC_CTRL                   (dev_priv->mipi_mmio_base + 0xb904)
+ #define MIPI_CTRL(port)                       _MIPI_PORT(port, _MIPIA_CTRL, \
+                                       _MIPIC_CTRL)
  #define  ESCAPE_CLOCK_DIVIDER_SHIFT                   5 /* A only */
  #define  ESCAPE_CLOCK_DIVIDER_MASK                    (3 << 5)
  #define  ESCAPE_CLOCK_DIVIDER_1                               (0 << 5)
  #define  RGB_FLIP_TO_BGR                              (1 << 2)
  
  #define _MIPIA_DATA_ADDRESS           (dev_priv->mipi_mmio_base + 0xb108)
- #define _MIPIB_DATA_ADDRESS           (dev_priv->mipi_mmio_base + 0xb908)
- #define MIPI_DATA_ADDRESS(tc)         _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
-                                       _MIPIB_DATA_ADDRESS)
+ #define _MIPIC_DATA_ADDRESS           (dev_priv->mipi_mmio_base + 0xb908)
+ #define MIPI_DATA_ADDRESS(port)               _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
+                                       _MIPIC_DATA_ADDRESS)
  #define  DATA_MEM_ADDRESS_SHIFT                               5
  #define  DATA_MEM_ADDRESS_MASK                                (0x7ffffff << 5)
  #define  DATA_VALID                                   (1 << 0)
  
  #define _MIPIA_DATA_LENGTH            (dev_priv->mipi_mmio_base + 0xb10c)
- #define _MIPIB_DATA_LENGTH            (dev_priv->mipi_mmio_base + 0xb90c)
- #define MIPI_DATA_LENGTH(tc)          _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
-                                       _MIPIB_DATA_LENGTH)
+ #define _MIPIC_DATA_LENGTH            (dev_priv->mipi_mmio_base + 0xb90c)
+ #define MIPI_DATA_LENGTH(port)                _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
+                                       _MIPIC_DATA_LENGTH)
  #define  DATA_LENGTH_SHIFT                            0
  #define  DATA_LENGTH_MASK                             (0xfffff << 0)
  
  #define _MIPIA_COMMAND_ADDRESS                (dev_priv->mipi_mmio_base + 0xb110)
- #define _MIPIB_COMMAND_ADDRESS                (dev_priv->mipi_mmio_base + 0xb910)
- #define MIPI_COMMAND_ADDRESS(tc)      _TRANSCODER(tc, \
-                               _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+ #define _MIPIC_COMMAND_ADDRESS                (dev_priv->mipi_mmio_base + 0xb910)
+ #define MIPI_COMMAND_ADDRESS(port)    _MIPI_PORT(port, \
+                               _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
  #define  COMMAND_MEM_ADDRESS_SHIFT                    5
  #define  COMMAND_MEM_ADDRESS_MASK                     (0x7ffffff << 5)
  #define  AUTO_PWG_ENABLE                              (1 << 2)
  #define  COMMAND_VALID                                        (1 << 0)
  
  #define _MIPIA_COMMAND_LENGTH         (dev_priv->mipi_mmio_base + 0xb114)
- #define _MIPIB_COMMAND_LENGTH         (dev_priv->mipi_mmio_base + 0xb914)
- #define MIPI_COMMAND_LENGTH(tc)       _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
-                                       _MIPIB_COMMAND_LENGTH)
+ #define _MIPIC_COMMAND_LENGTH         (dev_priv->mipi_mmio_base + 0xb914)
+ #define MIPI_COMMAND_LENGTH(port)     _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
+                                       _MIPIC_COMMAND_LENGTH)
  #define  COMMAND_LENGTH_SHIFT(n)                      (8 * (n)) /* n: 0...3 */
  #define  COMMAND_LENGTH_MASK(n)                               (0xff << (8 * (n)))
  
  #define _MIPIA_READ_DATA_RETURN0      (dev_priv->mipi_mmio_base + 0xb118)
- #define _MIPIB_READ_DATA_RETURN0      (dev_priv->mipi_mmio_base + 0xb918)
- #define MIPI_READ_DATA_RETURN(tc, n) \
-       (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ #define _MIPIC_READ_DATA_RETURN0      (dev_priv->mipi_mmio_base + 0xb918)
+ #define MIPI_READ_DATA_RETURN(port, n) \
+       (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
                                        + 4 * (n)) /* n: 0...7 */
  
  #define _MIPIA_READ_DATA_VALID                (dev_priv->mipi_mmio_base + 0xb138)
- #define _MIPIB_READ_DATA_VALID                (dev_priv->mipi_mmio_base + 0xb938)
- #define MIPI_READ_DATA_VALID(tc)      _TRANSCODER(tc, \
-                               _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+ #define _MIPIC_READ_DATA_VALID                (dev_priv->mipi_mmio_base + 0xb938)
+ #define MIPI_READ_DATA_VALID(port)    _MIPI_PORT(port, \
+                               _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
  #define  READ_DATA_VALID(n)                           (1 << (n))
  
  /* For UMS only (deprecated): */
@@@ -264,7 -264,7 +264,7 @@@ static void i915_restore_display(struc
        }
  
        /* only restore FBC info on the platform that supports FBC*/
-       intel_disable_fbc(dev);
+       intel_fbc_disable(dev);
  
        /* restore FBC interval */
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
@@@ -303,10 -303,6 +303,10 @@@ int i915_save_state(struct drm_device *
                }
        }
  
 +      if (IS_GEN4(dev))
 +              pci_read_config_word(dev->pdev, GCDGMBUS,
 +                                   &dev_priv->regfile.saveGCDGMBUS);
 +
        /* Cache mode state */
        if (INTEL_INFO(dev)->gen < 7)
                dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@@ -335,10 -331,6 +335,10 @@@ int i915_restore_state(struct drm_devic
        mutex_lock(&dev->struct_mutex);
  
        i915_gem_restore_fences(dev);
 +
 +      if (IS_GEN4(dev))
 +              pci_write_config_word(dev->pdev, GCDGMBUS,
 +                                    dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev);
  
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@@ -1024,7 -1024,7 +1024,7 @@@ void assert_pll(struct drm_i915_privat
        reg = DPLL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & DPLL_VCO_ENABLE);
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
  }
@@@ -1040,7 -1040,7 +1040,7 @@@ static void assert_dsi_pll(struct drm_i
        mutex_unlock(&dev_priv->dpio_lock);
  
        cur_state = val & DSI_PLL_VCO_EN;
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
  }
@@@ -1071,7 -1071,7 +1071,7 @@@ void assert_shared_dpll(struct drm_i915
                return;
  
        cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "%s assertion failure (expected %s, current %s)\n",
             pll->name, state_string(state), state_string(cur_state));
  }
@@@ -1095,7 -1095,7 +1095,7 @@@ static void assert_fdi_tx(struct drm_i9
                val = I915_READ(reg);
                cur_state = !!(val & FDI_TX_ENABLE);
        }
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
  }
@@@ -1112,7 -1112,7 +1112,7 @@@ static void assert_fdi_rx(struct drm_i9
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_RX_ENABLE);
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
  }
@@@ -1135,7 -1135,7 +1135,7 @@@ static void assert_fdi_tx_pll_enabled(s
  
        reg = FDI_TX_CTL(pipe);
        val = I915_READ(reg);
-       WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+       I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  }
  
  void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
  }
@@@ -1190,7 -1190,7 +1190,7 @@@ void assert_panel_unlocked(struct drm_i
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;
  
-       WARN(panel_pipe == pipe && locked,
+       I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
  }
@@@ -1206,7 -1206,7 +1206,7 @@@ static void assert_cursor(struct drm_i9
        else
                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
  }
@@@ -1236,7 -1236,7 +1236,7 @@@ void assert_pipe(struct drm_i915_privat
                cur_state = !!(val & PIPECONF_ENABLE);
        }
  
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
  }
@@@ -1251,7 -1251,7 +1251,7 @@@ static void assert_plane(struct drm_i91
        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
-       WARN(cur_state != state,
+       I915_STATE_WARN(cur_state != state,
             "plane %c assertion failure (expected %s, current %s)\n",
             plane_name(plane), state_string(state), state_string(cur_state));
  }
@@@ -1271,7 -1271,7 +1271,7 @@@ static void assert_planes_disabled(stru
        if (INTEL_INFO(dev)->gen >= 4) {
                reg = DSPCNTR(pipe);
                val = I915_READ(reg);
-               WARN(val & DISPLAY_PLANE_ENABLE,
+               I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
                     "plane %c assertion failure, should be disabled but not\n",
                     plane_name(pipe));
                return;
                val = I915_READ(reg);
                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
-               WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+               I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
                     "plane %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(i), pipe_name(pipe));
        }
@@@ -1299,7 -1299,7 +1299,7 @@@ static void assert_sprites_disabled(str
        if (INTEL_INFO(dev)->gen >= 9) {
                for_each_sprite(pipe, sprite) {
                        val = I915_READ(PLANE_CTL(pipe, sprite));
-                       WARN(val & PLANE_CTL_ENABLE,
+                       I915_STATE_WARN(val & PLANE_CTL_ENABLE,
                             "plane %d assertion failure, should be off on pipe %c but is still active\n",
                             sprite, pipe_name(pipe));
                }
                for_each_sprite(pipe, sprite) {
                        reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
-                       WARN(val & SP_ENABLE,
+                       I915_STATE_WARN(val & SP_ENABLE,
                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                             sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_INFO(dev)->gen >= 7) {
                reg = SPRCTL(pipe);
                val = I915_READ(reg);
-               WARN(val & SPRITE_ENABLE,
+               I915_STATE_WARN(val & SPRITE_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        } else if (INTEL_INFO(dev)->gen >= 5) {
                reg = DVSCNTR(pipe);
                val = I915_READ(reg);
-               WARN(val & DVS_ENABLE,
+               I915_STATE_WARN(val & DVS_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        }
  
  static void assert_vblank_disabled(struct drm_crtc *crtc)
  {
-       if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+       if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
  }
  
@@@ -1337,12 -1337,12 +1337,12 @@@ static void ibx_assert_pch_refclk_enabl
        u32 val;
        bool enabled;
  
-       WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+       I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
  
        val = I915_READ(PCH_DREF_CONTROL);
        enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
                            DREF_SUPERSPREAD_SOURCE_MASK));
-       WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+       I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
  }
  
  static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        enabled = !!(val & TRANS_ENABLE);
-       WARN(enabled,
+       I915_STATE_WARN(enabled,
             "transcoder assertion failed, should be off on pipe %c but is still active\n",
             pipe_name(pipe));
  }
@@@ -1435,11 -1435,11 +1435,11 @@@ static void assert_pch_dp_disabled(stru
                                   enum pipe pipe, int reg, u32 port_sel)
  {
        u32 val = I915_READ(reg);
-       WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+       I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
  
-       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
             && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
  }
@@@ -1448,11 -1448,11 +1448,11 @@@ static void assert_pch_hdmi_disabled(st
                                     enum pipe pipe, int reg)
  {
        u32 val = I915_READ(reg);
-       WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+       I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
  
-       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
  }
@@@ -1469,13 -1469,13 +1469,13 @@@ static void assert_pch_ports_disabled(s
  
        reg = PCH_ADPA;
        val = I915_READ(reg);
-       WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+       I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
  
        reg = PCH_LVDS;
        val = I915_READ(reg);
-       WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+       I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
  
@@@ -2954,71 -2954,6 +2954,6 @@@ static void intel_update_pipe_size(stru
        crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
  }
  
- static int
- intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-                   struct drm_framebuffer *fb)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       struct drm_framebuffer *old_fb = crtc->primary->fb;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
-       int ret;
-       if (intel_crtc_has_pending_flip(crtc)) {
-               DRM_ERROR("pipe is still busy with an old pageflip\n");
-               return -EBUSY;
-       }
-       /* no fb bound */
-       if (!fb) {
-               DRM_ERROR("No FB bound\n");
-               return 0;
-       }
-       if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
-               DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
-                         plane_name(intel_crtc->plane),
-                         INTEL_INFO(dev)->num_pipes);
-               return -EINVAL;
-       }
-       mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
-       if (ret == 0)
-               i915_gem_track_fb(old_obj, intel_fb_obj(fb),
-                                 INTEL_FRONTBUFFER_PRIMARY(pipe));
-       mutex_unlock(&dev->struct_mutex);
-       if (ret != 0) {
-               DRM_ERROR("pin & fence failed\n");
-               return ret;
-       }
-       dev_priv->display.update_primary_plane(crtc, fb, x, y);
-       if (intel_crtc->active)
-               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
-       crtc->primary->fb = fb;
-       crtc->x = x;
-       crtc->y = y;
-       if (old_fb) {
-               if (intel_crtc->active && old_fb != fb)
-                       intel_wait_for_vblank(dev, intel_crtc->pipe);
-               mutex_lock(&dev->struct_mutex);
-               intel_unpin_fb_obj(old_obj);
-               mutex_unlock(&dev->struct_mutex);
-       }
-       mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
-       mutex_unlock(&dev->struct_mutex);
-       return 0;
- }
  static void intel_fdi_normal_train(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
@@@ -4125,7 -4060,7 +4060,7 @@@ static void intel_disable_planes(struc
        drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
                intel_plane = to_intel_plane(plane);
                if (intel_plane->pipe == pipe)
-                       intel_plane_disable(&intel_plane->base);
+                       plane->funcs->disable_plane(plane);
        }
  }
  
@@@ -4266,7 -4201,7 +4201,7 @@@ static void intel_crtc_enable_planes(st
        hsw_enable_ips(intel_crtc);
  
        mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
+       intel_fbc_update(dev);
        mutex_unlock(&dev->struct_mutex);
  
        /*
@@@ -4288,7 -4223,7 +4223,7 @@@ static void intel_crtc_disable_planes(s
        intel_crtc_wait_for_pending_flips(crtc);
  
        if (dev_priv->fbc.plane == plane)
-               intel_disable_fbc(dev);
+               intel_fbc_disable(dev);
  
        hsw_disable_ips(intel_crtc);
  
@@@ -4565,6 -4500,7 +4500,6 @@@ static void ironlake_crtc_disable(struc
                ironlake_fdi_disable(crtc);
  
                ironlake_disable_pch_transcoder(dev_priv, pipe);
 -              intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  
                if (HAS_PCH_CPT(dev)) {
                        /* disable TRANS_DP_CTL */
        intel_update_watermarks(crtc);
  
        mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
+       intel_fbc_update(dev);
        mutex_unlock(&dev->struct_mutex);
  }
  
@@@ -4635,6 -4571,8 +4570,6 @@@ static void haswell_crtc_disable(struc
  
        if (intel_crtc->config.has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
 -              intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 -                                                    true);
                intel_ddi_fdi_disable(crtc);
        }
  
        intel_update_watermarks(crtc);
  
        mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
+       intel_fbc_update(dev);
        mutex_unlock(&dev->struct_mutex);
  
        if (intel_crtc_to_shared_dpll(intel_crtc))
@@@ -4909,7 -4847,7 +4844,7 @@@ static void cherryview_set_cdclk(struc
                cmd = 0;
                break;
        default:
-               WARN_ON(1);
+               MISSING_CASE(cdclk);
                return;
        }
  
@@@ -5251,7 -5189,7 +5186,7 @@@ static void i9xx_crtc_disable(struct dr
        intel_update_watermarks(crtc);
  
        mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
+       intel_fbc_update(dev);
        mutex_unlock(&dev->struct_mutex);
  }
  
@@@ -5309,8 -5247,6 +5244,6 @@@ static void intel_crtc_disable(struct d
        struct drm_device *dev = crtc->dev;
        struct drm_connector *connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
  
        /* crtc should still be enabled when we disable it. */
        WARN_ON(!crtc->enabled);
        dev_priv->display.crtc_disable(crtc);
        dev_priv->display.off(crtc);
  
-       if (crtc->primary->fb) {
-               mutex_lock(&dev->struct_mutex);
-               intel_unpin_fb_obj(old_obj);
-               i915_gem_track_fb(old_obj, NULL,
-                                 INTEL_FRONTBUFFER_PRIMARY(pipe));
-               mutex_unlock(&dev->struct_mutex);
-               crtc->primary->fb = NULL;
-       }
+       crtc->primary->funcs->disable_plane(crtc->primary);
  
        /* Update computed state. */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@@ -5382,25 -5311,25 +5308,25 @@@ static void intel_connector_check_state
                if (connector->mst_port)
                        return;
  
-               WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+               I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
                     "wrong connector dpms state\n");
-               WARN(connector->base.encoder != &encoder->base,
+               I915_STATE_WARN(connector->base.encoder != &encoder->base,
                     "active connector not linked to encoder\n");
  
                if (encoder) {
-                       WARN(!encoder->connectors_active,
+                       I915_STATE_WARN(!encoder->connectors_active,
                             "encoder->connectors_active not set\n");
  
                        encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-                       WARN(!encoder_enabled, "encoder not enabled\n");
-                       if (WARN_ON(!encoder->base.crtc))
+                       I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
+                       if (I915_STATE_WARN_ON(!encoder->base.crtc))
                                return;
  
                        crtc = encoder->base.crtc;
  
-                       WARN(!crtc->enabled, "crtc not enabled\n");
-                       WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-                       WARN(pipe != to_intel_crtc(crtc)->pipe,
+                       I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+                       I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+                       I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
                             "encoder active on the wrong pipe\n");
                }
        }
@@@ -7810,24 -7739,24 +7736,24 @@@ static void assert_can_disable_lcpll(st
        struct intel_crtc *crtc;
  
        for_each_intel_crtc(dev, crtc)
-               WARN(crtc->active, "CRTC for pipe %c enabled\n",
+               I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
                     pipe_name(crtc->pipe));
  
-       WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
-       WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
-       WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
-       WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
-       WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
-       WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+       I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+       I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+       I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+       I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
+       I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+       I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
             "CPU PWM1 enabled\n");
        if (IS_HASWELL(dev))
-               WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+               I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
                     "CPU PWM2 enabled\n");
-       WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+       I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
             "PCH PWM1 enabled\n");
-       WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+       I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
             "Utility pin enabled\n");
-       WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+       I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
  
        /*
         * In theory we can still leave IRQs enabled, as long as only the HPD
         * gen-specific and since we only disable LCPLL after we fully disable
         * the interrupts, the check below should be enough.
         */
-       WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+       I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
  }
  
  static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@@ -8055,12 -7984,21 +7981,21 @@@ static void skylake_get_ddi_pll(struct 
                                enum port port,
                                struct intel_crtc_config *pipe_config)
  {
-       u32 temp;
+       u32 temp, dpll_ctl1;
  
        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
        pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
  
        switch (pipe_config->ddi_pll_sel) {
+       case SKL_DPLL0:
+               /*
+                * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
+                * of the shared DPLL framework and thus needs to be read out
+                * separately
+                */
+               dpll_ctl1 = I915_READ(DPLL_CTRL1);
+               pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+               break;
        case SKL_DPLL1:
                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
                break;
@@@ -8286,7 -8224,7 +8221,7 @@@ static void i9xx_update_cursor(struct d
                                cntl |= CURSOR_MODE_256_ARGB_AX;
                                break;
                        default:
-                               WARN_ON(1);
+                               MISSING_CASE(intel_crtc->cursor_width);
                                return;
                }
                cntl |= pipe << 28; /* Connect to correct pipe */
@@@ -8405,109 -8343,6 +8340,6 @@@ static bool cursor_size_ok(struct drm_d
        return true;
  }
  
- static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
-                                    struct drm_i915_gem_object *obj,
-                                    uint32_t width, uint32_t height)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       unsigned old_width;
-       uint32_t addr;
-       int ret;
-       /* if we want to turn off the cursor ignore width and height */
-       if (!obj) {
-               DRM_DEBUG_KMS("cursor off\n");
-               addr = 0;
-               mutex_lock(&dev->struct_mutex);
-               goto finish;
-       }
-       /* we only need to pin inside GTT if cursor is non-phy */
-       mutex_lock(&dev->struct_mutex);
-       if (!INTEL_INFO(dev)->cursor_needs_physical) {
-               unsigned alignment;
-               /*
-                * Global gtt pte registers are special registers which actually
-                * forward writes to a chunk of system memory. Which means that
-                * there is no risk that the register values disappear as soon
-                * as we call intel_runtime_pm_put(), so it is correct to wrap
-                * only the pin/unpin/fence and not more.
-                */
-               intel_runtime_pm_get(dev_priv);
-               /* Note that the w/a also requires 2 PTE of padding following
-                * the bo. We currently fill all unused PTE with the shadow
-                * page and so we should always have valid PTE following the
-                * cursor preventing the VT-d warning.
-                */
-               alignment = 0;
-               if (need_vtd_wa(dev))
-                       alignment = 64*1024;
-               ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
-                       intel_runtime_pm_put(dev_priv);
-                       goto fail_locked;
-               }
-               ret = i915_gem_object_put_fence(obj);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to release fence for cursor");
-                       intel_runtime_pm_put(dev_priv);
-                       goto fail_unpin;
-               }
-               addr = i915_gem_obj_ggtt_offset(obj);
-               intel_runtime_pm_put(dev_priv);
-       } else {
-               int align = IS_I830(dev) ? 16 * 1024 : 256;
-               ret = i915_gem_object_attach_phys(obj, align);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to attach phys object\n");
-                       goto fail_locked;
-               }
-               addr = obj->phys_handle->busaddr;
-       }
-  finish:
-       if (intel_crtc->cursor_bo) {
-               if (!INTEL_INFO(dev)->cursor_needs_physical)
-                       i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
-       }
-       i915_gem_track_fb(intel_crtc->cursor_bo, obj,
-                         INTEL_FRONTBUFFER_CURSOR(pipe));
-       mutex_unlock(&dev->struct_mutex);
-       old_width = intel_crtc->cursor_width;
-       intel_crtc->cursor_addr = addr;
-       intel_crtc->cursor_bo = obj;
-       intel_crtc->cursor_width = width;
-       intel_crtc->cursor_height = height;
-       if (intel_crtc->active) {
-               if (old_width != width)
-                       intel_update_watermarks(crtc);
-               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
-       }
-       return 0;
- fail_unpin:
-       i915_gem_object_unpin_from_display_plane(obj);
- fail_locked:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
  static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                 u16 *blue, uint32_t start, uint32_t size)
  {
@@@ -9115,7 -8950,10 +8947,10 @@@ static void intel_unpin_work_fn(struct 
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);
  
-       intel_update_fbc(dev);
+       intel_fbc_update(dev);
+       if (work->flip_queued_req)
+               i915_gem_request_assign(&work->flip_queued_req, NULL);
        mutex_unlock(&dev->struct_mutex);
  
        intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
@@@ -9511,25 -9349,53 +9346,53 @@@ static bool use_mmio_flip(struct intel_
        else if (i915.enable_execlists)
                return true;
        else
-               return ring != obj->ring;
+               return ring != i915_gem_request_get_ring(obj->last_read_req);
  }
  
- static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
+ {
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       const enum pipe pipe = intel_crtc->pipe;
+       u32 ctl, stride;
+       ctl = I915_READ(PLANE_CTL(pipe, 0));
+       ctl &= ~PLANE_CTL_TILED_MASK;
+       if (obj->tiling_mode == I915_TILING_X)
+               ctl |= PLANE_CTL_TILED_X;
+       /*
+        * The stride is either expressed as a multiple of 64 bytes chunks for
+        * linear buffers or in number of tiles for tiled buffers.
+        */
+       stride = fb->pitches[0] >> 6;
+       if (obj->tiling_mode == I915_TILING_X)
+               stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+       /*
+        * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
+        * PLANE_SURF updates, the update is then guaranteed to be atomic.
+        */
+       I915_WRITE(PLANE_CTL(pipe, 0), ctl);
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
+       POSTING_READ(PLANE_SURF(pipe, 0));
+ }
+ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
  {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
-       bool atomic_update;
-       u32 start_vbl_count;
        u32 dspcntr;
        u32 reg;
  
-       intel_mark_page_flip_active(intel_crtc);
-       atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
        reg = DSPCNTR(intel_crtc->plane);
        dspcntr = I915_READ(reg);
  
                   intel_crtc->unpin_work->gtt_offset);
        POSTING_READ(DSPSURF(intel_crtc->plane));
  
+ }
+ /*
+  * XXX: This is the temporary way to update the plane registers until we get
+  * around to using the usual plane update functions for MMIO flips
+  */
+ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+ {
+       struct drm_device *dev = intel_crtc->base.dev;
+       bool atomic_update;
+       u32 start_vbl_count;
+       intel_mark_page_flip_active(intel_crtc);
+       atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+       if (INTEL_INFO(dev)->gen >= 9)
+               skl_do_mmio_flip(intel_crtc);
+       else
+               /* use_mmio_flip() retricts MMIO flips to ilk+ */
+               ilk_do_mmio_flip(intel_crtc);
        if (atomic_update)
                intel_pipe_update_end(intel_crtc, start_vbl_count);
  }
  
  static void intel_mmio_flip_work_func(struct work_struct *work)
  {
-       struct intel_crtc *intel_crtc =
+       struct intel_crtc *crtc =
                container_of(work, struct intel_crtc, mmio_flip.work);
-       struct intel_engine_cs *ring;
-       uint32_t seqno;
-       seqno = intel_crtc->mmio_flip.seqno;
-       ring = intel_crtc->mmio_flip.ring;
+       struct intel_mmio_flip *mmio_flip;
  
-       if (seqno)
-               WARN_ON(__i915_wait_seqno(ring, seqno,
-                                         intel_crtc->reset_counter,
-                                         false, NULL, NULL) != 0);
+       mmio_flip = &crtc->mmio_flip;
+       if (mmio_flip->req)
+               WARN_ON(__i915_wait_request(mmio_flip->req,
+                                           crtc->reset_counter,
+                                           false, NULL, NULL) != 0);
  
-       intel_do_mmio_flip(intel_crtc);
+       intel_do_mmio_flip(crtc);
+       if (mmio_flip->req) {
+               mutex_lock(&crtc->base.dev->struct_mutex);
+               i915_gem_request_assign(&mmio_flip->req, NULL);
+               mutex_unlock(&crtc->base.dev->struct_mutex);
+       }
  }
  
  static int intel_queue_mmio_flip(struct drm_device *dev,
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
-       intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
-       intel_crtc->mmio_flip.ring = obj->ring;
+       i915_gem_request_assign(&intel_crtc->mmio_flip.req,
+                               obj->last_write_req);
  
        schedule_work(&intel_crtc->mmio_flip.work);
  
@@@ -9671,9 -9561,8 +9558,8 @@@ static bool __intel_pageflip_stall_chec
                return false;
  
        if (work->flip_ready_vblank == 0) {
-               if (work->flip_queued_ring &&
-                   !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-                                      work->flip_queued_seqno))
+               if (work->flip_queued_req &&
+                   !i915_gem_request_completed(work->flip_queued_req, true))
                        return false;
  
                work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@@ -9726,6 -9615,8 +9612,8 @@@ static int intel_crtc_page_flip(struct 
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_plane *primary = crtc->primary;
+       struct intel_plane *intel_plane = to_intel_plane(primary);
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
        } else if (IS_IVYBRIDGE(dev)) {
                ring = &dev_priv->ring[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
-               ring = obj->ring;
+               ring = i915_gem_request_get_ring(obj->last_read_req);
                if (ring == NULL || ring->id != RCS)
                        ring = &dev_priv->ring[BCS];
        } else {
                if (ret)
                        goto cleanup_unpin;
  
-               work->flip_queued_seqno = obj->last_write_seqno;
-               work->flip_queued_ring = obj->ring;
+               i915_gem_request_assign(&work->flip_queued_req,
+                                       obj->last_write_req);
        } else {
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;
  
-               work->flip_queued_seqno = intel_ring_get_seqno(ring);
-               work->flip_queued_ring = ring;
+               i915_gem_request_assign(&work->flip_queued_req,
+                                       intel_ring_get_request(ring));
        }
  
        work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
        i915_gem_track_fb(work->old_fb_obj, obj,
                          INTEL_FRONTBUFFER_PRIMARY(pipe));
  
-       intel_disable_fbc(dev);
+       intel_fbc_disable(dev);
        intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
        mutex_unlock(&dev->struct_mutex);
  
@@@ -9884,8 -9775,15 +9772,15 @@@ free_work
  
        if (ret == -EIO) {
  out_hang:
-               intel_crtc_wait_for_pending_flips(crtc);
-               ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+               ret = primary->funcs->update_plane(primary, crtc, fb,
+                                                  intel_plane->crtc_x,
+                                                  intel_plane->crtc_y,
+                                                  intel_plane->crtc_h,
+                                                  intel_plane->crtc_w,
+                                                  intel_plane->src_x,
+                                                  intel_plane->src_y,
+                                                  intel_plane->src_h,
+                                                  intel_plane->src_w);
                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
@@@ -10254,9 -10152,9 +10149,9 @@@ intel_modeset_pipe_config(struct drm_cr
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
-       drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
-       pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
-       pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+       drm_crtc_get_hv_timing(&pipe_config->requested_mode,
+                              &pipe_config->pipe_src_w,
+                              &pipe_config->pipe_src_h);
  
  encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
@@@ -10742,7 -10640,7 +10637,7 @@@ check_connector_state(struct drm_devic
                 * ->get_hw_state callbacks. */
                intel_connector_check_state(connector);
  
-               WARN(&connector->new_encoder->base != connector->base.encoder,
+               I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
                     "connector's staged encoder doesn't match current encoder\n");
        }
  }
@@@ -10762,9 -10660,9 +10657,9 @@@ check_encoder_state(struct drm_device *
                              encoder->base.base.id,
                              encoder->base.name);
  
-               WARN(&encoder->new_crtc->base != encoder->base.crtc,
+               I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
                     "encoder's stage crtc doesn't match current crtc\n");
-               WARN(encoder->connectors_active && !encoder->base.crtc,
+               I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
                     "encoder's active_connectors set, but no crtc\n");
  
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
                        continue;
  
-               WARN(!!encoder->base.crtc != enabled,
+               I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);
-               WARN(active && !encoder->base.crtc,
+               I915_STATE_WARN(active && !encoder->base.crtc,
                     "active encoder with no crtc\n");
  
-               WARN(encoder->connectors_active != active,
+               I915_STATE_WARN(encoder->connectors_active != active,
                     "encoder's computed active state doesn't match tracked active state "
                     "(expected %i, found %i)\n", active, encoder->connectors_active);
  
                active = encoder->get_hw_state(encoder, &pipe);
-               WARN(active != encoder->connectors_active,
+               I915_STATE_WARN(active != encoder->connectors_active,
                     "encoder's hw state doesn't match sw tracking "
                     "(expected %i, found %i)\n",
                     encoder->connectors_active, active);
                        continue;
  
                tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-               WARN(active && pipe != tracked_pipe,
+               I915_STATE_WARN(active && pipe != tracked_pipe,
                     "active encoder's pipe doesn't match"
                     "(expected %i, found %i)\n",
                     tracked_pipe, pipe);
@@@ -10829,7 -10727,7 +10724,7 @@@ check_crtc_state(struct drm_device *dev
                DRM_DEBUG_KMS("[CRTC:%d]\n",
                              crtc->base.base.id);
  
-               WARN(crtc->active && !crtc->base.enabled,
+               I915_STATE_WARN(crtc->active && !crtc->base.enabled,
                     "active crtc, but not enabled in sw tracking\n");
  
                for_each_intel_encoder(dev, encoder) {
                                active = true;
                }
  
-               WARN(active != crtc->active,
+               I915_STATE_WARN(active != crtc->active,
                     "crtc's computed active state doesn't match tracked active state "
                     "(expected %i, found %i)\n", active, crtc->active);
-               WARN(enabled != crtc->base.enabled,
+               I915_STATE_WARN(enabled != crtc->base.enabled,
                     "crtc's computed enabled state doesn't match tracked enabled state "
                     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
  
                                encoder->get_config(encoder, &pipe_config);
                }
  
-               WARN(crtc->active != active,
+               I915_STATE_WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);
  
                if (active &&
                    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
-                       WARN(1, "pipe state doesn't match!\n");
+                       I915_STATE_WARN(1, "pipe state doesn't match!\n");
                        intel_dump_pipe_config(crtc, &pipe_config,
                                               "[hw state]");
                        intel_dump_pipe_config(crtc, &crtc->config,
@@@ -10897,14 -10795,14 +10792,14 @@@ check_shared_dpll_state(struct drm_devi
  
                active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
  
-               WARN(pll->active > hweight32(pll->config.crtc_mask),
+               I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
                     "more active pll users than references: %i vs %i\n",
                     pll->active, hweight32(pll->config.crtc_mask));
-               WARN(pll->active && !pll->on,
+               I915_STATE_WARN(pll->active && !pll->on,
                     "pll in active use but not on in sw tracking\n");
-               WARN(pll->on && !pll->active,
+               I915_STATE_WARN(pll->on && !pll->active,
                     "pll in on but not on in use in sw tracking\n");
-               WARN(pll->on != active,
+               I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
  
                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
                                active_crtcs++;
                }
-               WARN(pll->active != active_crtcs,
+               I915_STATE_WARN(pll->active != active_crtcs,
                     "pll active crtcs mismatch (expected %i, found %i)\n",
                     pll->active, active_crtcs);
-               WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
+               I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
                     "pll enabled crtcs mismatch (expected %i, found %i)\n",
                     hweight32(pll->config.crtc_mask), enabled_crtcs);
  
-               WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
+               I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
                                       sizeof(dpll_hw_state)),
                     "pll hw state mismatch\n");
        }
@@@ -11114,26 -11012,15 +11009,15 @@@ static int __intel_set_mode(struct drm_
         * on the DPLL.
         */
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-               struct drm_framebuffer *old_fb = crtc->primary->fb;
-               struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
-               struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+               struct drm_plane *primary = intel_crtc->base.primary;
+               int vdisplay, hdisplay;
  
-               mutex_lock(&dev->struct_mutex);
-               ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
-               if (ret != 0) {
-                       DRM_ERROR("pin & fence failed\n");
-                       mutex_unlock(&dev->struct_mutex);
-                       goto done;
-               }
-               if (old_fb)
-                       intel_unpin_fb_obj(old_obj);
-               i915_gem_track_fb(old_obj, obj,
-                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-               mutex_unlock(&dev->struct_mutex);
-               crtc->primary->fb = fb;
-               crtc->x = x;
-               crtc->y = y;
+               drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+               ret = primary->funcs->update_plane(primary, &intel_crtc->base,
+                                                  fb, 0, 0,
+                                                  hdisplay, vdisplay,
+                                                  x << 16, y << 16,
+                                                  hdisplay << 16, vdisplay << 16);
        }
  
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@@ -11580,12 -11467,10 +11464,12 @@@ static int intel_crtc_set_config(struc
                    to_intel_crtc(set->crtc)->config.has_audio)
                        config->mode_changed = true;
  
 -              /* Force mode sets for any infoframe stuff */
 -              if (pipe_config->has_infoframe ||
 -                  to_intel_crtc(set->crtc)->config.has_infoframe)
 -                      config->mode_changed = true;
 +              /*
 +               * Note we have an issue here with infoframes: current code
 +               * only updates them on the full mode set path per hw
 +               * requirements.  So here we should be checking for any
 +               * required changes and forcing a mode set.
 +               */
        }
  
        /* set_mode will free it in the mode_changed case */
                                           disable_pipes);
        } else if (config->fb_changed) {
                struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+               struct drm_plane *primary = set->crtc->primary;
+               int vdisplay, hdisplay;
  
-               intel_crtc_wait_for_pending_flips(set->crtc);
-               ret = intel_pipe_set_base(set->crtc,
-                                         set->x, set->y, set->fb);
+               drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+               ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
+                                                  0, 0, hdisplay, vdisplay,
+                                                  set->x << 16, set->y << 16,
+                                                  hdisplay << 16, vdisplay << 16);
  
                /*
                 * We need to make sure the primary plane is re-enabled if it
@@@ -11762,95 -11650,115 +11649,115 @@@ static void intel_shared_dpll_init(stru
        BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
  }
  
- static int
- intel_primary_plane_disable(struct drm_plane *plane)
+ /**
+  * intel_prepare_plane_fb - Prepare fb for usage on plane
+  * @plane: drm plane to prepare for
+  * @fb: framebuffer to prepare for presentation
+  *
+  * Prepares a framebuffer for usage on a display plane.  Generally this
+  * involves pinning the underlying object and updating the frontbuffer tracking
+  * bits.  Some older platforms need special physical address handling for
+  * cursor planes.
+  *
+  * Returns 0 on success, negative error code on failure.
+  */
+ int
+ intel_prepare_plane_fb(struct drm_plane *plane,
+                      struct drm_framebuffer *fb)
  {
        struct drm_device *dev = plane->dev;
-       struct intel_crtc *intel_crtc;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       enum pipe pipe = intel_plane->pipe;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+       unsigned frontbuffer_bits = 0;
+       int ret = 0;
  
-       if (!plane->fb)
+       if (WARN_ON(fb == plane->fb || !obj))
                return 0;
  
-       BUG_ON(!plane->crtc);
+       switch (plane->type) {
+       case DRM_PLANE_TYPE_PRIMARY:
+               frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
+               break;
+       case DRM_PLANE_TYPE_CURSOR:
+               frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
+               break;
+       case DRM_PLANE_TYPE_OVERLAY:
+               frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
+               break;
+       }
  
-       intel_crtc = to_intel_crtc(plane->crtc);
+       mutex_lock(&dev->struct_mutex);
  
-       /*
-        * Even though we checked plane->fb above, it's still possible that
-        * the primary plane has been implicitly disabled because the crtc
-        * coordinates given weren't visible, or because we detected
-        * that it was 100% covered by a sprite plane.  Or, the CRTC may be
-        * off and we've set a fb, but haven't actually turned on the CRTC yet.
-        * In either case, we need to unpin the FB and let the fb pointer get
-        * updated, but otherwise we don't need to touch the hardware.
-        */
-       if (!intel_crtc->primary_enabled)
-               goto disable_unpin;
+       if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+           INTEL_INFO(dev)->cursor_needs_physical) {
+               int align = IS_I830(dev) ? 16 * 1024 : 256;
+               ret = i915_gem_object_attach_phys(obj, align);
+               if (ret)
+                       DRM_DEBUG_KMS("failed to attach phys object\n");
+       } else {
+               ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+       }
  
-       intel_crtc_wait_for_pending_flips(plane->crtc);
-       intel_disable_primary_hw_plane(plane, plane->crtc);
+       if (ret == 0)
+               i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
  
- disable_unpin:
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
-                         INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-       intel_unpin_fb_obj(intel_fb_obj(plane->fb));
        mutex_unlock(&dev->struct_mutex);
-       plane->fb = NULL;
  
-       return 0;
+       return ret;
+ }
+ /**
+  * intel_cleanup_plane_fb - Cleans up an fb after plane use
+  * @plane: drm plane to clean up for
+  * @fb: old framebuffer that was on plane
+  *
+  * Cleans up a framebuffer that has just been removed from a plane.
+  */
+ void
+ intel_cleanup_plane_fb(struct drm_plane *plane,
+                      struct drm_framebuffer *fb)
+ {
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       if (WARN_ON(!obj))
+               return;
+       if (plane->type != DRM_PLANE_TYPE_CURSOR ||
+           !INTEL_INFO(dev)->cursor_needs_physical) {
+               mutex_lock(&dev->struct_mutex);
+               intel_unpin_fb_obj(obj);
+               mutex_unlock(&dev->struct_mutex);
+       }
  }
  
  static int
  intel_check_primary_plane(struct drm_plane *plane,
                          struct intel_plane_state *state)
  {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_framebuffer *fb = state->fb;
+       struct drm_crtc *crtc = state->base.crtc;
+       struct drm_framebuffer *fb = state->base.fb;
        struct drm_rect *dest = &state->dst;
        struct drm_rect *src = &state->src;
        const struct drm_rect *clip = &state->clip;
-       return drm_plane_helper_check_update(plane, crtc, fb,
-                                            src, dest, clip,
-                                            DRM_PLANE_HELPER_NO_SCALING,
-                                            DRM_PLANE_HELPER_NO_SCALING,
-                                            false, true, &state->visible);
- }
- static int
- intel_prepare_primary_plane(struct drm_plane *plane,
-                           struct intel_plane_state *state)
- {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_framebuffer *fb = state->fb;
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
        int ret;
  
-       intel_crtc_wait_for_pending_flips(crtc);
+       ret = drm_plane_helper_check_update(plane, crtc, fb,
+                                           src, dest, clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           false, true, &state->visible);
+       if (ret)
+               return ret;
  
+       intel_crtc_wait_for_pending_flips(crtc);
        if (intel_crtc_has_pending_flip(crtc)) {
                DRM_ERROR("pipe is still busy with an old pageflip\n");
                return -EBUSY;
        }
  
-       if (old_obj != obj) {
-               mutex_lock(&dev->struct_mutex);
-               ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
-               if (ret == 0)
-                       i915_gem_track_fb(old_obj, obj,
-                                         INTEL_FRONTBUFFER_PRIMARY(pipe));
-               mutex_unlock(&dev->struct_mutex);
-               if (ret != 0) {
-                       DRM_DEBUG_KMS("pin & fence failed\n");
-                       return ret;
-               }
-       }
        return 0;
  }
  
@@@ -11858,19 -11766,28 +11765,28 @@@ static voi
  intel_commit_primary_plane(struct drm_plane *plane,
                           struct intel_plane_state *state)
  {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_framebuffer *fb = state->fb;
-       struct drm_device *dev = crtc->dev;
+       struct drm_crtc *crtc = state->base.crtc;
+       struct drm_framebuffer *fb = state->base.fb;
+       struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       struct drm_framebuffer *old_fb = plane->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_rect *src = &state->src;
+       enum pipe pipe = intel_plane->pipe;
  
-       crtc->primary->fb = fb;
+       if (!fb) {
+               /*
+                * 'prepare' is never called when plane is being disabled, so
+                * we need to handle frontbuffer tracking here
+                */
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+                                 INTEL_FRONTBUFFER_PRIMARY(pipe));
+               mutex_unlock(&dev->struct_mutex);
+       }
+       plane->fb = fb;
        crtc->x = src->x1 >> 16;
        crtc->y = src->y1 >> 16;
  
                    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
                    dev_priv->fbc.plane == intel_crtc->plane &&
                    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
-                       intel_disable_fbc(dev);
+                       intel_fbc_disable(dev);
                }
  
                if (state->visible) {
                intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
  
                mutex_lock(&dev->struct_mutex);
-               intel_update_fbc(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
-       if (old_fb && old_fb != fb) {
-               if (intel_crtc->active)
-                       intel_wait_for_vblank(dev, intel_crtc->pipe);
-               mutex_lock(&dev->struct_mutex);
-               intel_unpin_fb_obj(old_obj);
+               intel_fbc_update(dev);
                mutex_unlock(&dev->struct_mutex);
        }
  }
  
static int
- intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                            unsigned int crtc_w, unsigned int crtc_h,
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
+ int
+ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                  unsigned int crtc_w, unsigned int crtc_h,
+                  uint32_t src_x, uint32_t src_y,
+                  uint32_t src_w, uint32_t src_h)
  {
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_framebuffer *old_fb = plane->fb;
        struct intel_plane_state state;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int ret;
  
-       state.crtc = crtc;
-       state.fb = fb;
+       state.base.crtc = crtc ? crtc : plane->crtc;
+       state.base.fb = fb;
  
        /* sample coordinates in 16.16 fixed point */
        state.src.x1 = src_x;
        state.orig_src = state.src;
        state.orig_dst = state.dst;
  
-       ret = intel_check_primary_plane(plane, &state);
+       ret = intel_plane->check_plane(plane, &state);
        if (ret)
                return ret;
  
-       ret = intel_prepare_primary_plane(plane, &state);
-       if (ret)
-               return ret;
+       if (fb != old_fb && fb) {
+               ret = intel_prepare_plane_fb(plane, fb);
+               if (ret)
+                       return ret;
+       }
+       intel_runtime_pm_get(dev_priv);
+       intel_plane->commit_plane(plane, &state);
+       intel_runtime_pm_put(dev_priv);
+       if (fb != old_fb && old_fb) {
+               if (intel_crtc->active)
+                       intel_wait_for_vblank(dev, intel_crtc->pipe);
+               intel_cleanup_plane_fb(plane, old_fb);
+       }
  
-       intel_commit_primary_plane(plane, &state);
+       plane->fb = fb;
  
        return 0;
  }
  
+ /**
+  * intel_disable_plane - disable a plane
+  * @plane: plane to disable
+  *
+  * General disable handler for all plane types.
+  */
+ int
+ intel_disable_plane(struct drm_plane *plane)
+ {
+       if (!plane->fb)
+               return 0;
+       if (WARN_ON(!plane->crtc))
+               return -EINVAL;
+       return plane->funcs->update_plane(plane, plane->crtc, NULL,
+                                         0, 0, 0, 0, 0, 0, 0, 0);
+ }
  /* Common destruction function for both primary and cursor planes */
  static void intel_plane_destroy(struct drm_plane *plane)
  {
  }
  
  static const struct drm_plane_funcs intel_primary_plane_funcs = {
-       .update_plane = intel_primary_plane_setplane,
-       .disable_plane = intel_primary_plane_disable,
+       .update_plane = intel_update_plane,
+       .disable_plane = intel_disable_plane,
        .destroy = intel_plane_destroy,
        .set_property = intel_plane_set_property
  };
@@@ -12026,6 -11969,8 +11968,8 @@@ static struct drm_plane *intel_primary_
        primary->pipe = pipe;
        primary->plane = pipe;
        primary->rotation = BIT(DRM_ROTATE_0);
+       primary->check_plane = intel_check_primary_plane;
+       primary->commit_plane = intel_commit_primary_plane;
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
                primary->plane = !pipe;
  
        return &primary->base;
  }
  
- static int
- intel_cursor_plane_disable(struct drm_plane *plane)
- {
-       if (!plane->fb)
-               return 0;
-       BUG_ON(!plane->crtc);
-       return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
- }
  static int
  intel_check_cursor_plane(struct drm_plane *plane,
                         struct intel_plane_state *state)
  {
-       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc *crtc = state->base.crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_framebuffer *fb = state->fb;
+       struct drm_framebuffer *fb = state->base.fb;
        struct drm_rect *dest = &state->dst;
        struct drm_rect *src = &state->src;
        const struct drm_rect *clip = &state->clip;
        return ret;
  }
  
- static int
+ static void
  intel_commit_cursor_plane(struct drm_plane *plane,
                          struct intel_plane_state *state)
  {
-       struct drm_crtc *crtc = state->crtc;
-       struct drm_framebuffer *fb = state->fb;
+       struct drm_crtc *crtc = state->base.crtc;
+       struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
-       int crtc_w, crtc_h;
+       struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
+       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+       enum pipe pipe = intel_crtc->pipe;
+       unsigned old_width;
+       uint32_t addr;
  
+       plane->fb = state->base.fb;
        crtc->cursor_x = state->orig_dst.x1;
        crtc->cursor_y = state->orig_dst.y1;
  
        intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
  
-       if (fb != crtc->cursor->fb) {
-               crtc_w = drm_rect_width(&state->orig_dst);
-               crtc_h = drm_rect_height(&state->orig_dst);
-               return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
-       } else {
-               intel_crtc_update_cursor(crtc, state->visible);
-               intel_frontbuffer_flip(crtc->dev,
-                                      INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
+       if (intel_crtc->cursor_bo == obj)
+               goto update;
  
-               return 0;
+       /*
+        * 'prepare' is only called when fb != NULL; we still need to update
+        * frontbuffer tracking for the 'disable' case here.
+        */
+       if (!obj) {
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_track_fb(old_obj, NULL,
+                                 INTEL_FRONTBUFFER_CURSOR(pipe));
+               mutex_unlock(&dev->struct_mutex);
        }
- }
- static int
- intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                         unsigned int crtc_w, unsigned int crtc_h,
-                         uint32_t src_x, uint32_t src_y,
-                         uint32_t src_w, uint32_t src_h)
- {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_plane_state state;
-       int ret;
-       state.crtc = crtc;
-       state.fb = fb;
  
-       /* sample coordinates in 16.16 fixed point */
-       state.src.x1 = src_x;
-       state.src.x2 = src_x + src_w;
-       state.src.y1 = src_y;
-       state.src.y2 = src_y + src_h;
-       /* integer pixels */
-       state.dst.x1 = crtc_x;
-       state.dst.x2 = crtc_x + crtc_w;
-       state.dst.y1 = crtc_y;
-       state.dst.y2 = crtc_y + crtc_h;
+       if (!obj)
+               addr = 0;
+       else if (!INTEL_INFO(dev)->cursor_needs_physical)
+               addr = i915_gem_obj_ggtt_offset(obj);
+       else
+               addr = obj->phys_handle->busaddr;
  
-       state.clip.x1 = 0;
-       state.clip.y1 = 0;
-       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
-       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+       intel_crtc->cursor_addr = addr;
+       intel_crtc->cursor_bo = obj;
+ update:
+       old_width = intel_crtc->cursor_width;
  
-       state.orig_src = state.src;
-       state.orig_dst = state.dst;
+       intel_crtc->cursor_width = drm_rect_width(&state->orig_dst);
+       intel_crtc->cursor_height = drm_rect_height(&state->orig_dst);
  
-       ret = intel_check_cursor_plane(plane, &state);
-       if (ret)
-               return ret;
+       if (intel_crtc->active) {
+               if (old_width != intel_crtc->cursor_width)
+                       intel_update_watermarks(crtc);
+               intel_crtc_update_cursor(crtc, state->visible);
  
-       return intel_commit_cursor_plane(plane, &state);
+               intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+       }
  }
  
  static const struct drm_plane_funcs intel_cursor_plane_funcs = {
-       .update_plane = intel_cursor_plane_update,
-       .disable_plane = intel_cursor_plane_disable,
+       .update_plane = intel_update_plane,
+       .disable_plane = intel_disable_plane,
        .destroy = intel_plane_destroy,
        .set_property = intel_plane_set_property,
  };
@@@ -12225,6 -12145,8 +12144,8 @@@ static struct drm_plane *intel_cursor_p
        cursor->pipe = pipe;
        cursor->plane = pipe;
        cursor->rotation = BIT(DRM_ROTATE_0);
+       cursor->check_plane = intel_check_cursor_plane;
+       cursor->commit_plane = intel_commit_cursor_plane;
  
        drm_universal_plane_init(dev, &cursor->base, 0,
                                 &intel_cursor_plane_funcs,
@@@ -12383,28 -12305,6 +12304,6 @@@ static bool has_edp_a(struct drm_devic
        return true;
  }
  
- const char *intel_output_name(int output)
- {
-       static const char *names[] = {
-               [INTEL_OUTPUT_UNUSED] = "Unused",
-               [INTEL_OUTPUT_ANALOG] = "Analog",
-               [INTEL_OUTPUT_DVO] = "DVO",
-               [INTEL_OUTPUT_SDVO] = "SDVO",
-               [INTEL_OUTPUT_LVDS] = "LVDS",
-               [INTEL_OUTPUT_TVOUT] = "TV",
-               [INTEL_OUTPUT_HDMI] = "HDMI",
-               [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
-               [INTEL_OUTPUT_EDP] = "eDP",
-               [INTEL_OUTPUT_DSI] = "DSI",
-               [INTEL_OUTPUT_UNKNOWN] = "Unknown",
-       };
-       if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
-               return "Invalid";
-       return names[output];
- }
  static bool intel_crt_present(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -13153,7 -13053,7 +13052,7 @@@ void intel_modeset_init(struct drm_devi
        intel_setup_outputs(dev);
  
        /* Just in case the BIOS is doing something questionable. */
-       intel_disable_fbc(dev);
+       intel_fbc_disable(dev);
  
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, false);
@@@ -13670,7 -13570,7 +13569,7 @@@ void intel_modeset_cleanup(struct drm_d
  
        intel_unregister_dsm_handler();
  
-       intel_disable_fbc(dev);
+       intel_fbc_disable(dev);
  
        ironlake_teardown_rc6(dev);
  
  #define INTEL_RC6p_ENABLE                     (1<<1)
  #define INTEL_RC6pp_ENABLE                    (1<<2)
  
- /* FBC, or Frame Buffer Compression, is a technique employed to compress the
-  * framebuffer contents in-memory, aiming at reducing the required bandwidth
-  * during in-memory transfers and, therefore, reduce the power packet.
-  *
-  * The benefits of FBC are mostly visible with solid backgrounds and
-  * variation-less patterns.
-  *
-  * FBC-related functionality can be enabled by the means of the
-  * i915.i915_enable_fbc parameter
-  */
  static void gen9_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
  }
  
- static void i8xx_disable_fbc(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 fbc_ctl;
-       dev_priv->fbc.enabled = false;
-       /* Disable compression */
-       fbc_ctl = I915_READ(FBC_CONTROL);
-       if ((fbc_ctl & FBC_CTL_EN) == 0)
-               return;
-       fbc_ctl &= ~FBC_CTL_EN;
-       I915_WRITE(FBC_CONTROL, fbc_ctl);
-       /* Wait for compressing bit to clear */
-       if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
-               DRM_DEBUG_KMS("FBC idle timed out\n");
-               return;
-       }
-       DRM_DEBUG_KMS("disabled FBC\n");
- }
- static void i8xx_enable_fbc(struct drm_crtc *crtc)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int cfb_pitch;
-       int i;
-       u32 fbc_ctl;
-       dev_priv->fbc.enabled = true;
-       cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
-       if (fb->pitches[0] < cfb_pitch)
-               cfb_pitch = fb->pitches[0];
-       /* FBC_CTL wants 32B or 64B units */
-       if (IS_GEN2(dev))
-               cfb_pitch = (cfb_pitch / 32) - 1;
-       else
-               cfb_pitch = (cfb_pitch / 64) - 1;
-       /* Clear old tags */
-       for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
-               I915_WRITE(FBC_TAG + (i * 4), 0);
-       if (IS_GEN4(dev)) {
-               u32 fbc_ctl2;
-               /* Set it up... */
-               fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-               fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
-               I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-               I915_WRITE(FBC_FENCE_OFF, crtc->y);
-       }
-       /* enable it... */
-       fbc_ctl = I915_READ(FBC_CONTROL);
-       fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
-       fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-       if (IS_I945GM(dev))
-               fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
-       fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= obj->fence_reg;
-       I915_WRITE(FBC_CONTROL, fbc_ctl);
-       DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-                     cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
- }
- static bool i8xx_fbc_enabled(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
- }
- static void g4x_enable_fbc(struct drm_crtc *crtc)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       u32 dpfc_ctl;
-       dev_priv->fbc.enabled = true;
-       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-       dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-       I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-       /* enable it... */
-       I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
- }
- static void g4x_disable_fbc(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 dpfc_ctl;
-       dev_priv->fbc.enabled = false;
-       /* Disable compression */
-       dpfc_ctl = I915_READ(DPFC_CONTROL);
-       if (dpfc_ctl & DPFC_CTL_EN) {
-               dpfc_ctl &= ~DPFC_CTL_EN;
-               I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-               DRM_DEBUG_KMS("disabled FBC\n");
-       }
- }
- static bool g4x_fbc_enabled(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
- }
- static void sandybridge_blit_fbc_update(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 blt_ecoskpd;
-       /* Make sure blitter notifies FBC of writes */
-       /* Blitter is part of Media powerwell on VLV. No impact of
-        * his param in other platforms for now */
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
-       blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
-       blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
-               GEN6_BLITTER_LOCK_SHIFT;
-       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-       blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
-       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-       blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
-                        GEN6_BLITTER_LOCK_SHIFT);
-       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-       POSTING_READ(GEN6_BLITTER_ECOSKPD);
-       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
- }
- static void ironlake_enable_fbc(struct drm_crtc *crtc)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       u32 dpfc_ctl;
-       dev_priv->fbc.enabled = true;
-       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-               dev_priv->fbc.threshold++;
-       switch (dev_priv->fbc.threshold) {
-       case 4:
-       case 3:
-               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
-               break;
-       case 2:
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-               break;
-       case 1:
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-               break;
-       }
-       dpfc_ctl |= DPFC_CTL_FENCE_EN;
-       if (IS_GEN5(dev))
-               dpfc_ctl |= obj->fence_reg;
-       I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-       I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
-       /* enable it... */
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-       if (IS_GEN6(dev)) {
-               I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-               sandybridge_blit_fbc_update(dev);
-       }
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
- }
- static void ironlake_disable_fbc(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 dpfc_ctl;
-       dev_priv->fbc.enabled = false;
-       /* Disable compression */
-       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-       if (dpfc_ctl & DPFC_CTL_EN) {
-               dpfc_ctl &= ~DPFC_CTL_EN;
-               I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-               DRM_DEBUG_KMS("disabled FBC\n");
-       }
- }
- static bool ironlake_fbc_enabled(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
- }
- static void gen7_enable_fbc(struct drm_crtc *crtc)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       u32 dpfc_ctl;
-       dev_priv->fbc.enabled = true;
-       dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-               dev_priv->fbc.threshold++;
-       switch (dev_priv->fbc.threshold) {
-       case 4:
-       case 3:
-               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
-               break;
-       case 2:
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-               break;
-       case 1:
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-               break;
-       }
-       dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-       if (dev_priv->fbc.false_color)
-               dpfc_ctl |= FBC_CTL_FALSE_COLOR;
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-       if (IS_IVYBRIDGE(dev)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:ivb */
-               I915_WRITE(ILK_DISPLAY_CHICKEN1,
-                          I915_READ(ILK_DISPLAY_CHICKEN1) |
-                          ILK_FBCQ_DIS);
-       } else {
-               /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-               I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
-                          I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
-                          HSW_FBCQ_DIS);
-       }
-       I915_WRITE(SNB_DPFC_CTL_SA,
-                  SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-       I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-       sandybridge_blit_fbc_update(dev);
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
- }
- bool intel_fbc_enabled(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return dev_priv->fbc.enabled;
- }
- void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!IS_GEN8(dev))
-               return;
-       if (!intel_fbc_enabled(dev))
-               return;
-       I915_WRITE(MSG_FBC_REND_STATE, value);
- }
- static void intel_fbc_work_fn(struct work_struct *__work)
- {
-       struct intel_fbc_work *work =
-               container_of(to_delayed_work(__work),
-                            struct intel_fbc_work, work);
-       struct drm_device *dev = work->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       mutex_lock(&dev->struct_mutex);
-       if (work == dev_priv->fbc.fbc_work) {
-               /* Double check that we haven't switched fb without cancelling
-                * the prior work.
-                */
-               if (work->crtc->primary->fb == work->fb) {
-                       dev_priv->display.enable_fbc(work->crtc);
-                       dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
-                       dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
-                       dev_priv->fbc.y = work->crtc->y;
-               }
-               dev_priv->fbc.fbc_work = NULL;
-       }
-       mutex_unlock(&dev->struct_mutex);
-       kfree(work);
- }
- static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
- {
-       if (dev_priv->fbc.fbc_work == NULL)
-               return;
-       DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-       /* Synchronisation is provided by struct_mutex and checking of
-        * dev_priv->fbc.fbc_work, so we can perform the cancellation
-        * entirely asynchronously.
-        */
-       if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
-               /* tasklet was killed before being run, clean up */
-               kfree(dev_priv->fbc.fbc_work);
-       /* Mark the work as no longer wanted so that if it does
-        * wake-up (because the work was already running and waiting
-        * for our mutex), it will discover that is no longer
-        * necessary to run.
-        */
-       dev_priv->fbc.fbc_work = NULL;
- }
- static void intel_enable_fbc(struct drm_crtc *crtc)
- {
-       struct intel_fbc_work *work;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!dev_priv->display.enable_fbc)
-               return;
-       intel_cancel_fbc_work(dev_priv);
-       work = kzalloc(sizeof(*work), GFP_KERNEL);
-       if (work == NULL) {
-               DRM_ERROR("Failed to allocate FBC work structure\n");
-               dev_priv->display.enable_fbc(crtc);
-               return;
-       }
-       work->crtc = crtc;
-       work->fb = crtc->primary->fb;
-       INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-       dev_priv->fbc.fbc_work = work;
-       /* Delay the actual enabling to let pageflipping cease and the
-        * display to settle before starting the compression. Note that
-        * this delay also serves a second purpose: it allows for a
-        * vblank to pass after disabling the FBC before we attempt
-        * to modify the control registers.
-        *
-        * A more complicated solution would involve tracking vblanks
-        * following the termination of the page-flipping sequence
-        * and indeed performing the enable as a co-routine and not
-        * waiting synchronously upon the vblank.
-        *
-        * WaFbcWaitForVBlankBeforeEnable:ilk,snb
-        */
-       schedule_delayed_work(&work->work, msecs_to_jiffies(50));
- }
- void intel_disable_fbc(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       intel_cancel_fbc_work(dev_priv);
-       if (!dev_priv->display.disable_fbc)
-               return;
-       dev_priv->display.disable_fbc(dev);
-       dev_priv->fbc.plane = -1;
- }
- static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
-                             enum no_fbc_reason reason)
- {
-       if (dev_priv->fbc.no_fbc_reason == reason)
-               return false;
-       dev_priv->fbc.no_fbc_reason = reason;
-       return true;
- }
- /**
-  * intel_update_fbc - enable/disable FBC as needed
-  * @dev: the drm_device
-  *
-  * Set up the framebuffer compression hardware at mode set time.  We
-  * enable it if possible:
-  *   - plane A only (on pre-965)
-  *   - no pixel mulitply/line duplication
-  *   - no alpha buffer discard
-  *   - no dual wide
-  *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
-  *
-  * We can't assume that any compression will take place (worst case),
-  * so the compressed buffer has to be the same size as the uncompressed
-  * one.  It also must reside (along with the line length buffer) in
-  * stolen memory.
-  *
-  * We need to enable/disable FBC on a global basis.
-  */
- void intel_update_fbc(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = NULL, *tmp_crtc;
-       struct intel_crtc *intel_crtc;
-       struct drm_framebuffer *fb;
-       struct drm_i915_gem_object *obj;
-       const struct drm_display_mode *adjusted_mode;
-       unsigned int max_width, max_height;
-       if (!HAS_FBC(dev)) {
-               set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
-               return;
-       }
-       if (!i915.powersave) {
-               if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-                       DRM_DEBUG_KMS("fbc disabled per module param\n");
-               return;
-       }
-       /*
-        * If FBC is already on, we just have to verify that we can
-        * keep it that way...
-        * Need to disable if:
-        *   - more than one pipe is active
-        *   - changing FBC params (stride, fence, mode)
-        *   - new fb is too large to fit in compressed buffer
-        *   - going to an unsupported config (interlace, pixel multiply, etc.)
-        */
-       for_each_crtc(dev, tmp_crtc) {
-               if (intel_crtc_active(tmp_crtc) &&
-                   to_intel_crtc(tmp_crtc)->primary_enabled) {
-                       if (crtc) {
-                               if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-                                       DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                               goto out_disable;
-                       }
-                       crtc = tmp_crtc;
-               }
-       }
-       if (!crtc || crtc->primary->fb == NULL) {
-               if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-                       DRM_DEBUG_KMS("no output, disabling\n");
-               goto out_disable;
-       }
-       intel_crtc = to_intel_crtc(crtc);
-       fb = crtc->primary->fb;
-       obj = intel_fb_obj(fb);
-       adjusted_mode = &intel_crtc->config.adjusted_mode;
-       if (i915.enable_fbc < 0) {
-               if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-                       DRM_DEBUG_KMS("disabled per chip default\n");
-               goto out_disable;
-       }
-       if (!i915.enable_fbc) {
-               if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-                       DRM_DEBUG_KMS("fbc disabled per module param\n");
-               goto out_disable;
-       }
-       if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-           (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-               if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-                       DRM_DEBUG_KMS("mode incompatible with compression, "
-                                     "disabling\n");
-               goto out_disable;
-       }
-       if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
-               max_width = 4096;
-               max_height = 4096;
-       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-               max_width = 4096;
-               max_height = 2048;
-       } else {
-               max_width = 2048;
-               max_height = 1536;
-       }
-       if (intel_crtc->config.pipe_src_w > max_width ||
-           intel_crtc->config.pipe_src_h > max_height) {
-               if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
-                       DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-               goto out_disable;
-       }
-       if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
-           intel_crtc->plane != PLANE_A) {
-               if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-                       DRM_DEBUG_KMS("plane not A, disabling compression\n");
-               goto out_disable;
-       }
-       /* The use of a CPU fence is mandatory in order to detect writes
-        * by the CPU to the scanout and trigger updates to the FBC.
-        */
-       if (obj->tiling_mode != I915_TILING_X ||
-           obj->fence_reg == I915_FENCE_REG_NONE) {
-               if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
-                       DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-               goto out_disable;
-       }
-       if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-           to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
-               if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-                       DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
-               goto out_disable;
-       }
-       /* If the kernel debugger is active, always disable compression */
-       if (in_dbg_master())
-               goto out_disable;
-       if (i915_gem_stolen_setup_compression(dev, obj->base.size,
-                                             drm_format_plane_cpp(fb->pixel_format, 0))) {
-               if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
-                       DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-               goto out_disable;
-       }
-       /* If the scanout has not changed, don't modify the FBC settings.
-        * Note that we make the fundamental assumption that the fb->obj
-        * cannot be unpinned (and have its GTT offset and fence revoked)
-        * without first being decoupled from the scanout and FBC disabled.
-        */
-       if (dev_priv->fbc.plane == intel_crtc->plane &&
-           dev_priv->fbc.fb_id == fb->base.id &&
-           dev_priv->fbc.y == crtc->y)
-               return;
-       if (intel_fbc_enabled(dev)) {
-               /* We update FBC along two paths, after changing fb/crtc
-                * configuration (modeswitching) and after page-flipping
-                * finishes. For the latter, we know that not only did
-                * we disable the FBC at the start of the page-flip
-                * sequence, but also more than one vblank has passed.
-                *
-                * For the former case of modeswitching, it is possible
-                * to switch between two FBC valid configurations
-                * instantaneously so we do need to disable the FBC
-                * before we can modify its control registers. We also
-                * have to wait for the next vblank for that to take
-                * effect. However, since we delay enabling FBC we can
-                * assume that a vblank has passed since disabling and
-                * that we can safely alter the registers in the deferred
-                * callback.
-                *
-                * In the scenario that we go from a valid to invalid
-                * and then back to valid FBC configuration we have
-                * no strict enforcement that a vblank occurred since
-                * disabling the FBC. However, along all current pipe
-                * disabling paths we do need to wait for a vblank at
-                * some point. And we wait before enabling FBC anyway.
-                */
-               DRM_DEBUG_KMS("disabling active FBC for update\n");
-               intel_disable_fbc(dev);
-       }
-       intel_enable_fbc(crtc);
-       dev_priv->fbc.no_fbc_reason = FBC_OK;
-       return;
- out_disable:
-       /* Multiple disables should be harmless */
-       if (intel_fbc_enabled(dev)) {
-               DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-               intel_disable_fbc(dev);
-       }
-       i915_gem_stolen_cleanup_compression(dev);
- }
  
  static void i915_pineview_get_mem_freq(struct drm_device *dev)
  {
@@@ -3286,7 -2668,8 +2668,8 @@@ static void skl_compute_wm_pipe_paramet
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
                struct intel_plane *intel_plane = to_intel_plane(plane);
  
-               if (intel_plane->pipe == pipe)
+               if (intel_plane->pipe == pipe &&
+                       plane->type == DRM_PLANE_TYPE_OVERLAY)
                        p->plane[i++] = intel_plane->wm;
        }
  }
@@@ -3621,9 -3004,8 +3004,8 @@@ static void skl_flush_wm_values(struct 
                    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
                        skl_wm_flush_pipe(dev_priv, pipe, 2);
                        intel_wait_for_vblank(dev, pipe);
+                       reallocated[pipe] = true;
                }
-               reallocated[pipe] = true;
        }
  
        /*
@@@ -5307,7 -4689,8 +4689,8 @@@ static void cherryview_enable_rps(struc
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
  
-       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
+       I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
  
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
        /* 3: Enable RC6 */
        if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
                                                (pcbr >> VLV_PCBR_ADDR_SHIFT))
-               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+               rc6_mode = GEN7_RC_CTL_TO_MODE;
  
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
  
@@@ -5681,146 -5064,27 +5064,27 @@@ unsigned long i915_mch_val(struct drm_i
        return ((m * x) / 127) - b;
  }
  
- static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+ static int _pxvid_to_vd(u8 pxvid)
+ {
+       if (pxvid == 0)
+               return 0;
+       if (pxvid >= 8 && pxvid < 31)
+               pxvid = 31;
+       return (pxvid + 2) * 125;
+ }
+ static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
  {
        struct drm_device *dev = dev_priv->dev;
-       static const struct v_table {
-               u16 vd; /* in .1 mil */
-               u16 vm; /* in .1 mil */
-       } v_table[] = {
-               { 0, 0, },
-               { 375, 0, },
-               { 500, 0, },
-               { 625, 0, },
-               { 750, 0, },
-               { 875, 0, },
-               { 1000, 0, },
-               { 1125, 0, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4125, 3000, },
-               { 4250, 3125, },
-               { 4375, 3250, },
-               { 4500, 3375, },
-               { 4625, 3500, },
-               { 4750, 3625, },
-               { 4875, 3750, },
-               { 5000, 3875, },
-               { 5125, 4000, },
-               { 5250, 4125, },
-               { 5375, 4250, },
-               { 5500, 4375, },
-               { 5625, 4500, },
-               { 5750, 4625, },
-               { 5875, 4750, },
-               { 6000, 4875, },
-               { 6125, 5000, },
-               { 6250, 5125, },
-               { 6375, 5250, },
-               { 6500, 5375, },
-               { 6625, 5500, },
-               { 6750, 5625, },
-               { 6875, 5750, },
-               { 7000, 5875, },
-               { 7125, 6000, },
-               { 7250, 6125, },
-               { 7375, 6250, },
-               { 7500, 6375, },
-               { 7625, 6500, },
-               { 7750, 6625, },
-               { 7875, 6750, },
-               { 8000, 6875, },
-               { 8125, 7000, },
-               { 8250, 7125, },
-               { 8375, 7250, },
-               { 8500, 7375, },
-               { 8625, 7500, },
-               { 8750, 7625, },
-               { 8875, 7750, },
-               { 9000, 7875, },
-               { 9125, 8000, },
-               { 9250, 8125, },
-               { 9375, 8250, },
-               { 9500, 8375, },
-               { 9625, 8500, },
-               { 9750, 8625, },
-               { 9875, 8750, },
-               { 10000, 8875, },
-               { 10125, 9000, },
-               { 10250, 9125, },
-               { 10375, 9250, },
-               { 10500, 9375, },
-               { 10625, 9500, },
-               { 10750, 9625, },
-               { 10875, 9750, },
-               { 11000, 9875, },
-               { 11125, 10000, },
-               { 11250, 10125, },
-               { 11375, 10250, },
-               { 11500, 10375, },
-               { 11625, 10500, },
-               { 11750, 10625, },
-               { 11875, 10750, },
-               { 12000, 10875, },
-               { 12125, 11000, },
-               { 12250, 11125, },
-               { 12375, 11250, },
-               { 12500, 11375, },
-               { 12625, 11500, },
-               { 12750, 11625, },
-               { 12875, 11750, },
-               { 13000, 11875, },
-               { 13125, 12000, },
-               { 13250, 12125, },
-               { 13375, 12250, },
-               { 13500, 12375, },
-               { 13625, 12500, },
-               { 13750, 12625, },
-               { 13875, 12750, },
-               { 14000, 12875, },
-               { 14125, 13000, },
-               { 14250, 13125, },
-               { 14375, 13250, },
-               { 14500, 13375, },
-               { 14625, 13500, },
-               { 14750, 13625, },
-               { 14875, 13750, },
-               { 15000, 13875, },
-               { 15125, 14000, },
-               { 15250, 14125, },
-               { 15375, 14250, },
-               { 15500, 14375, },
-               { 15625, 14500, },
-               { 15750, 14625, },
-               { 15875, 14750, },
-               { 16000, 14875, },
-               { 16125, 15000, },
-       };
+       const int vd = _pxvid_to_vd(pxvid);
+       const int vm = vd - 1125;
        if (INTEL_INFO(dev)->is_mobile)
-               return v_table[pxvid].vm;
-       else
-               return v_table[pxvid].vd;
+               return vm > 0 ? vm : 0;
+       return vd;
  }
  
  static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@@ -6191,20 -5455,6 +5455,20 @@@ void intel_cleanup_gt_powersave(struct 
                valleyview_cleanup_gt_powersave(dev);
  }
  
 +static void gen6_suspend_rps(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 +
 +      /*
 +       * TODO: disable RPS interrupts on GEN9+ too once RPS support
 +       * is added for it.
 +       */
 +      if (INTEL_INFO(dev)->gen < 9)
 +              gen6_disable_rps_interrupts(dev);
 +}
 +
  /**
   * intel_suspend_gt_powersave - suspend PM work and helper threads
   * @dev: drm device
@@@ -6220,7 -5470,14 +5484,7 @@@ void intel_suspend_gt_powersave(struct 
        if (INTEL_INFO(dev)->gen < 6)
                return;
  
 -      flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 -
 -      /*
 -       * TODO: disable RPS interrupts on GEN9+ too once RPS support
 -       * is added for it.
 -       */
 -      if (INTEL_INFO(dev)->gen < 9)
 -              gen6_disable_rps_interrupts(dev);
 +      gen6_suspend_rps(dev);
  
        /* Force GPU to min freq during suspend */
        gen6_rps_idle(dev_priv);
@@@ -6323,11 -5580,8 +5587,11 @@@ void intel_reset_gt_powersave(struct dr
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 +      if (INTEL_INFO(dev)->gen < 6)
 +              return;
 +
 +      gen6_suspend_rps(dev);
        dev_priv->rps.enabled = false;
 -      intel_enable_gt_powersave(dev);
  }
  
  static void ibx_init_clock_gating(struct drm_device *dev)
@@@ -6518,7 -5772,7 +5782,7 @@@ static void gen6_init_clock_gating(stru
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN6_GT_MODE,
 -                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +                 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
  
        ilk_init_lp_watermarks(dev);
  
@@@ -6716,7 -5970,7 +5980,7 @@@ static void haswell_init_clock_gating(s
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN7_GT_MODE,
 -                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +                 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
  
        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@@ -6813,7 -6067,7 +6077,7 @@@ static void ivybridge_init_clock_gating
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN7_GT_MODE,
 -                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +                 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
  
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
@@@ -7051,43 -6305,12 +6315,12 @@@ void intel_suspend_hw(struct drm_devic
                lpt_suspend_hw(dev);
  }
  
- static void intel_init_fbc(struct drm_i915_private *dev_priv)
- {
-       if (!HAS_FBC(dev_priv)) {
-               dev_priv->fbc.enabled = false;
-               return;
-       }
-       if (INTEL_INFO(dev_priv)->gen >= 7) {
-               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-               dev_priv->display.enable_fbc = gen7_enable_fbc;
-               dev_priv->display.disable_fbc = ironlake_disable_fbc;
-       } else if (INTEL_INFO(dev_priv)->gen >= 5) {
-               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-               dev_priv->display.enable_fbc = ironlake_enable_fbc;
-               dev_priv->display.disable_fbc = ironlake_disable_fbc;
-       } else if (IS_GM45(dev_priv)) {
-               dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-               dev_priv->display.enable_fbc = g4x_enable_fbc;
-               dev_priv->display.disable_fbc = g4x_disable_fbc;
-       } else {
-               dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-               dev_priv->display.enable_fbc = i8xx_enable_fbc;
-               dev_priv->display.disable_fbc = i8xx_disable_fbc;
-               /* This value was pulled out of someone's hat */
-               I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-       }
-       dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
- }
  /* Set up chip specific power management-related functions */
  void intel_init_pm(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       intel_init_fbc(dev_priv);
+       intel_fbc_init(dev_priv);
  
        /* For cxsr */
        if (IS_PINEVIEW(dev))
@@@ -52,16 -52,27 +52,27 @@@ intel_ring_initialized(struct intel_eng
  
  int __intel_ring_space(int head, int tail, int size)
  {
-       int space = head - (tail + I915_RING_FREE_SPACE);
-       if (space < 0)
+       int space = head - tail;
+       if (space <= 0)
                space += size;
-       return space;
+       return space - I915_RING_FREE_SPACE;
+ }
+ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+ {
+       if (ringbuf->last_retired_head != -1) {
+               ringbuf->head = ringbuf->last_retired_head;
+               ringbuf->last_retired_head = -1;
+       }
+       ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+                                           ringbuf->tail, ringbuf->size);
  }
  
  int intel_ring_space(struct intel_ringbuffer *ringbuf)
  {
-       return __intel_ring_space(ringbuf->head & HEAD_ADDR,
-                                 ringbuf->tail, ringbuf->size);
+       intel_ring_update_space(ringbuf);
+       return ringbuf->space;
  }
  
  bool intel_ring_stopped(struct intel_engine_cs *ring)
@@@ -362,15 -373,12 +373,15 @@@ gen7_render_ring_flush(struct intel_eng
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 +              flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
  
 +              flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
 +
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
@@@ -592,10 -600,10 +603,10 @@@ static int init_ring_common(struct inte
                goto out;
        }
  
+       ringbuf->last_retired_head = -1;
        ringbuf->head = I915_READ_HEAD(ring);
        ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-       ringbuf->space = intel_ring_space(ringbuf);
-       ringbuf->last_retired_head = -1;
+       intel_ring_update_space(ringbuf);
  
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
  
@@@ -627,8 -635,7 +638,7 @@@ intel_init_pipe_control(struct intel_en
  {
        int ret;
  
-       if (ring->scratch.obj)
-               return 0;
+       WARN_ON(ring->scratch.obj);
  
        ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
        if (ring->scratch.obj == NULL) {
@@@ -672,7 -679,7 +682,7 @@@ static int intel_ring_workarounds_emit(
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
  
-       if (WARN_ON(w->count == 0))
+       if (WARN_ON_ONCE(w->count == 0))
                return 0;
  
        ring->gpu_caches_dirty = true;
        return 0;
  }
  
+ static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
+                             struct intel_context *ctx)
+ {
+       int ret;
+       ret = intel_ring_workarounds_emit(ring, ctx);
+       if (ret != 0)
+               return ret;
+       ret = i915_gem_render_state_init(ring);
+       if (ret)
+               DRM_ERROR("init render state: %d\n", ret);
+       return ret;
+ }
  static int wa_add(struct drm_i915_private *dev_priv,
 -                const u32 addr, const u32 val, const u32 mask)
 +                const u32 addr, const u32 mask, const u32 val)
  {
        const u32 idx = dev_priv->workarounds.count;
  
        return 0;
  }
  
 -#define WA_REG(addr, val, mask) { \
 -              const int r = wa_add(dev_priv, (addr), (val), (mask)); \
 +#define WA_REG(addr, mask, val) { \
 +              const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        }
  
  #define WA_SET_BIT_MASKED(addr, mask) \
 -      WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
 +      WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
  
  #define WA_CLR_BIT_MASKED(addr, mask) \
 -      WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
 +      WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
 +
 +#define WA_SET_FIELD_MASKED(addr, mask, value) \
 +      WA_REG(addr, mask, _MASKED_FIELD(mask, value))
  
 -#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
 -#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
 +#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
 +#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
  
 -#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
 +#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
  
  static int bdw_init_workarounds(struct intel_engine_cs *ring)
  {
         * workaround for for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
+       /* WaForceEnableNonCoherent:bdw */
+       /* WaHdcDisableFetchWhenMasked:bdw */
        /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_NON_COHERENT |
+                         HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                          (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
  
        /* Wa4x4STCOptimizationDisable:bdw */
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
 -      WA_SET_BIT_MASKED(GEN7_GT_MODE,
 -                        GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +      WA_SET_FIELD_MASKED(GEN7_GT_MODE,
 +                          GEN6_WIZ_HASHING_MASK,
 +                          GEN6_WIZ_HASHING_16x4);
  
        return 0;
  }
@@@ -861,12 -883,6 +890,6 @@@ static int init_render_ring(struct inte
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ret = intel_init_pipe_control(ring);
-               if (ret)
-                       return ret;
-       }
        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
@@@ -918,17 -934,20 +941,20 @@@ static int gen8_rcs_signal(struct intel
                return ret;
  
        for_each_ring(waiter, dev_priv, i) {
+               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
  
+               seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
                                           PIPE_CONTROL_FLUSH_ENABLE);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+               intel_ring_emit(signaller, seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
@@@ -956,16 -975,19 +982,19 @@@ static int gen8_xcs_signal(struct intel
                return ret;
  
        for_each_ring(waiter, dev_priv, i) {
+               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
  
+               seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+               intel_ring_emit(signaller, seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
                intel_ring_emit(signaller, 0);
@@@ -994,9 -1016,11 +1023,11 @@@ static int gen6_signal(struct intel_eng
        for_each_ring(useless, dev_priv, i) {
                u32 mbox_reg = signaller->semaphore.mbox.signal[i];
                if (mbox_reg != GEN6_NOSYNC) {
+                       u32 seqno = i915_gem_request_get_seqno(
+                                          signaller->outstanding_lazy_request);
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit(signaller, mbox_reg);
-                       intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+                       intel_ring_emit(signaller, seqno);
                }
        }
  
@@@ -1031,7 -1055,8 +1062,8 @@@ gen6_add_request(struct intel_engine_c
  
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
  
@@@ -1149,7 -1174,8 +1181,8 @@@ pc_render_add_request(struct intel_engi
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);
  
@@@ -1408,7 -1435,8 +1442,8 @@@ i9xx_add_request(struct intel_engine_c
  
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+       intel_ring_emit(ring,
+                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
  
@@@ -1789,15 -1817,15 +1824,15 @@@ int intel_alloc_ringbuffer_obj(struct d
  static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *ring)
  {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf;
        int ret;
  
-       if (ringbuf == NULL) {
-               ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-               if (!ringbuf)
-                       return -ENOMEM;
-               ring->buffer = ringbuf;
-       }
+       WARN_ON(ring->buffer);
+       ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+       if (!ringbuf)
+               return -ENOMEM;
+       ring->buffer = ringbuf;
  
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
                        goto error;
        }
  
-       if (ringbuf->obj == NULL) {
-               ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-               if (ret) {
-                       DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-                                       ring->name, ret);
-                       goto error;
-               }
+       WARN_ON(ringbuf->obj);
  
-               ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-               if (ret) {
-                       DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-                                       ring->name, ret);
-                       intel_destroy_ringbuffer_obj(ringbuf);
-                       goto error;
-               }
+       ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+       if (ret) {
+               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+                               ring->name, ret);
+               goto error;
+       }
+       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+       if (ret) {
+               DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+                               ring->name, ret);
+               intel_destroy_ringbuffer_obj(ringbuf);
+               goto error;
        }
  
        /* Workaround an erratum on the i830 which causes a hang if
        if (ret)
                goto error;
  
-       ret = ring->init(ring);
-       if (ret)
-               goto error;
        return 0;
  
  error:
@@@ -1877,8 -1901,7 +1908,7 @@@ void intel_cleanup_ring_buffer(struct i
  
        intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
-       ring->preallocated_lazy_request = NULL;
-       ring->outstanding_lazy_seqno = 0;
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
  
        if (ring->cleanup)
                ring->cleanup(ring);
@@@ -1895,38 -1918,27 +1925,27 @@@ static int intel_ring_wait_request(stru
  {
        struct intel_ringbuffer *ringbuf = ring->buffer;
        struct drm_i915_gem_request *request;
-       u32 seqno = 0;
        int ret;
  
-       if (ringbuf->last_retired_head != -1) {
-               ringbuf->head = ringbuf->last_retired_head;
-               ringbuf->last_retired_head = -1;
-               ringbuf->space = intel_ring_space(ringbuf);
-               if (ringbuf->space >= n)
-                       return 0;
-       }
+       if (intel_ring_space(ringbuf) >= n)
+               return 0;
  
        list_for_each_entry(request, &ring->request_list, list) {
                if (__intel_ring_space(request->tail, ringbuf->tail,
                                       ringbuf->size) >= n) {
-                       seqno = request->seqno;
                        break;
                }
        }
  
-       if (seqno == 0)
+       if (&request->list == &ring->request_list)
                return -ENOSPC;
  
-       ret = i915_wait_seqno(ring, seqno);
+       ret = i915_wait_request(request);
        if (ret)
                return ret;
  
        i915_gem_retire_requests_ring(ring);
-       ringbuf->head = ringbuf->last_retired_head;
-       ringbuf->last_retired_head = -1;
  
-       ringbuf->space = intel_ring_space(ringbuf);
        return 0;
  }
  
@@@ -1952,14 -1964,14 +1971,14 @@@ static int ring_wait_for_space(struct i
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;
  
+       ret = 0;
        trace_i915_ring_wait_begin(ring);
        do {
+               if (intel_ring_space(ringbuf) >= n)
+                       break;
                ringbuf->head = I915_READ_HEAD(ring);
-               ringbuf->space = intel_ring_space(ringbuf);
-               if (ringbuf->space >= n) {
-                       ret = 0;
+               if (intel_ring_space(ringbuf) >= n)
                        break;
-               }
  
                msleep(1);
  
@@@ -2000,19 -2012,19 +2019,19 @@@ static int intel_wrap_ring_buffer(struc
                iowrite32(MI_NOOP, virt++);
  
        ringbuf->tail = 0;
-       ringbuf->space = intel_ring_space(ringbuf);
+       intel_ring_update_space(ringbuf);
  
        return 0;
  }
  
  int intel_ring_idle(struct intel_engine_cs *ring)
  {
-       u32 seqno;
+       struct drm_i915_gem_request *req;
        int ret;
  
        /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_seqno) {
-               ret = i915_add_request(ring, NULL);
+       if (ring->outstanding_lazy_request) {
+               ret = i915_add_request(ring);
                if (ret)
                        return ret;
        }
        if (list_empty(&ring->request_list))
                return 0;
  
-       seqno = list_entry(ring->request_list.prev,
+       req = list_entry(ring->request_list.prev,
                           struct drm_i915_gem_request,
-                          list)->seqno;
+                          list);
  
-       return i915_wait_seqno(ring, seqno);
+       return i915_wait_request(req);
  }
  
  static int
- intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+ intel_ring_alloc_request(struct intel_engine_cs *ring)
  {
-       if (ring->outstanding_lazy_seqno)
+       int ret;
+       struct drm_i915_gem_request *request;
+       struct drm_i915_private *dev_private = ring->dev->dev_private;
+       if (ring->outstanding_lazy_request)
                return 0;
  
-       if (ring->preallocated_lazy_request == NULL) {
-               struct drm_i915_gem_request *request;
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
  
-               request = kmalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return -ENOMEM;
+       kref_init(&request->ref);
+       request->ring = ring;
+       request->uniq = dev_private->request_uniq++;
  
-               ring->preallocated_lazy_request = request;
+       ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+       if (ret) {
+               kfree(request);
+               return ret;
        }
  
-       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+       ring->outstanding_lazy_request = request;
+       return 0;
  }
  
  static int __intel_ring_prepare(struct intel_engine_cs *ring,
@@@ -2084,7 -2105,7 +2112,7 @@@ int intel_ring_begin(struct intel_engin
                return ret;
  
        /* Preallocate the olr before touching the ring */
-       ret = intel_ring_alloc_seqno(ring);
+       ret = intel_ring_alloc_request(ring);
        if (ret)
                return ret;
  
@@@ -2119,7 -2140,7 +2147,7 @@@ void intel_ring_init_seqno(struct intel
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       BUG_ON(ring->outstanding_lazy_seqno);
+       BUG_ON(ring->outstanding_lazy_request);
  
        if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@@ -2341,7 -2362,7 +2369,7 @@@ int intel_init_render_ring_buffer(struc
                        }
                }
  
-               ring->init_context = intel_ring_workarounds_emit;
+               ring->init_context = intel_rcs_ctx_init;
                ring->add_request = gen6_add_request;
                ring->flush = gen8_render_ring_flush;
                ring->irq_get = gen8_ring_get_irq;
                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-       ring->init = init_render_ring;
+       ring->init_hw = init_render_ring;
        ring->cleanup = render_ring_cleanup;
  
        /* Workaround batchbuffer to combat CS tlb bug. */
                ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        }
  
-       return intel_init_ring_buffer(dev, ring);
+       ret = intel_init_ring_buffer(dev, ring);
+       if (ret)
+               return ret;
+       if (INTEL_INFO(dev)->gen >= 5) {
+               ret = intel_init_pipe_control(ring);
+               if (ret)
+                       return ret;
+       }
+       return 0;
  }
  
  int intel_init_bsd_ring_buffer(struct drm_device *dev)
                }
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
-       ring->init = init_ring_common;
+       ring->init_hw = init_ring_common;
  
        return intel_init_ring_buffer(dev, ring);
  }
@@@ -2558,7 -2589,7 +2596,7 @@@ int intel_init_bsd2_ring_buffer(struct 
                ring->semaphore.signal = gen8_xcs_signal;
                GEN8_RING_SEMAPHORE_INIT;
        }
-       ring->init = init_ring_common;
+       ring->init_hw = init_ring_common;
  
        return intel_init_ring_buffer(dev, ring);
  }
@@@ -2615,7 -2646,7 +2653,7 @@@ int intel_init_blt_ring_buffer(struct d
                        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
                }
        }
-       ring->init = init_ring_common;
+       ring->init_hw = init_ring_common;
  
        return intel_init_ring_buffer(dev, ring);
  }
@@@ -2666,7 -2697,7 +2704,7 @@@ int intel_init_vebox_ring_buffer(struc
                        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
                }
        }
-       ring->init = init_ring_common;
+       ring->init_hw = init_ring_common;
  
        return intel_init_ring_buffer(dev, ring);
  }
diff --combined include/drm/drm_crtc.h
@@@ -63,16 -63,8 +63,16 @@@ struct drm_mode_object 
  
  #define DRM_OBJECT_MAX_PROPERTY 24
  struct drm_object_properties {
 -      int count;
 -      uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
 +      int count, atomic_count;
 +      /* NOTE: if we ever start dynamically destroying properties (ie.
 +       * not at drm_mode_config_cleanup() time), then we'd have to do
 +       * a better job of detaching property from mode objects to avoid
 +       * dangling property pointers:
 +       */
 +      struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
 +      /* do not read/write values directly, but use drm_object_property_get_value()
 +       * and drm_object_property_set_value():
 +       */
        uint64_t values[DRM_OBJECT_MAX_PROPERTY];
  };
  
@@@ -145,14 -137,6 +145,14 @@@ struct drm_display_info 
        u8 cea_rev;
  };
  
 +/* data corresponds to displayid vend/prod/serial */
 +struct drm_tile_group {
 +      struct kref refcount;
 +      struct drm_device *dev;
 +      int id;
 +      u8 group_data[8];
 +};
 +
  struct drm_framebuffer_funcs {
        /* note: use drm_framebuffer_remove() */
        void (*destroy)(struct drm_framebuffer *framebuffer);
@@@ -245,9 -229,7 +245,9 @@@ struct drm_atomic_state
  
  /**
   * struct drm_crtc_state - mutable CRTC state
 + * @crtc: backpointer to the CRTC
   * @enable: whether the CRTC should be enabled, gates all other state
 + * @active: whether the CRTC is actively displaying (used for DPMS)
   * @mode_changed: for use by helpers and drivers when computing state updates
   * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
   * @last_vblank_count: for helpers and drivers to capture the vblank of the
   * @event: optional pointer to a DRM event to signal upon completion of the
   *    state update
   * @state: backpointer to global drm_atomic_state
 + *
 + * Note that the distinction between @enable and @active is rather subtle:
 + * Flipping @active while @enable is set without changing anything else
 + * must never fail in the ->atomic_check callback. Userspace assumes that
 + * a DPMS On will always succeed. In other words: @enable controls resource
 + * assignment, @active controls the actual hardware state.
   */
  struct drm_crtc_state {
 +      struct drm_crtc *crtc;
 +
        bool enable;
 +      bool active;
  
        /* computed state bits used by helpers and drivers */
        bool planes_changed : 1;
   * @atomic_duplicate_state: duplicate the atomic state for this CRTC
   * @atomic_destroy_state: destroy an atomic state for this CRTC
   * @atomic_set_property: set a property on an atomic state for this CRTC
 + *    (do not call directly, use drm_atomic_crtc_set_property())
 + * @atomic_get_property: get a property on an atomic state for this CRTC
 + *    (do not call directly, use drm_atomic_crtc_get_property())
   *
   * The drm_crtc_funcs structure is the central CRTC management structure
   * in the DRM.  Each CRTC controls one or more connectors (note that the name
@@@ -373,10 -343,6 +373,10 @@@ struct drm_crtc_funcs 
                                   struct drm_crtc_state *state,
                                   struct drm_property *property,
                                   uint64_t val);
 +      int (*atomic_get_property)(struct drm_crtc *crtc,
 +                                 const struct drm_crtc_state *state,
 +                                 struct drm_property *property,
 +                                 uint64_t *val);
  };
  
  /**
@@@ -475,14 -441,11 +475,14 @@@ struct drm_crtc 
  
  /**
   * struct drm_connector_state - mutable connector state
 + * @connector: backpointer to the connector
   * @crtc: CRTC to connect connector to, NULL if disabled
   * @best_encoder: can be used by helpers and drivers to select the encoder
   * @state: backpointer to global drm_atomic_state
   */
  struct drm_connector_state {
 +      struct drm_connector *connector;
 +
        struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
  
        struct drm_encoder *best_encoder;
  
  /**
   * struct drm_connector_funcs - control connectors on a given device
 - * @dpms: set power state (see drm_crtc_funcs above)
 + * @dpms: set power state
   * @save: save connector state
   * @restore: restore connector state
   * @reset: reset connector after state has been invalidated (e.g. resume)
   * @atomic_duplicate_state: duplicate the atomic state for this connector
   * @atomic_destroy_state: destroy an atomic state for this connector
   * @atomic_set_property: set a property on an atomic state for this connector
 + *    (do not call directly, use drm_atomic_connector_set_property())
 + * @atomic_get_property: get a property on an atomic state for this connector
 + *    (do not call directly, use drm_atomic_connector_get_property())
   *
   * Each CRTC may have one or more connectors attached to it.  The functions
   * below allow the core DRM code to control connectors, enumerate available modes,
@@@ -540,10 -500,6 +540,10 @@@ struct drm_connector_funcs 
                                   struct drm_connector_state *state,
                                   struct drm_property *property,
                                   uint64_t val);
 +      int (*atomic_get_property)(struct drm_connector *connector,
 +                                 const struct drm_connector_state *state,
 +                                 struct drm_property *property,
 +                                 uint64_t *val);
  };
  
  /**
@@@ -643,15 -599,6 +643,15 @@@ struct drm_encoder 
   * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
   * @debugfs_entry: debugfs directory for this connector
   * @state: current atomic state for this connector
 + * @has_tile: is this connector connected to a tiled monitor
 + * @tile_group: tile group for the connected monitor
 + * @tile_is_single_monitor: whether the tiled display is contained in a single monitor housing
 + * @num_h_tile: number of horizontal tiles in the tile group
 + * @num_v_tile: number of vertical tiles in the tile group
 + * @tile_h_loc: horizontal location of this tile
 + * @tile_v_loc: vertical location of this tile
 + * @tile_h_size: horizontal size of this tile.
 + * @tile_v_size: vertical size of this tile.
   *
   * Each connector may be connected to one or more CRTCs, or may be clonable by
   * another connector if they can share a CRTC.  Each connector also has a specific
@@@ -687,8 -634,6 +687,8 @@@ struct drm_connector 
  
        struct drm_property_blob *path_blob_ptr;
  
 +      struct drm_property_blob *tile_blob_ptr;
 +
        uint8_t polled; /* DRM_CONNECTOR_POLL_* */
  
        /* requested DPMS state */
        struct dentry *debugfs_entry;
  
        struct drm_connector_state *state;
 +
 +      /* DisplayID bits */
 +      bool has_tile;
 +      struct drm_tile_group *tile_group;
 +      bool tile_is_single_monitor;
 +
 +      uint8_t num_h_tile, num_v_tile;
 +      uint8_t tile_h_loc, tile_v_loc;
 +      uint16_t tile_h_size, tile_v_size;
  };
  
  /**
   * struct drm_plane_state - mutable plane state
 + * @plane: backpointer to the plane
   * @crtc: currently bound CRTC, NULL if disabled
   * @fb: currently bound framebuffer
   * @fence: optional fence to wait for before scanning out @fb
   * @state: backpointer to global drm_atomic_state
   */
  struct drm_plane_state {
 +      struct drm_plane *plane;
 +
        struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
        struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
        struct fence *fence;
   * @atomic_duplicate_state: duplicate the atomic state for this plane
   * @atomic_destroy_state: destroy an atomic state for this plane
   * @atomic_set_property: set a property on an atomic state for this plane
 + *    (do not call directly, use drm_atomic_plane_set_property())
 + * @atomic_get_property: get a property on an atomic state for this plane
 + *    (do not call directly, use drm_atomic_plane_get_property())
   */
  struct drm_plane_funcs {
        int (*update_plane)(struct drm_plane *plane,
                                   struct drm_plane_state *state,
                                   struct drm_property *property,
                                   uint64_t val);
 +      int (*atomic_get_property)(struct drm_plane *plane,
 +                                 const struct drm_plane_state *state,
 +                                 struct drm_property *property,
 +                                 uint64_t *val);
  };
  
  enum drm_plane_type {
@@@ -902,7 -828,7 +902,7 @@@ struct drm_bridge 
  /**
   * struct drm_atomic_state - the global state object for atomic updates
   * @dev: parent DRM device
 - * @flags: state flags like async update
 + * @allow_modeset: allow full modeset
   * @planes: pointer to array of plane pointers
   * @plane_states: pointer to array of plane states pointers
   * @crtcs: pointer to array of CRTC pointers
   */
  struct drm_atomic_state {
        struct drm_device *dev;
 -      uint32_t flags;
 +      bool allow_modeset : 1;
        struct drm_plane **planes;
        struct drm_plane_state **plane_states;
        struct drm_crtc **crtcs;
@@@ -1052,7 -978,6 +1052,7 @@@ struct drm_mode_config 
        struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
        struct mutex idr_mutex; /* for IDR management */
        struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
 +      struct idr tile_idr; /* use this idr for allocating IDs for tile groups */
        /* this is limited to one for now */
  
        struct mutex fb_lock; /* protects global and per-file fb lists */
        struct drm_property *edid_property;
        struct drm_property *dpms_property;
        struct drm_property *path_property;
 +      struct drm_property *tile_property;
        struct drm_property *plane_type_property;
        struct drm_property *rotation_property;
 +      struct drm_property *prop_src_x;
 +      struct drm_property *prop_src_y;
 +      struct drm_property *prop_src_w;
 +      struct drm_property *prop_src_h;
 +      struct drm_property *prop_crtc_x;
 +      struct drm_property *prop_crtc_y;
 +      struct drm_property *prop_crtc_w;
 +      struct drm_property *prop_crtc_h;
 +      struct drm_property *prop_fb_id;
 +      struct drm_property *prop_crtc_id;
  
        /* DVI-I properties */
        struct drm_property *dvi_i_subconnector_property;
@@@ -1247,6 -1161,8 +1247,8 @@@ extern int drm_plane_init(struct drm_de
  extern void drm_plane_cleanup(struct drm_plane *plane);
  extern unsigned int drm_plane_index(struct drm_plane *plane);
  extern void drm_plane_force_disable(struct drm_plane *plane);
+ extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+                                  int *hdisplay, int *vdisplay);
  extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
                                   int x, int y,
                                   const struct drm_display_mode *mode,
@@@ -1276,7 -1192,6 +1278,7 @@@ extern void drm_mode_config_cleanup(str
  
  extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
                                                const char *path);
 +int drm_mode_connector_set_tile_property(struct drm_connector *connector);
  extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                                   const struct edid *edid);
  
@@@ -1346,10 -1261,6 +1348,10 @@@ extern int drm_mode_create_scaling_mode
  extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
  extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
  extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
 +extern bool drm_property_change_valid_get(struct drm_property *property,
 +                                       uint64_t value, struct drm_mode_object **ref);
 +extern void drm_property_change_valid_put(struct drm_property *property,
 +              struct drm_mode_object *ref);
  
  extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
                                             struct drm_encoder *encoder);
@@@ -1417,13 -1328,6 +1419,13 @@@ extern void drm_set_preferred_mode(stru
  extern int drm_edid_header_is_valid(const u8 *raw_edid);
  extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
  extern bool drm_edid_is_valid(struct edid *edid);
 +
 +extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 +                                                       char topology[8]);
 +extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 +                                             char topology[8]);
 +extern void drm_mode_put_tile_group(struct drm_device *dev,
 +                                 struct drm_tile_group *tg);
  struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
                                           int hsize, int vsize, int fresh,
                                           bool rb);
@@@ -1441,8 -1345,6 +1443,8 @@@ extern int drm_mode_obj_set_property_io
  extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
                                       struct drm_property *property,
                                       uint64_t value);
 +extern int drm_mode_atomic_ioctl(struct drm_device *dev,
 +                               void *data, struct drm_file *file_priv);
  
  extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
                                 int *bpp);
diff --combined include/drm/drm_modes.h
@@@ -90,6 -90,9 +90,9 @@@ enum drm_mode_status 
  
  #define CRTC_INTERLACE_HALVE_V        (1 << 0) /* halve V values for interlacing */
  #define CRTC_STEREO_DOUBLE    (1 << 1) /* adjust timings for stereo modes */
+ #define CRTC_NO_DBLSCAN               (1 << 2) /* don't adjust doublescan */
+ #define CRTC_NO_VSCAN         (1 << 3) /* don't adjust vscan */
+ #define CRTC_STEREO_DOUBLE_ONLY       (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
  
  #define DRM_MODE_FLAG_3D_MAX  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
  
@@@ -217,9 -220,9 +220,9 @@@ bool drm_mode_equal_no_clocks_no_stereo
                                        const struct drm_display_mode *mode2);
  
  /* for use by the crtc helper probe functions */
 -void drm_mode_validate_size(struct drm_device *dev,
 -                          struct list_head *mode_list,
 -                          int maxX, int maxY);
 +enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
 +enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
 +                                          int maxX, int maxY);
  void drm_mode_prune_invalid(struct drm_device *dev,
                            struct list_head *mode_list, bool verbose);
  void drm_mode_sort(struct list_head *mode_list);