Merge branch 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux into drm...
authorDave Airlie <airlied@redhat.com>
Fri, 6 May 2016 04:17:22 +0000 (14:17 +1000)
committerDave Airlie <airlied@redhat.com>
Fri, 6 May 2016 04:17:22 +0000 (14:17 +1000)
This is the first big radeon/amdgpu pull request for 4.7.  Highlights:
    - Polaris support in amdgpu
      Current display stack on par with other asics, for advanced features DAL is required
      Power management support
      Support for GFX, Compute, SDMA, UVD, VCE
    - VCE and UVD init/fini cleanup in radeon
    - GPUVM improvements
    - Scheduler improvements
    - Clockgating improvements
    - Powerplay improvements
    - TTM changes to support driver specific LRU update mechanism
    - Radeon support for new Mesa features
    - ASYNC pageflip support for radeon
    - Lots of bug fixes and code cleanups

* 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux: (180 commits)
  drm/amdgpu: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  drm/amdgpu: use drm_mode_vrefresh() rather than mode->vrefresh
  drm/amdgpu/uvd6: add bypass support for fiji (v3)
  drm/amdgpu/fiji: set UVD CG state when enabling UVD DPM (v2)
  drm/powerplay: add missing clockgating callback for tonga
  drm/amdgpu: Constify some tables
  drm/amd/powerplay: Delete dead struct declaration
  drm/amd/powerplay/hwmgr: don't add invalid voltage
  drm/amd/powerplay/hwmgr: prevent VDDC from exceeding 2V
  MAINTAINERS: Remove unneeded wildcard for the Radeon/AMDGPU drivers
  drm/radeon: add cayman VM support for append packet.
  drm/amd/amdgpu: Add debugfs entries for smc/didt/pcie
  drm/amd/amdgpu: Drop print_status callbacks.
  drm/amd/powerplay: revise reading/writing pptable on Polaris10
  drm/amd/powerplay: revise reading/writing pptable on Tonga
  drm/amd/powerplay: revise reading/writing pptable on Fiji
  drm/amd/powerplay: revise caching the soft pptable and add it's size
  drm/amd/powerplay: add dpm force multiple levels on cz/tonga/fiji/polaris (v2)
  drm/amd/powerplay: fix fan speed percent setting error on Polaris10
  drm/amd/powerplay: fix bug dpm can't work when resume back on Polaris
  ...

166 files changed:
Documentation/DocBook/gpu.tmpl
Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt [new file with mode: 0644]
MAINTAINERS
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_aux_dev.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_legacy.h
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-hdmi.h
drivers/gpu/drm/fsl-dcu/Kconfig
drivers/gpu/drm/hisilicon/Kconfig [new file with mode: 0644]
drivers/gpu/drm/hisilicon/Makefile [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/Kconfig [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/Makefile [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c [new file with mode: 0644]
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h [new file with mode: 0644]
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/vc4/Kconfig
drivers/gpu/drm/vc4/Makefile
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_debugfs.c
drivers/gpu/drm/vc4/vc4_dpi.c [new file with mode: 0644]
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_regs.h
drivers/video/fbdev/core/fb_defio.c
include/drm/drmP.h
include/drm/drm_agpsupport.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_crtc.h
include/drm/drm_fb_cma_helper.h
include/drm/drm_fb_helper.h
include/drm/drm_gem.h
include/drm/drm_legacy.h
include/linux/fb.h
include/uapi/drm/drm_mode.h
include/video/exynos5433_decon.h

index 1464fb2..56386d3 100644 (file)
@@ -1817,7 +1817,7 @@ void intel_crt_init(struct drm_device *dev)
        </tr>
        <tr>
        <td rowspan="42" valign="top" >DRM</td>
-       <td valign="top" >Generic</td>
+       <td rowspan="2" valign="top" >Generic</td>
        <td valign="top" >“rotation”</td>
        <td valign="top" >BITMASK</td>
        <td valign="top" >{ 0, "rotate-0" },
@@ -1832,6 +1832,13 @@ void intel_crt_init(struct drm_device *dev)
        image along the specified axis prior to rotation</td>
        </tr>
        <tr>
+       <td valign="top" >“scaling mode”</td>
+       <td valign="top" >ENUM</td>
+       <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
+       <td valign="top" >Connector</td>
+       <td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
+       </tr>
+       <tr>
        <td rowspan="5" valign="top" >Connector</td>
        <td valign="top" >“EDID”</td>
        <td valign="top" >BLOB | IMMUTABLE</td>
@@ -2068,21 +2075,12 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >property to suggest an Y offset for a connector</td>
        </tr>
        <tr>
-       <td rowspan="8" valign="top" >Optional</td>
-       <td valign="top" >“scaling mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
+       <td rowspan="7" valign="top" >Optional</td>
        <td valign="top" >"aspect ratio"</td>
        <td valign="top" >ENUM</td>
        <td valign="top" >{ "None", "4:3", "16:9" }</td>
        <td valign="top" >Connector</td>
-       <td valign="top" >DRM property to set aspect ratio from user space app.
-               This enum is made generic to allow addition of custom aspect
-               ratios.</td>
+       <td valign="top" >TBD</td>
        </tr>
        <tr>
        <td valign="top" >“dirty”</td>
index 9f97df4..a5ea451 100644 (file)
@@ -35,12 +35,22 @@ Optional properties for HDMI:
                  as an interrupt/status bit in the HDMI controller
                  itself).  See bindings/pinctrl/brcm,bcm2835-gpio.txt
 
+Required properties for DPI:
+- compatible:  Should be "brcm,bcm2835-dpi"
+- reg:         Physical base address and length of the registers
+- clocks:      a) core: The core clock the unit runs on
+               b) pixel: The pixel clock that feeds the pixelvalve
+- port:                Port node with a single endpoint connecting to the panel
+                 device, as defined in [1]
+
 Required properties for V3D:
 - compatible:  Should be "brcm,bcm2835-v3d"
 - reg:         Physical base address and length of the V3D's registers
 - interrupts:  The interrupt number
                  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
 
+[1] Documentation/devicetree/bindings/media/video-interfaces.txt
+
 Example:
 pixelvalve@7e807000 {
        compatible = "brcm,bcm2835-pixelvalve2";
@@ -66,6 +76,22 @@ hdmi: hdmi@7e902000 {
        clock-names = "pixel", "hdmi";
 };
 
+dpi: dpi@7e208000 {
+       compatible = "brcm,bcm2835-dpi";
+       reg = <0x7e208000 0x8c>;
+       clocks = <&clocks BCM2835_CLOCK_VPU>,
+                <&clocks BCM2835_CLOCK_DPI>;
+       clock-names = "core", "pixel";
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       port {
+               dpi_out: endpoint@0 {
+                       remote-endpoint = <&panel_in>;
+               };
+       };
+};
+
 v3d: v3d@7ec00000 {
        compatible = "brcm,bcm2835-v3d";
        reg = <0x7ec00000 0x1000>;
@@ -75,3 +101,13 @@ v3d: v3d@7ec00000 {
 vc4: gpu {
        compatible = "brcm,bcm2835-vc4";
 };
+
+panel: panel {
+       compatible = "ontat,yx700wv03", "simple-panel";
+
+       port {
+               panel_in: endpoint {
+                       remote-endpoint = <&dpi_out>;
+               };
+       };
+};
index 377afbf..c9fd7b3 100644 (file)
@@ -5,7 +5,8 @@ Exynos series of SoCs which transfers the image data from a video memory
 buffer to an external LCD interface.
 
 Required properties:
-- compatible: value should be "samsung,exynos5433-decon";
+- compatible: value should be one of:
+       "samsung,exynos5433-decon", "samsung,exynos5433-decon-tv";
 - reg: physical base address and length of the DECON registers set.
 - interrupts: should contain a list of all DECON IP block interrupts in the
              order: VSYNC, LCD_SYSTEM. The interrupt specifier format
@@ -16,7 +17,7 @@ Required properties:
 - clocks: must include clock specifiers corresponding to entries in the
          clock-names property.
 - clock-names: list of clock names sorted in the same order as the clocks
-              property. Must contain "aclk_decon", "aclk_smmu_decon0x",
+              property. Must contain "pclk", "aclk_decon", "aclk_smmu_decon0x",
               "aclk_xiu_decon0x", "pclk_smmu_decon0x", clk_decon_vclk",
               "sclk_decon_eclk"
 - ports: contains a port which is connected to mic node. address-cells and
index d474f59..a2ec4c1 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        1) "samsung,exynos4210-hdmi"
        2) "samsung,exynos4212-hdmi"
        3) "samsung,exynos5420-hdmi"
+       4) "samsung,exynos5433-hdmi"
 - reg: physical base address of the hdmi and length of memory mapped
        region.
 - interrupts: interrupt number to the cpu.
@@ -12,6 +13,11 @@ Required properties:
        a) phandle of the gpio controller node.
        b) pin number within the gpio controller.
        c) optional flags and pull up/down.
+- ddc: phandle to the hdmi ddc node
+- phy: phandle to the hdmi phy node
+- samsung,syscon-phandle: phandle for system controller node for PMU.
+
+Required properties for Exynos 4210, 4212, 5420 and 5433:
 - clocks: list of clock IDs from SoC clock driver.
        a) hdmi: Gate of HDMI IP bus clock.
        b) sclk_hdmi: Gate of HDMI special clock.
@@ -25,9 +31,24 @@ Required properties:
                sclk_pixel.
 - clock-names: aliases as per driver requirements for above clock IDs:
        "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
-- ddc: phandle to the hdmi ddc node
-- phy: phandle to the hdmi phy node
-- samsung,syscon-phandle: phandle for system controller node for PMU.
+
+Required properties for Exynos 5433:
+- clocks: list of clock specifiers according to common clock bindings.
+       a) hdmi_pclk: Gate of HDMI IP APB bus.
+       b) hdmi_i_pclk: Gate of HDMI-PHY IP APB bus.
+       d) i_tmds_clk: Gate of HDMI TMDS clock.
+       e) i_pixel_clk: Gate of HDMI pixel clock.
+       f) i_spdif_clk: Gate of HDMI SPDIF clock.
+       g) oscclk: Oscillator clock, used as parent of following *_user clocks
+               in case HDMI-PHY is not operational.
+       h) tmds_clko: TMDS clock generated by HDMI-PHY.
+       i) tmds_clko_user: MUX used to switch between oscclk and tmds_clko,
+               respectively if HDMI-PHY is off and operational.
+       j) pixel_clko: Pixel clock generated by HDMI-PHY.
+       k) pixel_clko_user: MUX used to switch between oscclk and pixel_clko,
+               respectively if HDMI-PHY is off and operational.
+- clock-names: aliases for above clock specifiers.
+- samsung,sysreg: handle to syscon used to control the system registers.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt b/Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt
new file mode 100644 (file)
index 0000000..d270bfe
--- /dev/null
@@ -0,0 +1,72 @@
+Device-Tree bindings for DesignWare DSI Host Controller v1.20a driver
+
+A DSI Host Controller resides in the middle of display controller and external
+HDMI converter or panel.
+
+Required properties:
+- compatible: value should be "hisilicon,hi6220-dsi".
+- reg: physical base address and length of dsi controller's registers.
+- clocks: contains APB clock phandle + clock-specifier pair.
+- clock-names: should be "pclk".
+- ports: contains DSI controller input and output sub port.
+  The input port connects to ADE output port with the reg value "0".
+  The output port with the reg value "1", it could connect to panel or
+  any other bridge endpoints.
+  See Documentation/devicetree/bindings/graph.txt for more device graph info.
+
+An example of HiKey board hi6220 SoC and board specific DT entry:
+Example:
+
+SoC specific:
+       dsi: dsi@f4107800 {
+               compatible = "hisilicon,hi6220-dsi";
+               reg = <0x0 0xf4107800 0x0 0x100>;
+               clocks = <&media_ctrl  HI6220_DSI_PCLK>;
+               clock-names = "pclk";
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* 0 for input port */
+                       port@0 {
+                               reg = <0>;
+                               dsi_in: endpoint {
+                                       remote-endpoint = <&ade_out>;
+                               };
+                       };
+               };
+       };
+
+
+Board specific:
+       &dsi {
+               status = "ok";
+
+               ports {
+                       /* 1 for output port */
+                       port@1 {
+                               reg = <1>;
+
+                               dsi_out0: endpoint@0 {
+                                       remote-endpoint = <&adv7533_in>;
+                               };
+                       };
+               };
+       };
+
+       &i2c2 {
+               ...
+
+               adv7533: adv7533@39 {
+                       ...
+
+                       port {
+                               adv7533_in: endpoint {
+                                       remote-endpoint = <&dsi_out0>;
+                               };
+                       };
+               };
+       };
+
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
new file mode 100644 (file)
index 0000000..38dc9d6
--- /dev/null
@@ -0,0 +1,64 @@
+Device-Tree bindings for hisilicon ADE display controller driver
+
+ADE (Advanced Display Engine) is the display controller which grabs image
+data from memory, performs composition and post image processing, generates
+the RGB timing stream and transfers it to DSI.
+
+Required properties:
+- compatible: value should be "hisilicon,hi6220-ade".
+- reg: physical base address and length of the ADE controller's registers.
+- hisilicon,noc-syscon: ADE NOC QoS syscon.
+- resets: The ADE reset controller node.
+- interrupt: the ldi vblank interrupt number used.
+- clocks: a list of phandle + clock-specifier pairs, one for each entry
+  in clock-names.
+- clock-names: should contain:
+  "clk_ade_core" for the ADE core clock.
+  "clk_codec_jpeg" for the media NOC QoS clock, which use the same clock with
+  jpeg codec.
+  "clk_ade_pix" for the ADE pixel clock.
+- assigned-clocks: Should contain "clk_ade_core" and "clk_codec_jpeg" clocks'
+  phandle + clock-specifier pairs.
+- assigned-clock-rates: clock rates, one for each entry in assigned-clocks.
+  The rate of "clk_ade_core" could be "360000000" or "180000000";
+  The rate of "clk_codec_jpeg" could be or less than "1440000000".
+  These rate values could be configured according to performance and power
+  consumption.
+- port: the output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected DSI input endpoint.
+  See Documentation/devicetree/bindings/graph.txt for more device graph info.
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent.
+
+
+An example of HiKey board hi6220 SoC specific DT entry:
+Example:
+
+       ade: ade@f4100000 {
+               compatible = "hisilicon,hi6220-ade";
+               reg = <0x0 0xf4100000 0x0 0x7800>;
+               reg-names = "ade_base";
+               hisilicon,noc-syscon = <&medianoc_ade>;
+               resets = <&media_ctrl MEDIA_ADE>;
+               interrupts = <0 115 4>; /* ldi interrupt */
+
+               clocks = <&media_ctrl HI6220_ADE_CORE>,
+                        <&media_ctrl HI6220_CODEC_JPEG>,
+                        <&media_ctrl HI6220_ADE_PIX_SRC>;
+               /*clock name*/
+               clock-names  = "clk_ade_core",
+                              "clk_codec_jpeg",
+                              "clk_ade_pix";
+
+               assigned-clocks = <&media_ctrl HI6220_ADE_CORE>,
+                       <&media_ctrl HI6220_CODEC_JPEG>;
+               assigned-clock-rates = <360000000>, <288000000>;
+               dma-coherent;
+
+               port {
+                       ade_out: endpoint {
+                               remote-endpoint = <&dsi_in>;
+                       };
+               };
+       };
index 4f2e3a4..e6ee4ec 100644 (file)
@@ -3768,6 +3768,21 @@ F:       drivers/gpu/vga/
 F:     include/drm/
 F:     include/uapi/drm/
 
+DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
+M:     Dave Airlie <airlied@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/ast/
+
+DRM DRIVER FOR BOCHS VIRTUAL GPU
+M:     Gerd Hoffmann <kraxel@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/bochs/
+
+DRM DRIVER FOR QEMU'S CIRRUS DEVICE
+M:     Dave Airlie <airlied@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/cirrus/
+
 RADEON and AMDGPU DRM DRIVERS
 M:     Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
@@ -3800,7 +3815,7 @@ T:        git git://anongit.freedesktop.org/drm-intel
 S:     Supported
 F:     drivers/gpu/drm/i915/
 F:     include/drm/i915*
-F:     include/uapi/drm/i915*
+F:     include/uapi/drm/i915_drm.h
 
 DRM DRIVERS FOR ATMEL HLCDC
 M:     Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -3825,8 +3840,8 @@ L:        dri-devel@lists.freedesktop.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
 S:     Supported
 F:     drivers/gpu/drm/exynos/
-F:     include/drm/exynos*
-F:     include/uapi/drm/exynos*
+F:     include/uapi/drm/exynos_drm.h
+F:     Documentation/devicetree/bindings/display/exynos/
 
 DRM DRIVERS FOR FREESCALE DCU
 M:     Stefan Agner <stefan@agner.ch>
@@ -3851,8 +3866,42 @@ M:       Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 T:     git git://github.com/patjak/drm-gma500
 S:     Maintained
-F:     drivers/gpu/drm/gma500
-F:     include/drm/gma500*
+F:     drivers/gpu/drm/gma500/
+
+DRM DRIVERS FOR HISILICON
+M:     Xinliang Liu <z.liuxinliang@hisilicon.com>
+R:     Xinwei Kong <kong.kongxinwei@hisilicon.com>
+R:     Chen Feng <puck.chen@hisilicon.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://github.com/xin3liang/linux.git
+S:     Maintained
+F:     drivers/gpu/drm/hisilicon/
+F:     Documentation/devicetree/bindings/display/hisilicon/
+
+DRM DRIVER FOR INTEL I810 VIDEO CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/i810/
+F:     include/uapi/drm/i810_drm.h
+
+DRM DRIVER FOR MSM ADRENO GPU
+M:     Rob Clark <robdclark@gmail.com>
+L:     linux-arm-msm@vger.kernel.org
+L:     dri-devel@lists.freedesktop.org
+L:     freedreno@lists.freedesktop.org
+T:     git git://people.freedesktop.org/~robclark/linux
+S:     Maintained
+F:     drivers/gpu/drm/msm/
+F:     include/uapi/drm/msm_drm.h
+F:     Documentation/devicetree/bindings/display/msm/
+
+DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
+M:     Ben Skeggs <bskeggs@redhat.com>
+L:     dri-devel@lists.freedesktop.org
+L:     nouveau@lists.freedesktop.org
+T:     git git://github.com/skeggsb/linux
+S:     Supported
+F:     drivers/gpu/drm/nouveau/
+F:     include/uapi/drm/nouveau_drm.h
 
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
@@ -3867,22 +3916,54 @@ F:      include/linux/host1x.h
 F:     include/uapi/drm/tegra_drm.h
 F:     Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
 
+DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/mga/
+F:     include/uapi/drm/mga_drm.h
+
+DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
+M:     Dave Airlie <airlied@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/mgag200/
+
+DRM DRIVER FOR RAGE 128 VIDEO CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/r128/
+F:     include/uapi/drm/r128_drm.h
+
 DRM DRIVERS FOR RENESAS
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     dri-devel@lists.freedesktop.org
 L:     linux-renesas-soc@vger.kernel.org
-T:     git git://people.freedesktop.org/~airlied/linux
+T:     git git://linuxtv.org/pinchartl/fbdev
 S:     Supported
 F:     drivers/gpu/drm/rcar-du/
 F:     drivers/gpu/drm/shmobile/
 F:     include/linux/platform_data/shmob_drm.h
+F:     Documentation/devicetree/bindings/display/renesas,du.txt
+
+DRM DRIVER FOR QXL VIRTUAL GPU
+M:     Dave Airlie <airlied@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/qxl/
+F:     include/uapi/drm/qxl_drm.h
 
 DRM DRIVERS FOR ROCKCHIP
 M:     Mark Yao <mark.yao@rock-chips.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 F:     drivers/gpu/drm/rockchip/
-F:     Documentation/devicetree/bindings/display/rockchip*
+F:     Documentation/devicetree/bindings/display/rockchip/
+
+DRM DRIVER FOR SAVAGE VIDEO CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/savage/
+F:     include/uapi/drm/savage_drm.h
+
+DRM DRIVER FOR SIS VIDEO CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/sis/
+F:     include/uapi/drm/sis_drm.h
 
 DRM DRIVERS FOR STI
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
@@ -3893,14 +3974,43 @@ S:      Maintained
 F:     drivers/gpu/drm/sti
 F:     Documentation/devicetree/bindings/display/st,stih4xx.txt
 
+DRM DRIVER FOR TDFX VIDEO CARDS
+S:     Orphan / Obsolete
+F:     drivers/gpu/drm/tdfx/
+
+DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
+M:     Dave Airlie <airlied@redhat.com>
+S:     Odd Fixes
+F:     drivers/gpu/drm/udl/
+
 DRM DRIVERS FOR VIVANTE GPU IP
 M:     Lucas Stach <l.stach@pengutronix.de>
 R:     Russell King <linux+etnaviv@arm.linux.org.uk>
 R:     Christian Gmeiner <christian.gmeiner@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
-F:     drivers/gpu/drm/etnaviv
-F:     Documentation/devicetree/bindings/display/etnaviv
+F:     drivers/gpu/drm/etnaviv/
+F:     include/uapi/drm/etnaviv_drm.h
+F:     Documentation/devicetree/bindings/display/etnaviv/
+
+DRM DRIVER FOR VMWARE VIRTUAL GPU
+M:     "VMware Graphics" <linux-graphics-maintainer@vmware.com>
+M:     Sinclair Yeh <syeh@vmware.com>
+M:     Thomas Hellstrom <thellstrom@vmware.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://people.freedesktop.org/~syeh/repos_linux
+T:     git git://people.freedesktop.org/~thomash/linux
+S:     Supported
+F:     drivers/gpu/drm/vmwgfx/
+F:     include/uapi/drm/vmwgfx_drm.h
+
+DRM DRIVERS FOR VC4
+M:     Eric Anholt <eric@anholt.net>
+T:     git git://github.com/anholt/linux
+S:     Supported
+F:     drivers/gpu/drm/vc4/
+F:     include/uapi/drm/vc4_drm.h
+F:     Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
 
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
@@ -6922,6 +7032,8 @@ MARVELL ARMADA DRM SUPPORT
 M:     Russell King <rmk+kernel@arm.linux.org.uk>
 S:     Maintained
 F:     drivers/gpu/drm/armada/
+F:     include/uapi/drm/armada_drm.h
+F:     Documentation/devicetree/bindings/display/armada/
 
 MARVELL 88E6352 DSA support
 M:     Guenter Roeck <linux@roeck-us.net>
index cd51502..16e4c21 100644 (file)
@@ -52,6 +52,7 @@ config DRM_KMS_FB_HELPER
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
+       select FB_DEFERRED_IO
        help
          FBDEV helpers for KMS drivers.
 
@@ -285,3 +286,5 @@ source "drivers/gpu/drm/vc4/Kconfig"
 source "drivers/gpu/drm/etnaviv/Kconfig"
 
 source "drivers/gpu/drm/arc/Kconfig"
+
+source "drivers/gpu/drm/hisilicon/Kconfig"
index 1a26b4e..43c2abf 100644 (file)
@@ -80,3 +80,4 @@ obj-y                 += bridge/
 obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
 obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
 obj-$(CONFIG_DRM_ARCPGU)+= arc/
+obj-y                  += hisilicon/
index b48942a..1dab5f2 100644 (file)
@@ -524,7 +524,7 @@ static struct drm_driver kms_driver = {
        .irq_uninstall = amdgpu_irq_uninstall,
        .irq_handler = amdgpu_irq_handler,
        .ioctls = amdgpu_ioctls_kms,
-       .gem_free_object = amdgpu_gem_object_free,
+       .gem_free_object_unlocked = amdgpu_gem_object_free,
        .gem_open_object = amdgpu_gem_object_open,
        .gem_close_object = amdgpu_gem_object_close,
        .dumb_create = amdgpu_mode_dumb_create,
index 0635bb6..c68f4ca 100644 (file)
@@ -93,7 +93,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;
 
-       mutex_lock(&ddev->struct_mutex);
+       mutex_lock(&ddev->filelist_mutex);
 
        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
@@ -103,13 +103,13 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
-                       drm_gem_object_unreference(gobj);
+                       drm_gem_object_unreference_unlocked(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }
 
-       mutex_unlock(&ddev->struct_mutex);
+       mutex_unlock(&ddev->filelist_mutex);
 }
 
 /*
@@ -769,7 +769,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
        struct drm_file *file;
        int r;
 
-       r = mutex_lock_interruptible(&dev->struct_mutex);
+       r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;
 
@@ -793,7 +793,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
                spin_unlock(&file->table_lock);
        }
 
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->filelist_mutex);
        return 0;
 }
 
index f7f67f3..8af5fbc 100644 (file)
@@ -3359,7 +3359,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
 
        /* wakeup usersapce */
        if (works->event)
-               drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
index e4f3dc7..dda9ffb 100644 (file)
@@ -3422,7 +3422,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 
        /* wakeup usersapce */
        if(works->event)
-               drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
index 429e98a..25e6af0 100644 (file)
@@ -3368,7 +3368,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 
        /* wakeup usersapce */
        if (works->event)
-               drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
index 07ac724..ee3e04e 100644 (file)
@@ -109,7 +109,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
 
        is_32bit_user_mode = in_compat_syscall();
 
-       if (is_32bit_user_mode == true) {
+       if (is_32bit_user_mode) {
                dev_warn(kfd_device,
                        "Process %d (32-bit) failed to open /dev/kfd\n"
                        "32-bit processes are not supported by amdkfd\n",
@@ -131,12 +131,11 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
                                        void *data)
 {
        struct kfd_ioctl_get_version_args *args = data;
-       int err = 0;
 
        args->major_version = KFD_IOCTL_MAJOR_VERSION;
        args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
-       return err;
+       return 0;
 }
 
 static int set_queue_properties_from_user(struct queue_properties *q_properties,
index 4bb7f42..f49c551 100644 (file)
@@ -216,7 +216,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
                }
        }
 
-       if (set == false)
+       if (!set)
                return -EBUSY;
 
        pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
@@ -354,7 +354,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                return -ENOMEM;
        }
 
-       if (q->properties.is_active == true)
+       if (q->properties.is_active)
                prev_active = true;
 
        /*
@@ -363,9 +363,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
         * and modify counter accordingly
         */
        retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
-       if ((q->properties.is_active == true) && (prev_active == false))
+       if ((q->properties.is_active) && (!prev_active))
                dqm->queue_count++;
-       else if ((q->properties.is_active == false) && (prev_active == true))
+       else if ((!q->properties.is_active) && (prev_active))
                dqm->queue_count--;
 
        if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -954,7 +954,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 
        if (lock)
                mutex_lock(&dqm->lock);
-       if (dqm->active_runlist == false)
+       if (!dqm->active_runlist)
                goto out;
 
        pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
index b6e28dc..a6a4b2b 100644 (file)
@@ -177,9 +177,9 @@ static bool allocate_event_notification_slot(struct file *devkfd,
        bool ret;
 
        ret = allocate_free_slot(p, page, signal_slot_index);
-       if (ret == false) {
+       if (!ret) {
                ret = allocate_signal_page(devkfd, p);
-               if (ret == true)
+               if (ret)
                        ret = allocate_free_slot(p, page, signal_slot_index);
        }
 
index 8fa8941..9beae87 100644 (file)
@@ -300,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
                break;
        }
 
-       if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+       if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
                pr_err("amdkfd: failed to init kernel queue\n");
                kfree(kq);
                return NULL;
index 90f3914..ca8c093 100644 (file)
@@ -98,7 +98,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
        int retval;
 
        BUG_ON(!pm);
-       BUG_ON(pm->allocated == true);
+       BUG_ON(pm->allocated);
        BUG_ON(is_over_subscription == NULL);
 
        pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
@@ -292,7 +292,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
                        q->properties.doorbell_off;
 
        packet->mes_map_queues_ordinals[0].bitfields3.is_static =
-                       (use_static == true) ? 1 : 0;
+                       (use_static) ? 1 : 0;
 
        packet->mes_map_queues_ordinals[0].mqd_addr_lo =
                        lower_32_bits(q->gart_mqd_addr);
@@ -357,7 +357,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
                                alloc_size_bytes);
 
                list_for_each_entry(kq, &qpd->priv_queue_list, list) {
-                       if (kq->queue->properties.is_active != true)
+                       if (!kq->queue->properties.is_active)
                                continue;
 
                        pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
@@ -383,7 +383,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
                }
 
                list_for_each_entry(q, &qpd->queues_list, list) {
-                       if (q->properties.is_active != true)
+                       if (!q->properties.is_active)
                                continue;
 
                        pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
@@ -531,7 +531,7 @@ fail_create_runlist:
 fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
-       if (pm->allocated == true)
+       if (pm->allocated)
                pm_release_ib(pm);
        return retval;
 }
@@ -647,7 +647,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
        default:
                BUG();
                break;
-       };
+       }
 
        pm->priv_queue->ops.submit_packet(pm->priv_queue);
 
index 3ac1ae4..734899c 100644 (file)
@@ -113,7 +113,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
 }
 
 static int hdlcd_atomic_commit(struct drm_device *dev,
-                              struct drm_atomic_state *state, bool async)
+                              struct drm_atomic_state *state, bool nonblock)
 {
        return drm_atomic_helper_commit(dev, state, false);
 }
index a10ea6a..605bd24 100644 (file)
@@ -423,7 +423,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 }
 
 /**
- * drm_agp_clear - Clear AGP resource list
+ * drm_legacy_agp_clear - Clear AGP resource list
  * @dev: DRM device
  *
  * Iterate over all AGP resources and remove them. But keep the AGP head
@@ -434,7 +434,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
  * resources from getting destroyed. Drivers are responsible of cleaning them up
  * during device shutdown.
  */
-void drm_agp_clear(struct drm_device *dev)
+void drm_legacy_agp_clear(struct drm_device *dev)
 {
        struct drm_agp_mem *entry, *tempe;
 
index 8ee1db8..86e89db 100644 (file)
@@ -31,6 +31,8 @@
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
 
+#include "drm_crtc_internal.h"
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
@@ -142,18 +144,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
                if (!connector)
                        continue;
 
-               /*
-                * FIXME: Async commits can race with connector unplugging and
-                * there's currently nothing that prevents cleanup up state for
-                * deleted connectors. As long as the callback doesn't look at
-                * the connector we'll be fine though, so make sure that's the
-                * case by setting all connector pointers to NULL.
-                */
-               state->connector_states[i]->connector = NULL;
-               connector->funcs->atomic_destroy_state(NULL,
+               connector->funcs->atomic_destroy_state(connector,
                                                       state->connector_states[i]);
                state->connectors[i] = NULL;
                state->connector_states[i] = NULL;
+               drm_connector_unreference(connector);
        }
 
        for (i = 0; i < config->num_crtc; i++) {
@@ -261,6 +256,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
        int ret, index = drm_crtc_index(crtc);
        struct drm_crtc_state *crtc_state;
 
+       WARN_ON(!state->acquire_ctx);
+
        crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
        if (crtc_state)
                return crtc_state;
@@ -620,6 +617,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
        int ret, index = drm_plane_index(plane);
        struct drm_plane_state *plane_state;
 
+       WARN_ON(!state->acquire_ctx);
+
        plane_state = drm_atomic_get_existing_plane_state(state, plane);
        if (plane_state)
                return plane_state;
@@ -888,6 +887,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
        struct drm_mode_config *config = &connector->dev->mode_config;
        struct drm_connector_state *connector_state;
 
+       WARN_ON(!state->acquire_ctx);
+
        ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);
@@ -924,6 +925,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
        if (!connector_state)
                return ERR_PTR(-ENOMEM);
 
+       drm_connector_reference(connector);
        state->connector_states[index] = connector_state;
        state->connectors[index] = connector;
        connector_state->state = state;
@@ -1158,6 +1160,8 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 {
        struct drm_crtc_state *crtc_state;
 
+       if (crtc)
+               drm_connector_reference(conn_state->connector);
        if (conn_state->crtc && conn_state->crtc != crtc) {
                crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
                                                                conn_state->crtc);
@@ -1175,6 +1179,8 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                        1 << drm_connector_index(conn_state->connector);
        }
 
+       if (conn_state->crtc)
+               drm_connector_unreference(conn_state->connector);
        conn_state->crtc = crtc;
 
        if (crtc)
@@ -1388,7 +1394,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
 EXPORT_SYMBOL(drm_atomic_commit);
 
 /**
- * drm_atomic_async_commit - atomic&async configuration commit
+ * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
  * @state: atomic configuration to check
  *
  * Note that this function can return -EDEADLK if the driver needed to acquire
@@ -1403,7 +1409,7 @@ EXPORT_SYMBOL(drm_atomic_commit);
  * Returns:
  * 0 on success, negative error code on failure.
  */
-int drm_atomic_async_commit(struct drm_atomic_state *state)
+int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
 {
        struct drm_mode_config *config = &state->dev->mode_config;
        int ret;
@@ -1412,11 +1418,11 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
        if (ret)
                return ret;
 
-       DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
+       DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
 
        return config->funcs->atomic_commit(state->dev, state, true);
 }
-EXPORT_SYMBOL(drm_atomic_async_commit);
+EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 
 /*
  * The big monstor ioctl
@@ -1614,12 +1620,19 @@ retry:
                }
 
                obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
-               if (!obj || !obj->properties) {
+               if (!obj) {
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               if (!obj->properties) {
+                       drm_mode_object_unreference(obj);
                        ret = -ENOENT;
                        goto out;
                }
 
                if (get_user(count_props, count_props_ptr + copied_objs)) {
+                       drm_mode_object_unreference(obj);
                        ret = -EFAULT;
                        goto out;
                }
@@ -1632,12 +1645,14 @@ retry:
                        struct drm_property *prop;
 
                        if (get_user(prop_id, props_ptr + copied_props)) {
+                               drm_mode_object_unreference(obj);
                                ret = -EFAULT;
                                goto out;
                        }
 
                        prop = drm_property_find(dev, prop_id);
                        if (!prop) {
+                               drm_mode_object_unreference(obj);
                                ret = -ENOENT;
                                goto out;
                        }
@@ -1645,13 +1660,16 @@ retry:
                        if (copy_from_user(&prop_value,
                                           prop_values_ptr + copied_props,
                                           sizeof(prop_value))) {
+                               drm_mode_object_unreference(obj);
                                ret = -EFAULT;
                                goto out;
                        }
 
                        ret = atomic_set_prop(state, obj, prop, prop_value);
-                       if (ret)
+                       if (ret) {
+                               drm_mode_object_unreference(obj);
                                goto out;
+                       }
 
                        copied_props++;
                }
@@ -1662,6 +1680,7 @@ retry:
                        plane_mask |= (1 << drm_plane_index(plane));
                        plane->old_fb = plane->fb;
                }
+               drm_mode_object_unreference(obj);
        }
 
        if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1685,7 +1704,7 @@ retry:
                 */
                ret = drm_atomic_check_only(state);
        } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
-               ret = drm_atomic_async_commit(state);
+               ret = drm_atomic_nonblocking_commit(state);
        } else {
                ret = drm_atomic_commit(state);
        }
index d25abce..997fd21 100644 (file)
@@ -1114,13 +1114,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  * drm_atomic_helper_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: whether nonblocking behavior is requested.
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
  * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * now this doesn't implement nonblocking commits.
  *
- * Note that right now this function does not support async commits, and hence
+ * Note that right now this function does not support nonblocking commits, hence
  * driver writers must implement their own version for now. Also note that the
  * default ordering of how the various stages are called is to match the legacy
  * modeset helper library closest. One peculiarity of that is that it doesn't
@@ -1141,11 +1141,11 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  */
 int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
-                            bool async)
+                            bool nonblock)
 {
        int ret;
 
-       if (async)
+       if (nonblock)
                return -EBUSY;
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -1195,20 +1195,20 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_commit);
 
 /**
- * DOC: implementing async commit
+ * DOC: implementing nonblocking commit
  *
- * For now the atomic helpers don't support async commit directly. If there is
- * real need it could be added though, using the dma-buf fence infrastructure
- * for generic synchronization with outstanding rendering.
+ * For now the atomic helpers don't support nonblocking commit directly. If
+ * there is real need it could be added though, using the dma-buf fence
+ * infrastructure for generic synchronization with outstanding rendering.
  *
- * For now drivers have to implement async commit themselves, with the following
- * sequence being the recommended one:
+ * For now drivers have to implement nonblocking commit themselves, with the
+ * following sequence being the recommended one:
  *
  * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
  * which commit needs to call which can fail, so we want to run it first and
  * synchronously.
  *
- * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * 2. Synchronize with any outstanding nonblocking commit worker threads which
  * might be affected the new state update. This can be done by either cancelling
  * or flushing the work items, depending upon whether the driver can deal with
  * cancelled updates. Note that it is important to ensure that the framebuffer
@@ -1222,9 +1222,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  * 3. The software state is updated synchronously with
  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
  * locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant async worker runs means that async
- * workers do not need grab any locks. Actually they must not grab locks, for
- * otherwise the work flushing will deadlock.
+ * while it's guaranteed that no relevant nonblocking worker runs means that
+ * nonblocking workers do not need to grab any locks. Actually they must not
+ * locks, for otherwise the work flushing will deadlock.
  *
  * 4. Schedule a work item to do all subsequent steps, using the split-out
  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
@@ -2371,11 +2371,11 @@ retry:
                goto fail;
        }
 
-       ret = drm_atomic_async_commit(state);
+       ret = drm_atomic_nonblocking_commit(state);
        if (ret != 0)
                goto fail;
 
-       /* Driver takes ownership of state on successful async commit. */
+       /* Driver takes ownership of state on successful commit. */
        return 0;
 fail:
        if (ret == -EDEADLK)
@@ -2762,6 +2762,8 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
                                            struct drm_connector_state *state)
 {
        memcpy(state, connector->state, sizeof(*state));
+       if (state->crtc)
+               drm_connector_reference(connector);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
 
@@ -2889,6 +2891,8 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
         * state will automatically do the right thing if code is ever added
         * to this function.
         */
+       if (state->crtc)
+               drm_connector_unreference(state->connector);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
 
index f1a204d..9b34158 100644 (file)
@@ -396,6 +396,10 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
                return -EPERM;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        err = drm_addmap_core(dev, map->offset, map->size, map->type,
                              map->flags, &maplist);
 
@@ -416,6 +420,62 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+/*
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *r_list = NULL;
+       struct list_head *list;
+       int idx;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       idx = map->offset;
+       if (idx < 0)
+               return -EINVAL;
+
+       i = 0;
+       mutex_lock(&dev->struct_mutex);
+       list_for_each(list, &dev->maplist) {
+               if (i == idx) {
+                       r_list = list_entry(list, struct drm_map_list, head);
+                       break;
+               }
+               i++;
+       }
+       if (!r_list || !r_list->map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       map->offset = r_list->map->offset;
+       map->size = r_list->map->size;
+       map->type = r_list->map->type;
+       map->flags = r_list->map->flags;
+       map->handle = (void *)(unsigned long) r_list->user_token;
+       map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 /**
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
@@ -482,18 +542,35 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 }
 EXPORT_SYMBOL(drm_legacy_rmmap_locked);
 
-int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
-       int ret;
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_legacy_rmmap_locked(dev, map);
+       drm_legacy_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);
-
-       return ret;
 }
 EXPORT_SYMBOL(drm_legacy_rmmap);
 
+void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_map_list *r_list, *list_temp;
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+               if (r_list->master == master) {
+                       drm_legacy_rmmap_locked(dev, r_list->map);
+                       r_list = NULL;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
  * exit uncleanly.  Therefore, having userland manually remove mappings seems
@@ -517,6 +594,10 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
        struct drm_map_list *r_list;
        int ret;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
index 4e5b015..a9c0a43 100644 (file)
@@ -168,6 +168,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
        { DRM_MODE_CONNECTOR_eDP, "eDP" },
        { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
        { DRM_MODE_CONNECTOR_DSI, "DSI" },
+       { DRM_MODE_CONNECTOR_DPI, "DPI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
@@ -179,6 +180,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
        { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
        { DRM_MODE_ENCODER_DSI, "DSI" },
        { DRM_MODE_ENCODER_DPMST, "DP MST" },
+       { DRM_MODE_ENCODER_DPI, "DPI" },
 };
 
 static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
@@ -862,6 +864,16 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
                      mode->interlace ?  " interlaced" : "");
 }
 
+static void drm_connector_free(struct kref *kref)
+{
+       struct drm_connector *connector =
+               container_of(kref, struct drm_connector, base.refcount);
+       struct drm_device *dev = connector->dev;
+
+       drm_mode_object_unregister(dev, &connector->base);
+       connector->funcs->destroy(connector);
+}
+
 /**
  * drm_connector_init - Init a preallocated connector
  * @dev: DRM device
@@ -887,7 +899,9 @@ int drm_connector_init(struct drm_device *dev,
 
        drm_modeset_lock_all(dev);
 
-       ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false, NULL);
+       ret = drm_mode_object_get_reg(dev, &connector->base,
+                                     DRM_MODE_OBJECT_CONNECTOR,
+                                     false, drm_connector_free);
        if (ret)
                goto out_unlock;
 
@@ -1936,8 +1950,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
                copied = 0;
                crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
                drm_for_each_crtc(crtc, dev) {
-                       DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
-                                     crtc->base.id, crtc->name);
                        if (put_user(crtc->base.id, crtc_id + copied)) {
                                ret = -EFAULT;
                                goto out;
@@ -1952,8 +1964,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
                copied = 0;
                encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
                drm_for_each_encoder(encoder, dev) {
-                       DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-                                       encoder->name);
                        if (put_user(encoder->base.id, encoder_id +
                                     copied)) {
                                ret = -EFAULT;
@@ -1969,9 +1979,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
                copied = 0;
                connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
                drm_for_each_connector(connector, dev) {
-                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                               connector->base.id,
-                               connector->name);
                        if (put_user(connector->base.id,
                                     connector_id + copied)) {
                                ret = -EFAULT;
@@ -1982,9 +1989,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        }
        card_res->count_connectors = connector_count;
 
-       DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
-                 card_res->count_connectors, card_res->count_encoders);
-
 out:
        mutex_unlock(&dev->mode_config.mutex);
        return ret;
@@ -2143,11 +2147,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
-
        mutex_lock(&dev->mode_config.mutex);
 
-       connector = drm_connector_find(dev, out_resp->connector_id);
+       connector = drm_connector_lookup(dev, out_resp->connector_id);
        if (!connector) {
                ret = -ENOENT;
                goto out_unlock;
@@ -2231,6 +2233,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 out:
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
+       drm_connector_unreference(connector);
 out_unlock:
        mutex_unlock(&dev->mode_config.mutex);
 
@@ -2875,13 +2878,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                }
 
                for (i = 0; i < crtc_req->count_connectors; i++) {
+                       connector_set[i] = NULL;
                        set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
                        if (get_user(out_id, &set_connectors_ptr[i])) {
                                ret = -EFAULT;
                                goto out;
                        }
 
-                       connector = drm_connector_find(dev, out_id);
+                       connector = drm_connector_lookup(dev, out_id);
                        if (!connector) {
                                DRM_DEBUG_KMS("Connector id %d unknown\n",
                                                out_id);
@@ -2909,6 +2913,12 @@ out:
        if (fb)
                drm_framebuffer_unreference(fb);
 
+       if (connector_set) {
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       if (connector_set[i])
+                               drm_connector_unreference(connector_set[i]);
+               }
+       }
        kfree(connector_set);
        drm_mode_destroy(dev, mode);
        drm_modeset_unlock_all(dev);
@@ -4999,7 +5009,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
        property = obj_to_property(prop_obj);
 
        if (!drm_property_change_valid_get(property, arg->value, &ref))
-               goto out;
+               goto out_unref;
 
        switch (arg_obj->type) {
        case DRM_MODE_OBJECT_CONNECTOR:
index 66ca313..f47a252 100644 (file)
@@ -456,6 +456,9 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
                         * between them is henceforth no longer available.
                         */
                        connector->dpms = DRM_MODE_DPMS_OFF;
+
+                       /* we keep a reference while the encoder is bound */
+                       drm_connector_unreference(connector);
                }
        }
 
@@ -606,6 +609,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
        }
 
+       /* take a reference on all connectors in set */
+       for (ro = 0; ro < set->num_connectors; ro++) {
+               drm_connector_reference(set->connectors[ro]);
+       }
+
        /* a) traverse passed in connector list and get encoders for them */
        count = 0;
        drm_for_each_connector(connector, dev) {
@@ -724,6 +732,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                }
        }
 
+       /* on success, drop the references taken on the saved connector copies */
+       count = 0;
+       drm_for_each_connector(connector, dev) {
+               drm_connector_unreference(&save_connectors[count++]);
+       }
+
        kfree(save_connectors);
        kfree(save_encoders);
        return 0;
@@ -740,6 +754,11 @@ fail:
                *connector = save_connectors[count++];
        }
 
+       /* after fail drop reference on all connectors in set */
+       for (ro = 0; ro < set->num_connectors; ro++) {
+               drm_connector_unreference(set->connectors[ro]);
+       }
+
        /* Try to restore the config */
        if (mode_changed &&
            !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
index f73b38b..3334baa 100644 (file)
@@ -159,6 +159,12 @@ static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
                uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
                ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
 
+               if (signal_pending(current)) {
+                       res = num_bytes_processed ?
+                               num_bytes_processed : -ERESTARTSYS;
+                       goto out;
+               }
+
                res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
                if (res <= 0) {
                        res = num_bytes_processed ? num_bytes_processed : res;
@@ -202,6 +208,12 @@ static ssize_t auxdev_write(struct file *file, const char __user *buf,
                uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
                ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
 
+               if (signal_pending(current)) {
+                       res = num_bytes_processed ?
+                               num_bytes_processed : -ERESTARTSYS;
+                       goto out;
+               }
+
                if (__copy_from_user(localbuf,
                                     buf + num_bytes_processed, todo)) {
                        res = num_bytes_processed ?
index df64ed1..eeaf5a7 100644 (file)
@@ -178,8 +178,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
                              unsigned int offset, void *buffer, size_t size)
 {
        struct drm_dp_aux_msg msg;
-       unsigned int retry;
-       int err = 0;
+       unsigned int retry, native_reply;
+       int err = 0, ret = 0;
 
        memset(&msg, 0, sizeof(msg));
        msg.address = offset;
@@ -196,38 +196,39 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
         * sufficient, bump to 32 which makes Dell 4k monitors happier.
         */
        for (retry = 0; retry < 32; retry++) {
-
-               err = aux->transfer(aux, &msg);
-               if (err < 0) {
-                       if (err == -EBUSY)
-                               continue;
-
-                       goto unlock;
+               if (ret != 0 && ret != -ETIMEDOUT) {
+                       usleep_range(AUX_RETRY_INTERVAL,
+                                    AUX_RETRY_INTERVAL + 100);
                }
 
+               ret = aux->transfer(aux, &msg);
 
-               switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
-               case DP_AUX_NATIVE_REPLY_ACK:
-                       if (err < size)
-                               err = -EPROTO;
-                       goto unlock;
+               if (ret > 0) {
+                       native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+                       if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+                               if (ret == size)
+                                       goto unlock;
 
-               case DP_AUX_NATIVE_REPLY_NACK:
-                       err = -EIO;
-                       goto unlock;
-
-               case DP_AUX_NATIVE_REPLY_DEFER:
-                       usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
-                       break;
+                               ret = -EPROTO;
+                       } else
+                               ret = -EIO;
                }
+
+               /*
+                * We want the error we return to be the error we received on
+                * the first transaction, since we may get a different error the
+                * next time we retry
+                */
+               if (!err)
+                       err = ret;
        }
 
        DRM_DEBUG_KMS("too many retries, giving up\n");
-       err = -EIO;
+       ret = err;
 
 unlock:
        mutex_unlock(&aux->hw_mutex);
-       return err;
+       return ret;
 }
 
 /**
@@ -247,6 +248,25 @@ unlock:
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
                         void *buffer, size_t size)
 {
+       int ret;
+
+       /*
+        * HP ZR24w corrupts the first DPCD access after entering power save
+        * mode. Eg. on a read, the entire buffer will be filled with the same
+        * byte. Do a throw away read to avoid corrupting anything we care
+        * about. Afterwards things will work correctly until the monitor
+        * gets woken up and subsequently re-enters power save mode.
+        *
+        * The user pressing any button on the monitor is enough to wake it
+        * up, so there is no particularly good place to do the workaround.
+        * We just have to do it before any DPCD access and hope that the
+        * monitor doesn't power down exactly after the throw away read.
+        */
+       ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
+                                1);
+       if (ret != 1)
+               return ret;
+
        return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
                                  size);
 }
index f8a7a6e..bff8922 100644 (file)
@@ -121,19 +121,11 @@ static void drm_master_destroy(struct kref *kref)
 {
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_device *dev = master->minor->dev;
-       struct drm_map_list *r_list, *list_temp;
 
-       mutex_lock(&dev->struct_mutex);
        if (dev->driver->master_destroy)
                dev->driver->master_destroy(dev, master);
 
-       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
-               if (r_list->master == master) {
-                       drm_legacy_rmmap_locked(dev, r_list->map);
-                       r_list = NULL;
-               }
-       }
-       mutex_unlock(&dev->struct_mutex);
+       drm_legacy_master_rmmaps(dev, master);
 
        idr_destroy(&master->magic_map);
        kfree(master->unique);
@@ -598,6 +590,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
        spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->filelist_mutex);
        mutex_init(&dev->ctxlist_mutex);
        mutex_init(&dev->master_mutex);
 
index bb88e3d..086f600 100644 (file)
@@ -25,6 +25,8 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <linux/module.h>
 
+#define DEFAULT_FBDEFIO_DELAY_MS 50
+
 struct drm_fb_cma {
        struct drm_framebuffer          fb;
        struct drm_gem_cma_object       *obj[4];
@@ -35,6 +37,61 @@ struct drm_fbdev_cma {
        struct drm_fb_cma       *fb;
 };
 
+/**
+ * DOC: framebuffer cma helper functions
+ *
+ * Provides helper functions for creating a cma (contiguous memory allocator)
+ * backed framebuffer.
+ *
+ * drm_fb_cma_create() is used in the
+ * (struct drm_mode_config_funcs *)->fb_create callback function to create the
+ * cma backed framebuffer.
+ *
+ * An fbdev framebuffer backed by cma is also available by calling
+ * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * If CONFIG_FB_DEFERRED_IO is enabled and the callback
+ * (struct drm_framebuffer_funcs)->dirty is set, fb_deferred_io
+ * will be set up automatically. dirty() is called by
+ * drm_fb_helper_deferred_io() in process context (struct delayed_work).
+ *
+ * Example fbdev deferred io code:
+ *
+ *     static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
+ *                                      struct drm_file *file_priv,
+ *                                      unsigned flags, unsigned color,
+ *                                      struct drm_clip_rect *clips,
+ *                                      unsigned num_clips)
+ *     {
+ *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
+ *         ... push changes ...
+ *         return 0;
+ *     }
+ *
+ *     static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
+ *         .destroy       = drm_fb_cma_destroy,
+ *         .create_handle = drm_fb_cma_create_handle,
+ *         .dirty         = driver_fbdev_fb_dirty,
+ *     };
+ *
+ *     static int driver_fbdev_create(struct drm_fb_helper *helper,
+ *             struct drm_fb_helper_surface_size *sizes)
+ *     {
+ *         return drm_fbdev_cma_create_with_funcs(helper, sizes,
+ *                                                &driver_fbdev_fb_funcs);
+ *     }
+ *
+ *     static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
+ *         .fb_probe = driver_fbdev_create,
+ *     };
+ *
+ *     Initialize:
+ *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ *                                           dev->mode_config.num_crtc,
+ *                                           dev->mode_config.num_connector,
+ *                                           &driver_fb_helper_funcs);
+ *
+ */
+
 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
 {
        return container_of(helper, struct drm_fbdev_cma, fb_helper);
@@ -45,7 +102,7 @@ static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
        return container_of(fb, struct drm_fb_cma, fb);
 }
 
-static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+void drm_fb_cma_destroy(struct drm_framebuffer *fb)
 {
        struct drm_fb_cma *fb_cma = to_fb_cma(fb);
        int i;
@@ -58,8 +115,9 @@ static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
        drm_framebuffer_cleanup(fb);
        kfree(fb_cma);
 }
+EXPORT_SYMBOL(drm_fb_cma_destroy);
 
-static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
        struct drm_file *file_priv, unsigned int *handle)
 {
        struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -67,6 +125,7 @@ static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
        return drm_gem_handle_create(file_priv,
                        &fb_cma->obj[0]->base, handle);
 }
+EXPORT_SYMBOL(drm_fb_cma_create_handle);
 
 static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
        .destroy        = drm_fb_cma_destroy,
@@ -76,7 +135,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
 static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
        const struct drm_mode_fb_cmd2 *mode_cmd,
        struct drm_gem_cma_object **obj,
-       unsigned int num_planes)
+       unsigned int num_planes, struct drm_framebuffer_funcs *funcs)
 {
        struct drm_fb_cma *fb_cma;
        int ret;
@@ -91,7 +150,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
        for (i = 0; i < num_planes; i++)
                fb_cma->obj[i] = obj[i];
 
-       ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+       ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
        if (ret) {
                dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
                kfree(fb_cma);
@@ -145,7 +204,7 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
                objs[i] = to_drm_gem_cma_obj(obj);
        }
 
-       fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+       fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, &drm_fb_cma_funcs);
        if (IS_ERR(fb_cma)) {
                ret = PTR_ERR(fb_cma);
                goto err_gem_object_unreference;
@@ -233,8 +292,67 @@ static struct fb_ops drm_fbdev_cma_ops = {
        .fb_setcmap     = drm_fb_helper_setcmap,
 };
 
-static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
-       struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
+                                         struct vm_area_struct *vma)
+{
+       fb_deferred_io_mmap(info, vma);
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+       return 0;
+}
+
+static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+                                   struct drm_gem_cma_object *cma_obj)
+{
+       struct fb_deferred_io *fbdefio;
+       struct fb_ops *fbops;
+
+       /*
+        * Per device structures are needed because:
+        * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
+        * fbdefio: individual delays
+        */
+       fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+       fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+       if (!fbdefio || !fbops) {
+               kfree(fbdefio);
+               return -ENOMEM;
+       }
+
+       /* can't be offset from vaddr since dirty() uses cma_obj */
+       fbi->screen_buffer = cma_obj->vaddr;
+       /* fb_deferred_io_fault() needs a physical address */
+       fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
+
+       *fbops = *fbi->fbops;
+       fbi->fbops = fbops;
+
+       fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
+       fbdefio->deferred_io = drm_fb_helper_deferred_io;
+       fbi->fbdefio = fbdefio;
+       fb_deferred_io_init(fbi);
+       fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
+
+       return 0;
+}
+
+static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
+{
+       if (!fbi->fbdefio)
+               return;
+
+       fb_deferred_io_cleanup(fbi);
+       kfree(fbi->fbdefio);
+       kfree(fbi->fbops);
+}
+
+/*
+ * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
+ * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
+ */
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+       struct drm_fb_helper_surface_size *sizes,
+       struct drm_framebuffer_funcs *funcs)
 {
        struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -270,7 +388,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
                goto err_gem_free_object;
        }
 
-       fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+       fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
        if (IS_ERR(fbdev_cma->fb)) {
                dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
                ret = PTR_ERR(fbdev_cma->fb);
@@ -296,31 +414,48 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
+       if (funcs->dirty) {
+               ret = drm_fbdev_cma_defio_init(fbi, obj);
+               if (ret)
+                       goto err_cma_destroy;
+       }
+
        return 0;
 
+err_cma_destroy:
+       drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+       drm_fb_cma_destroy(&fbdev_cma->fb->fb);
 err_fb_info_destroy:
        drm_fb_helper_release_fbi(helper);
 err_gem_free_object:
        dev->driver->gem_free_object(&obj->base);
        return ret;
 }
+EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+       struct drm_fb_helper_surface_size *sizes)
+{
+       return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
+}
 
 static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
        .fb_probe = drm_fbdev_cma_create,
 };
 
 /**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device
  * @num_crtc: Number of CRTCs
  * @max_conn_count: Maximum number of connectors
+ * @funcs: fb helper functions, in particular fb_probe()
  *
  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
        unsigned int preferred_bpp, unsigned int num_crtc,
-       unsigned int max_conn_count)
+       unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
 {
        struct drm_fbdev_cma *fbdev_cma;
        struct drm_fb_helper *helper;
@@ -334,7 +469,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
        helper = &fbdev_cma->fb_helper;
 
-       drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+       drm_fb_helper_prepare(dev, helper, funcs);
 
        ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
        if (ret < 0) {
@@ -364,6 +499,24 @@ err_free:
 
        return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+
+/**
+ * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+       unsigned int preferred_bpp, unsigned int num_crtc,
+       unsigned int max_conn_count)
+{
+       return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
+                               max_conn_count, &drm_fb_cma_helper_funcs);
+}
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 
 /**
@@ -373,6 +526,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
        drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+       drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
        drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
        if (fbdev_cma->fb) {
index 855108e..0bb3d4b 100644 (file)
@@ -84,6 +84,15 @@ static LIST_HEAD(kernel_fb_helper_list);
  * and set up an initial configuration using the detected hardware, drivers
  * should call drm_fb_helper_single_add_all_connectors() followed by
  * drm_fb_helper_initial_config().
+ *
+ * If CONFIG_FB_DEFERRED_IO is enabled and &drm_framebuffer_funcs ->dirty is
+ * set, the drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit}
+ * functions will accumulate changes and schedule &fb_helper .dirty_work to run
+ * right away. This worker then calls the dirty() function ensuring that it
+ * will always run in process context since the fb_*() function could be
+ * running in atomic context. If drm_fb_helper_deferred_io() is used as the
+ * deferred_io callback it will also schedule dirty_work with the damage
+ * collected from the mmap page writes.
  */
 
 /**
@@ -153,40 +162,13 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
        if (!fb_helper_connector)
                return -ENOMEM;
 
+       drm_connector_reference(connector);
        fb_helper_connector->connector = connector;
        fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
-static void remove_from_modeset(struct drm_mode_set *set,
-               struct drm_connector *connector)
-{
-       int i, j;
-
-       for (i = 0; i < set->num_connectors; i++) {
-               if (set->connectors[i] == connector)
-                       break;
-       }
-
-       if (i == set->num_connectors)
-               return;
-
-       for (j = i + 1; j < set->num_connectors; j++) {
-               set->connectors[j - 1] = set->connectors[j];
-       }
-       set->num_connectors--;
-
-       /*
-        * TODO maybe need to makes sure we set it back to !=NULL somewhere?
-        */
-       if (set->num_connectors == 0) {
-               set->fb = NULL;
-               drm_mode_destroy(connector->dev, set->mode);
-               set->mode = NULL;
-       }
-}
-
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector)
 {
@@ -206,6 +188,7 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        if (i == fb_helper->connector_count)
                return -EINVAL;
        fb_helper_connector = fb_helper->connector_info[i];
+       drm_connector_unreference(fb_helper_connector->connector);
 
        for (j = i + 1; j < fb_helper->connector_count; j++) {
                fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
@@ -213,10 +196,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        fb_helper->connector_count--;
        kfree(fb_helper_connector);
 
-       /* also cleanup dangling references to the connector: */
-       for (i = 0; i < fb_helper->crtc_count; i++)
-               remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
-
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -626,8 +605,10 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
 {
        int i;
 
-       for (i = 0; i < helper->connector_count; i++)
+       for (i = 0; i < helper->connector_count; i++) {
+               drm_connector_unreference(helper->connector_info[i]->connector);
                kfree(helper->connector_info[i]);
+       }
        kfree(helper->connector_info);
        for (i = 0; i < helper->crtc_count; i++) {
                kfree(helper->crtc_info[i].mode_set.connectors);
@@ -637,6 +618,23 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
        kfree(helper->crtc_info);
 }
 
+static void drm_fb_helper_dirty_work(struct work_struct *work)
+{
+       struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+                                                   dirty_work);
+       struct drm_clip_rect *clip = &helper->dirty_clip;
+       struct drm_clip_rect clip_copy;
+       unsigned long flags;
+
+       spin_lock_irqsave(&helper->dirty_lock, flags);
+       clip_copy = *clip;
+       clip->x1 = clip->y1 = ~0;
+       clip->x2 = clip->y2 = 0;
+       spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+       helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+}
+
 /**
  * drm_fb_helper_prepare - setup a drm_fb_helper structure
  * @dev: DRM device
@@ -650,6 +648,9 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
                           const struct drm_fb_helper_funcs *funcs)
 {
        INIT_LIST_HEAD(&helper->kernel_fb_list);
+       spin_lock_init(&helper->dirty_lock);
+       INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
+       helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
        helper->funcs = funcs;
        helper->dev = dev;
 }
@@ -834,6 +835,59 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
+                               u32 width, u32 height)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct drm_clip_rect *clip = &helper->dirty_clip;
+       unsigned long flags;
+
+       if (!helper->fb->funcs->dirty)
+               return;
+
+       spin_lock_irqsave(&helper->dirty_lock, flags);
+       clip->x1 = min_t(u32, clip->x1, x);
+       clip->y1 = min_t(u32, clip->y1, y);
+       clip->x2 = max_t(u32, clip->x2, x + width);
+       clip->y2 = max_t(u32, clip->y2, y + height);
+       spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+       schedule_work(&helper->dirty_work);
+}
+
+/**
+ * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
+ * @info: fb_info struct pointer
+ * @pagelist: list of dirty mmap framebuffer pages
+ *
+ * This function is used as the &fb_deferred_io ->deferred_io
+ * callback function for flushing the fbdev mmap writes.
+ */
+void drm_fb_helper_deferred_io(struct fb_info *info,
+                              struct list_head *pagelist)
+{
+       unsigned long start, end, min, max;
+       struct page *page;
+       u32 y1, y2;
+
+       min = ULONG_MAX;
+       max = 0;
+       list_for_each_entry(page, pagelist, lru) {
+               start = page->index << PAGE_SHIFT;
+               end = start + PAGE_SIZE - 1;
+               min = min(min, start);
+               max = max(max, end);
+       }
+
+       if (min < max) {
+               y1 = min / info->fix.line_length;
+               y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
+                          info->var.yres);
+               drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+       }
+}
+EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+
 /**
  * drm_fb_helper_sys_read - wrapper around fb_sys_read
  * @info: fb_info struct pointer
@@ -862,7 +916,14 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read);
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       return fb_sys_write(info, buf, count, ppos);
+       ssize_t ret;
+
+       ret = fb_sys_write(info, buf, count, ppos);
+       if (ret > 0)
+               drm_fb_helper_dirty(info, 0, 0, info->var.xres,
+                                   info->var.yres);
+
+       return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_write);
 
@@ -877,6 +938,8 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
                                const struct fb_fillrect *rect)
 {
        sys_fillrect(info, rect);
+       drm_fb_helper_dirty(info, rect->dx, rect->dy,
+                           rect->width, rect->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
 
@@ -891,6 +954,8 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
                                const struct fb_copyarea *area)
 {
        sys_copyarea(info, area);
+       drm_fb_helper_dirty(info, area->dx, area->dy,
+                           area->width, area->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
 
@@ -905,6 +970,8 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
                                 const struct fb_image *image)
 {
        sys_imageblit(info, image);
+       drm_fb_helper_dirty(info, image->dx, image->dy,
+                           image->width, image->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
 
@@ -919,6 +986,8 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
                                const struct fb_fillrect *rect)
 {
        cfb_fillrect(info, rect);
+       drm_fb_helper_dirty(info, rect->dx, rect->dy,
+                           rect->width, rect->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
 
@@ -933,6 +1002,8 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
                                const struct fb_copyarea *area)
 {
        cfb_copyarea(info, area);
+       drm_fb_helper_dirty(info, area->dx, area->dy,
+                           area->width, area->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
 
@@ -947,6 +1018,8 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
                                 const struct fb_image *image)
 {
        cfb_imageblit(info, image);
+       drm_fb_helper_dirty(info, image->dx, image->dy,
+                           image->width, image->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
 
index aeef58e..7af7f8b 100644 (file)
@@ -297,9 +297,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
        }
        mutex_unlock(&dev->master_mutex);
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->filelist_mutex);
        list_add(&priv->lhead, &dev->filelist);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->filelist_mutex);
 
 #ifdef __alpha__
        /*
@@ -381,14 +381,26 @@ static void drm_events_release(struct drm_file *file_priv)
  */
 static void drm_legacy_dev_reinit(struct drm_device *dev)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
+       if (dev->irq_enabled)
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       drm_legacy_agp_clear(dev);
+
+       drm_legacy_sg_cleanup(dev);
+       drm_legacy_vma_flush(dev);
+       drm_legacy_dma_takedown(dev);
+
+       mutex_unlock(&dev->struct_mutex);
 
        dev->sigdata.lock = NULL;
 
        dev->context_flag = 0;
        dev->last_context = 0;
        dev->if_version = 0;
+
+       DRM_DEBUG("lastclose completed\n");
 }
 
 /*
@@ -400,7 +412,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
  *
  * \sa drm_device
  */
-int drm_lastclose(struct drm_device * dev)
+void drm_lastclose(struct drm_device * dev)
 {
        DRM_DEBUG("\n");
 
@@ -408,23 +420,8 @@ int drm_lastclose(struct drm_device * dev)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");
 
-       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_irq_uninstall(dev);
-
-       mutex_lock(&dev->struct_mutex);
-
-       drm_agp_clear(dev);
-
-       drm_legacy_sg_cleanup(dev);
-       drm_legacy_vma_flush(dev);
-       drm_legacy_dma_takedown(dev);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       drm_legacy_dev_reinit(dev);
-
-       DRM_DEBUG("lastclose completed\n");
-       return 0;
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_legacy_dev_reinit(dev);
 }
 
 /**
@@ -445,14 +442,16 @@ int drm_release(struct inode *inode, struct file *filp)
        struct drm_file *file_priv = filp->private_data;
        struct drm_minor *minor = file_priv->minor;
        struct drm_device *dev = minor->dev;
-       int retcode = 0;
 
        mutex_lock(&drm_global_mutex);
 
        DRM_DEBUG("open_count = %d\n", dev->open_count);
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->filelist_mutex);
        list_del(&file_priv->lhead);
+       mutex_unlock(&dev->filelist_mutex);
+
+       mutex_lock(&dev->struct_mutex);
        if (file_priv->magic)
                idr_remove(&file_priv->master->magic_map, file_priv->magic);
        mutex_unlock(&dev->struct_mutex);
@@ -538,7 +537,7 @@ int drm_release(struct inode *inode, struct file *filp)
         */
 
        if (!--dev->open_count) {
-               retcode = drm_lastclose(dev);
+               drm_lastclose(dev);
                if (drm_device_is_unplugged(dev))
                        drm_put_dev(dev);
        }
@@ -546,7 +545,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_minor_release(minor);
 
-       return retcode;
+       return 0;
 }
 EXPORT_SYMBOL(drm_release);
 
index 25dac31..f716308 100644 (file)
@@ -804,13 +804,66 @@ drm_gem_object_free(struct kref *kref)
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       if (dev->driver->gem_free_object_unlocked) {
+               dev->driver->gem_free_object_unlocked(obj);
+       } else if (dev->driver->gem_free_object) {
+               WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
+       }
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
+/**
+ * drm_gem_object_unreference_unlocked - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must not hold the
+ * dev->struct_mutex lock when calling this function.
+ *
+ * See also __drm_gem_object_unreference().
+ */
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+       struct drm_device *dev;
+
+       if (!obj)
+               return;
+
+       dev = obj->dev;
+       might_lock(&dev->struct_mutex);
+
+       if (dev->driver->gem_free_object_unlocked)
+               kref_put(&obj->refcount, drm_gem_object_free);
+       else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
+                               &dev->struct_mutex))
+               mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
+
+/**
+ * drm_gem_object_unreference - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must hold the dev->struct_mutex
+ * lock when calling this function, even when the driver doesn't use
+ * dev->struct_mutex for anything.
+ *
+ * For drivers not encumbered with legacy locking use
+ * drm_gem_object_unreference_unlocked() instead.
+ */
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+       if (obj) {
+               WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+               kref_put(&obj->refcount, drm_gem_object_free);
+       }
+}
+EXPORT_SYMBOL(drm_gem_object_unreference);
+
 /**
  * drm_gem_vm_open - vma->ops->open implementation for GEM
  * @vma: VM area structure
index cbb4fc0..5d469b2 100644 (file)
@@ -174,7 +174,7 @@ int drm_clients_info(struct seq_file *m, void *data)
        /* dev->filelist is sorted youngest first, but we want to present
         * oldest first (i.e. kernel, servers, clients), so walk backwardss.
         */
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
                struct task_struct *task;
 
@@ -190,7 +190,7 @@ int drm_clients_info(struct seq_file *m, void *data)
                           priv->magic);
                rcu_read_unlock();
        }
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->filelist_mutex);
        return 0;
 }
 
index 43cbda3..902cf6a 100644 (file)
@@ -26,7 +26,7 @@ extern unsigned int drm_timestamp_monotonic;
 
 /* drm_fops.c */
 extern struct mutex drm_global_mutex;
-int drm_lastclose(struct drm_device *dev);
+void drm_lastclose(struct drm_device *dev);
 
 /* drm_pci.c */
 int drm_pci_set_unique(struct drm_device *dev,
@@ -37,8 +37,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 
 /* drm_vm.c */
 int drm_vma_info(struct seq_file *m, void *data);
-void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
 
 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
index 8ce2a0c..b7a3977 100644 (file)
@@ -149,58 +149,6 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
        return 0;
 }
 
-/*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-static int drm_getmap(struct drm_device *dev, void *data,
-              struct drm_file *file_priv)
-{
-       struct drm_map *map = data;
-       struct drm_map_list *r_list = NULL;
-       struct list_head *list;
-       int idx;
-       int i;
-
-       idx = map->offset;
-       if (idx < 0)
-               return -EINVAL;
-
-       i = 0;
-       mutex_lock(&dev->struct_mutex);
-       list_for_each(list, &dev->maplist) {
-               if (i == idx) {
-                       r_list = list_entry(list, struct drm_map_list, head);
-                       break;
-               }
-               i++;
-       }
-       if (!r_list || !r_list->map) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
-       map->offset = r_list->map->offset;
-       map->size = r_list->map->size;
-       map->type = r_list->map->type;
-       map->flags = r_list->map->flags;
-       map->handle = (void *)(unsigned long) r_list->user_token;
-       map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
 /*
  * Get client information.
  *
@@ -558,7 +506,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
index 9b73178..d3b6ee3 100644 (file)
@@ -63,6 +63,8 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
 
 #define DRM_MAP_HASH_OFFSET 0x10000000
 
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
 int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
index a1fff11..29d5a54 100644 (file)
@@ -250,7 +250,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
 {
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
-               drm_agp_clear(dev);
+               drm_legacy_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
index d7d8cec..fa7fadc 100644 (file)
@@ -208,9 +208,12 @@ static ssize_t status_show(struct device *device,
                           char *buf)
 {
        struct drm_connector *connector = to_drm_connector(device);
+       enum drm_connector_status status;
+
+       status = READ_ONCE(connector->status);
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
-                       drm_get_connector_status_name(connector->status));
+                       drm_get_connector_status_name(status));
 }
 
 static ssize_t dpms_show(struct device *device,
@@ -231,9 +234,11 @@ static ssize_t enabled_show(struct device *device,
                           char *buf)
 {
        struct drm_connector *connector = to_drm_connector(device);
+       bool enabled;
+
+       enabled = READ_ONCE(connector->encoder);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
-                       "disabled");
+       return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
 }
 
 static ssize_t edid_show(struct file *filp, struct kobject *kobj,
index f90bd5f..ac9f4b3 100644 (file)
@@ -395,16 +395,8 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
        .close = drm_vm_close,
 };
 
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct drm_device *dev,
-               struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct drm_device *dev,
+                              struct vm_area_struct *vma)
 {
        struct drm_vma_entry *vma_entry;
 
@@ -429,8 +421,8 @@ static void drm_vm_open(struct vm_area_struct *vma)
        mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_vm_close_locked(struct drm_device *dev,
-               struct vm_area_struct *vma)
+static void drm_vm_close_locked(struct drm_device *dev,
+                               struct vm_area_struct *vma)
 {
        struct drm_vma_entry *pt, *temp;
 
index e885898..c2f92e3 100644 (file)
@@ -497,7 +497,7 @@ static struct drm_driver etnaviv_drm_driver = {
        .open               = etnaviv_open,
        .preclose           = etnaviv_preclose,
        .set_busid          = drm_platform_set_busid,
-       .gem_free_object    = etnaviv_gem_free_object,
+       .gem_free_object_unlocked = etnaviv_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
index 2fadd82..d814b30 100644 (file)
@@ -95,7 +95,7 @@ comment "Sub-drivers"
 
 config DRM_EXYNOS_G2D
        bool "G2D"
-       depends on !VIDEO_SAMSUNG_S5P_G2D
+       depends on VIDEO_SAMSUNG_S5P_G2D=n
        select FRAME_VECTOR
        help
          Choose this option if you want to use Exynos G2D for DRM.
index 126b0a1..f663490 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-               exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
-               exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+               exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)    += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)       += exynos5433_drm_decon.o
index 5245bc5..4ab5bfc 100644 (file)
 #define WINDOWS_NR     3
 #define MIN_FB_WIDTH_FOR_16WORD_BURST  128
 
+#define IFTYPE_I80     (1 << 0)
+#define I80_HW_TRG     (1 << 1)
+#define IFTYPE_HDMI    (1 << 2)
+
 static const char * const decon_clks_name[] = {
        "pclk",
        "aclk_decon",
@@ -38,12 +42,6 @@ static const char * const decon_clks_name[] = {
        "sclk_decon_eclk",
 };
 
-enum decon_iftype {
-       IFTYPE_RGB,
-       IFTYPE_I80,
-       IFTYPE_HDMI
-};
-
 enum decon_flag_bits {
        BIT_CLKS_ENABLED,
        BIT_IRQS_ENABLED,
@@ -61,7 +59,7 @@ struct decon_context {
        struct clk                      *clks[ARRAY_SIZE(decon_clks_name)];
        int                             pipe;
        unsigned long                   flags;
-       enum decon_iftype               out_type;
+       unsigned long                   out_type;
        int                             first_win;
 };
 
@@ -95,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 
        if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
                val = VIDINTCON0_INTEN;
-               if (ctx->out_type == IFTYPE_I80)
+               if (ctx->out_type & IFTYPE_I80)
                        val |= VIDINTCON0_FRAMEDONE;
                else
                        val |= VIDINTCON0_INTFRMEN;
@@ -119,11 +117,11 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
 
 static void decon_setup_trigger(struct decon_context *ctx)
 {
-       u32 val = (ctx->out_type != IFTYPE_HDMI)
+       u32 val = !(ctx->out_type & I80_HW_TRG)
                ? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
                  TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
                : TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-                 TRIGCON_HWTRIGMASK_I80_RGB | TRIGCON_HWTRIGEN_I80_RGB;
+                 TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
        writel(val, ctx->addr + DECON_TRIGCON);
 }
 
@@ -136,7 +134,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
        if (test_bit(BIT_SUSPENDED, &ctx->flags))
                return;
 
-       if (ctx->out_type == IFTYPE_HDMI) {
+       if (ctx->out_type & IFTYPE_HDMI) {
                m->crtc_hsync_start = m->crtc_hdisplay + 10;
                m->crtc_hsync_end = m->crtc_htotal - 92;
                m->crtc_vsync_start = m->crtc_vdisplay + 1;
@@ -151,17 +149,20 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 
        /* lcd on and use command if */
        val = VIDOUT_LCD_ON;
-       if (ctx->out_type == IFTYPE_I80)
+       if (ctx->out_type & IFTYPE_I80) {
                val |= VIDOUT_COMMAND_IF;
-       else
+               decon_setup_trigger(ctx);
+       } else {
                val |= VIDOUT_RGB_IF;
+       }
+
        writel(val, ctx->addr + DECON_VIDOUTCON0);
 
        val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
                VIDTCON2_HOZVAL(m->hdisplay - 1);
        writel(val, ctx->addr + DECON_VIDTCON2);
 
-       if (ctx->out_type != IFTYPE_I80) {
+       if (!(ctx->out_type & IFTYPE_I80)) {
                val = VIDTCON00_VBPD_F(
                                m->crtc_vtotal - m->crtc_vsync_end - 1) |
                        VIDTCON00_VFPD_F(
@@ -183,10 +184,10 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
                writel(val, ctx->addr + DECON_VIDTCON11);
        }
 
-       decon_setup_trigger(ctx);
-
        /* enable output and display signal */
        decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
+
+       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
@@ -300,7 +301,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
        val = dma_addr + pitch * state->src.h;
        writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
 
-       if (ctx->out_type != IFTYPE_HDMI)
+       if (!(ctx->out_type & IFTYPE_HDMI))
                val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
                        | BIT_VAL(state->crtc.w * bpp, 13, 0);
        else
@@ -312,9 +313,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
        /* window enable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
-
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -326,15 +324,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
        if (test_bit(BIT_SUSPENDED, &ctx->flags))
                return;
 
-       decon_shadow_protect_win(ctx, win, true);
-
-       /* window disable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
-
-       decon_shadow_protect_win(ctx, win, false);
-
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -348,7 +338,10 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
-       if (ctx->out_type == IFTYPE_I80)
+       /* standalone update */
+       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
+       if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
 }
 
@@ -374,7 +367,7 @@ static void decon_swreset(struct decon_context *ctx)
 
        WARN(tries == 0, "failed to software reset DECON\n");
 
-       if (ctx->out_type != IFTYPE_HDMI)
+       if (!(ctx->out_type & IFTYPE_HDMI))
                return;
 
        writel(VIDCON0_CLKVALUP | VIDCON0_VLCKFREE, ctx->addr + DECON_VIDCON0);
@@ -383,7 +376,9 @@ static void decon_swreset(struct decon_context *ctx)
        writel(VIDCON1_VCLK_RUN_VDEN_DISABLE, ctx->addr + DECON_VIDCON1);
        writel(CRCCTRL_CRCEN | CRCCTRL_CRCSTART_F | CRCCTRL_CRCCLKEN,
               ctx->addr + DECON_CRCCTRL);
-       decon_setup_trigger(ctx);
+
+       if (ctx->out_type & IFTYPE_I80)
+               decon_setup_trigger(ctx);
 }
 
 static void decon_enable(struct exynos_drm_crtc *crtc)
@@ -395,8 +390,12 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
 
        pm_runtime_get_sync(ctx->dev);
 
+       exynos_drm_pipe_clk_enable(crtc, true);
+
        set_bit(BIT_CLKS_ENABLED, &ctx->flags);
 
+       decon_swreset(ctx);
+
        /* if vblank was enabled status, enable it again. */
        if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
                decon_enable_vblank(ctx->crtc);
@@ -424,6 +423,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 
        clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
 
+       exynos_drm_pipe_clk_enable(crtc, false);
+
        pm_runtime_put_sync(ctx->dev);
 
        set_bit(BIT_SUSPENDED, &ctx->flags);
@@ -459,8 +460,10 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
                decon_shadow_protect_win(ctx, win, true);
                decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
                decon_shadow_protect_win(ctx, win, false);
-               decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
        }
+
+       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
        /* TODO: wait for possible vsync */
        msleep(50);
 
@@ -509,7 +512,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
        }
 
        exynos_plane = &ctx->planes[ctx->first_win];
-       out_type = (ctx->out_type == IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
+       out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
                                                  : EXYNOS_DISPLAY_TYPE_LCD;
        ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
                                        ctx->pipe, out_type,
@@ -617,11 +620,11 @@ static const struct dev_pm_ops exynos5433_decon_pm_ops = {
 static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
        {
                .compatible = "samsung,exynos5433-decon",
-               .data = (void *)IFTYPE_RGB
+               .data = (void *)I80_HW_TRG
        },
        {
                .compatible = "samsung,exynos5433-decon-tv",
-               .data = (void *)IFTYPE_HDMI
+               .data = (void *)(I80_HW_TRG | IFTYPE_HDMI)
        },
        {},
 };
@@ -629,7 +632,6 @@ MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
 
 static int exynos5433_decon_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id;
        struct device *dev = &pdev->dev;
        struct decon_context *ctx;
        struct resource *res;
@@ -642,14 +644,14 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 
        __set_bit(BIT_SUSPENDED, &ctx->flags);
        ctx->dev = dev;
+       ctx->out_type = (unsigned long)of_device_get_match_data(dev);
 
-       of_id = of_match_device(exynos5433_decon_driver_dt_match, &pdev->dev);
-       ctx->out_type = (enum decon_iftype)of_id->data;
-
-       if (ctx->out_type == IFTYPE_HDMI)
+       if (ctx->out_type & IFTYPE_HDMI) {
                ctx->first_win = 1;
-       else if (of_get_child_by_name(dev->of_node, "i80-if-timings"))
                ctx->out_type = IFTYPE_I80;
+       } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
+               ctx->out_type = IFTYPE_I80;
+       }
 
        for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
                struct clk *clk;
@@ -674,7 +676,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                       (ctx->out_type == IFTYPE_I80) ? "lcd_sys" : "vsync");
+                       (ctx->out_type & IFTYPE_I80) ? "lcd_sys" : "vsync");
        if (!res) {
                dev_err(dev, "cannot find IRQ resource\n");
                return -ENXIO;
index 9336107..f6223f9 100644 (file)
@@ -593,7 +593,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .commit = decon_commit,
        .enable_vblank = decon_enable_vblank,
        .disable_vblank = decon_disable_vblank,
-       .wait_for_vblank = decon_wait_for_vblank,
        .atomic_begin = decon_atomic_begin,
        .update_plane = decon_update_plane,
        .disable_plane = decon_disable_plane,
index 8ae3d51..468498e 100644 (file)
@@ -48,14 +48,11 @@ int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
 {
        struct exynos_dp_device *dp = to_dp(plat_data);
        struct drm_encoder *encoder = &dp->encoder;
-       struct exynos_drm_crtc *crtc;
 
-       if (!encoder)
-               return -1;
+       if (!encoder->crtc)
+               return -EPERM;
 
-       crtc = to_exynos_crtc(encoder->crtc);
-       if (crtc && crtc->ops && crtc->ops->clock_enable)
-               crtc->ops->clock_enable(crtc, enable);
+       exynos_drm_pipe_clk_enable(to_exynos_crtc(encoder->crtc), enable);
 
        return 0;
 }
index 7f55ba6..011211e 100644 (file)
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 
 err:
-       list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+       list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
                if (subdrv->close)
                        subdrv->close(dev, subdrv->dev, file);
        }
index e36579c..50dd33d 100644 (file)
@@ -157,9 +157,8 @@ err_crtc:
 
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct exynos_drm_private *private = dev->dev_private;
-       struct exynos_drm_crtc *exynos_crtc =
-               to_exynos_crtc(private->crtc[pipe]);
+       struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+                                                                       pipe);
 
        if (exynos_crtc->ops->enable_vblank)
                return exynos_crtc->ops->enable_vblank(exynos_crtc);
@@ -169,9 +168,8 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct exynos_drm_private *private = dev->dev_private;
-       struct exynos_drm_crtc *exynos_crtc =
-               to_exynos_crtc(private->crtc[pipe]);
+       struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+                                                                       pipe);
 
        if (exynos_crtc->ops->disable_vblank)
                exynos_crtc->ops->disable_vblank(exynos_crtc);
index 5344940..8ff355d 100644 (file)
@@ -270,7 +270,7 @@ static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
 }
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
-                        bool async)
+                        bool nonblock)
 {
        struct exynos_drm_private *priv = dev->dev_private;
        struct exynos_atomic_commit *commit;
@@ -308,7 +308,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 
        drm_atomic_helper_swap_state(dev, state);
 
-       if (async)
+       if (nonblock)
                schedule_work(&commit->work);
        else
                exynos_atomic_commit_complete(commit);
index 502f750..cc33ec9 100644 (file)
@@ -120,8 +120,6 @@ struct exynos_drm_plane_config {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *     hardware overlay is updated.
  * @atomic_check: validate state
  * @atomic_begin: prepare device to receive an update
  * @atomic_flush: mark the end of device update
@@ -129,10 +127,6 @@ struct exynos_drm_plane_config {
  * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *     synchronization signal if there is a page flip request.
- * @clock_enable: optional function enabling/disabling display domain clock,
- *     called from exynos-dp driver before powering up (with
- *     'enable' argument as true) and after powering down (with
- *     'enable' as false).
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
@@ -141,7 +135,6 @@ struct exynos_drm_crtc_ops {
        void (*commit)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
-       void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
        int (*atomic_check)(struct exynos_drm_crtc *crtc,
                            struct drm_crtc_state *state);
        void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -151,7 +144,10 @@ struct exynos_drm_crtc_ops {
                              struct exynos_drm_plane *plane);
        void (*atomic_flush)(struct exynos_drm_crtc *crtc);
        void (*te_handler)(struct exynos_drm_crtc *crtc);
-       void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
+};
+
+struct exynos_drm_clk {
+       void (*enable)(struct exynos_drm_clk *clk, bool enable);
 };
 
 /*
@@ -182,8 +178,16 @@ struct exynos_drm_crtc {
        atomic_t                        pending_update;
        const struct exynos_drm_crtc_ops        *ops;
        void                            *ctx;
+       struct exynos_drm_clk           *pipe_clk;
 };
 
+static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
+                                             bool enable)
+{
+       if (crtc->pipe_clk)
+               crtc->pipe_clk->enable(crtc->pipe_clk, enable);
+}
+
 struct exynos_drm_g2d_private {
        struct device           *dev;
        struct list_head        inuse_cmdlist;
@@ -232,6 +236,14 @@ struct exynos_drm_private {
        wait_queue_head_t       wait;
 };
 
+static inline struct exynos_drm_crtc *
+exynos_drm_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+
+       return to_exynos_crtc(private->crtc[pipe]);
+}
+
 static inline struct device *to_dma_dev(struct drm_device *dev)
 {
        struct exynos_drm_private *priv = dev->dev_private;
@@ -296,7 +308,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 #endif
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
-                        bool async);
+                        bool nonblock);
 
 
 extern struct platform_driver fimd_driver;
index 63c84a1..72c3565 100644 (file)
@@ -280,7 +280,7 @@ struct exynos_dsi {
        spinlock_t transfer_lock; /* protects transfer_list */
        struct list_head transfer_list;
 
-       struct exynos_dsi_driver_data *driver_data;
+       const struct exynos_dsi_driver_data *driver_data;
        struct device_node *bridge_node;
 };
 
@@ -532,15 +532,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
        { }
 };
 
-static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
-                                               struct platform_device *pdev)
-{
-       const struct of_device_id *of_id =
-                       of_match_device(exynos_dsi_of_match, &pdev->dev);
-
-       return (struct exynos_dsi_driver_data *)of_id->data;
-}
-
 static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
 {
        if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -564,7 +555,7 @@ static void exynos_dsi_reset(struct exynos_dsi *dsi)
 static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
                unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
 {
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        unsigned long best_freq = 0;
        u32 min_delta = 0xffffffff;
        u8 p_min, p_max;
@@ -618,7 +609,7 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
 static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
                                        unsigned long freq)
 {
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        unsigned long fin, fout;
        int timeout;
        u8 p, s;
@@ -712,7 +703,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
 
 static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
 {
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        const unsigned int *reg_values = driver_data->reg_values;
        u32 reg;
 
@@ -790,7 +781,7 @@ static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
 
 static int exynos_dsi_init_link(struct exynos_dsi *dsi)
 {
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        int timeout;
        u32 reg;
        u32 lanes_mask;
@@ -1334,7 +1325,7 @@ static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
 
 static int exynos_dsi_init(struct exynos_dsi *dsi)
 {
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 
        exynos_dsi_reset(dsi);
        exynos_dsi_enable_irq(dsi);
@@ -1833,7 +1824,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        dsi->dsi_host.dev = dev;
 
        dsi->dev = dev;
-       dsi->driver_data = exynos_dsi_get_driver_data(pdev);
+       dsi->driver_data = of_device_get_match_data(dev);
 
        ret = exynos_dsi_parse_dt(dsi);
        if (ret)
@@ -1917,7 +1908,7 @@ static int __maybe_unused exynos_dsi_suspend(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        int ret, i;
 
        usleep_range(10000, 20000);
@@ -1948,7 +1939,7 @@ static int __maybe_unused exynos_dsi_resume(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
-       struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+       const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
        int ret, i;
 
        ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
index d614194..81cc553 100644 (file)
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
        return exynos_fb->dma_addr[index];
 }
 
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
-       struct exynos_drm_private *private = dev->dev_private;
-       struct drm_fb_helper *fb_helper = private->fb_helper;
-
-       if (fb_helper)
-               drm_fb_helper_hotplug_event(fb_helper);
-       else
-               exynos_drm_fbdev_init(dev);
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
index 4656cd6..67dcd68 100644 (file)
@@ -311,3 +311,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
        drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
 }
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = private->fb_helper;
+
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
+       else
+               exynos_drm_fbdev_init(dev);
+}
index e16d7f0..330eef8 100644 (file)
 #ifndef _EXYNOS_DRM_FBDEV_H_
 #define _EXYNOS_DRM_FBDEV_H_
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
 int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
 
 #endif
index 51d484a..547d759 100644 (file)
 /* color key value register for hardware window 1 ~ 4. */
 #define WKEYCON1_BASE(x)               ((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
-/* I80 / RGB trigger control register */
+/* I80 trigger control register */
 #define TRIGCON                                0x1A4
-#define TRGMODE_I80_RGB_ENABLE_I80     (1 << 0)
-#define SWTRGCMD_I80_RGB_ENABLE                (1 << 1)
+#define TRGMODE_ENABLE                 (1 << 0)
+#define SWTRGCMD_ENABLE                        (1 << 1)
+/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */
+#define HWTRGEN_ENABLE                 (1 << 3)
+#define HWTRGMASK_ENABLE               (1 << 4)
+/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */
+#define HWTRIGEN_PER_ENABLE            (1 << 31)
 
 /* display mode change control register except exynos4 */
 #define VIDOUT_CON                     0x000
 /* FIMD has totally five hardware windows. */
 #define WINDOWS_NR     5
 
+/* HW trigger flag on i80 panel. */
+#define I80_HW_TRG     (1 << 1)
+
 struct fimd_driver_data {
        unsigned int timing_base;
        unsigned int lcdblk_offset;
        unsigned int lcdblk_vt_shift;
        unsigned int lcdblk_bypass_shift;
        unsigned int lcdblk_mic_bypass_shift;
+       unsigned int trg_type;
 
        unsigned int has_shadowcon:1;
        unsigned int has_clksel:1;
@@ -102,20 +111,26 @@ struct fimd_driver_data {
        unsigned int has_vidoutcon:1;
        unsigned int has_vtsel:1;
        unsigned int has_mic_bypass:1;
+       unsigned int has_dp_clk:1;
+       unsigned int has_hw_trigger:1;
+       unsigned int has_trigger_per_te:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
        .timing_base = 0x0,
        .has_clksel = 1,
        .has_limited_fmt = 1,
+       .has_hw_trigger = 1,
 };
 
 static struct fimd_driver_data exynos3_fimd_driver_data = {
        .timing_base = 0x20000,
        .lcdblk_offset = 0x210,
        .lcdblk_bypass_shift = 1,
+       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
+       .has_trigger_per_te = 1,
 };
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -132,9 +147,11 @@ static struct fimd_driver_data exynos4415_fimd_driver_data = {
        .lcdblk_offset = 0x210,
        .lcdblk_vt_shift = 10,
        .lcdblk_bypass_shift = 1,
+       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
+       .has_trigger_per_te = 1,
 };
 
 static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -145,6 +162,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
+       .has_dp_clk = 1,
 };
 
 static struct fimd_driver_data exynos5420_fimd_driver_data = {
@@ -153,10 +171,14 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
        .lcdblk_vt_shift = 24,
        .lcdblk_bypass_shift = 15,
        .lcdblk_mic_bypass_shift = 11,
+       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
        .has_mic_bypass = 1,
+       .has_dp_clk = 1,
+       .has_hw_trigger = 1,
+       .has_trigger_per_te = 1,
 };
 
 struct fimd_context {
@@ -182,8 +204,9 @@ struct fimd_context {
        atomic_t                        win_updated;
        atomic_t                        triggering;
 
-       struct fimd_driver_data *driver_data;
+       const struct fimd_driver_data *driver_data;
        struct drm_encoder *encoder;
+       struct exynos_drm_clk           dp_clk;
 };
 
 static const struct of_device_id fimd_driver_dt_match[] = {
@@ -219,15 +242,6 @@ static const uint32_t fimd_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
-static inline struct fimd_driver_data *drm_fimd_get_driver_data(
-       struct platform_device *pdev)
-{
-       const struct of_device_id *of_id =
-                       of_match_device(fimd_driver_dt_match, &pdev->dev);
-
-       return (struct fimd_driver_data *)of_id->data;
-}
-
 static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
@@ -400,11 +414,31 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
+static void fimd_setup_trigger(struct fimd_context *ctx)
+{
+       void __iomem *timing_base = ctx->regs + ctx->driver_data->timing_base;
+       u32 trg_type = ctx->driver_data->trg_type;
+       u32 val = readl(timing_base + TRIGCON);
+
+       val &= ~(TRGMODE_ENABLE);
+
+       if (trg_type == I80_HW_TRG) {
+               if (ctx->driver_data->has_hw_trigger)
+                       val |= HWTRGEN_ENABLE | HWTRGMASK_ENABLE;
+               if (ctx->driver_data->has_trigger_per_te)
+                       val |= HWTRIGEN_PER_ENABLE;
+       } else {
+               val |= TRGMODE_ENABLE;
+       }
+
+       writel(val, timing_base + TRIGCON);
+}
+
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
        struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
-       struct fimd_driver_data *driver_data = ctx->driver_data;
+       const struct fimd_driver_data *driver_data = ctx->driver_data;
        void *timing_base = ctx->regs + driver_data->timing_base;
        u32 val, clkdiv;
 
@@ -495,6 +529,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
               VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
        writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
+       fimd_setup_trigger(ctx);
+
        /*
         * fields of register with prefix '_F' would be updated
         * at vsync(same as dma start)
@@ -827,7 +863,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
 static void fimd_trigger(struct device *dev)
 {
        struct fimd_context *ctx = dev_get_drvdata(dev);
-       struct fimd_driver_data *driver_data = ctx->driver_data;
+       const struct fimd_driver_data *driver_data = ctx->driver_data;
        void *timing_base = ctx->regs + driver_data->timing_base;
        u32 reg;
 
@@ -842,7 +878,7 @@ static void fimd_trigger(struct device *dev)
        atomic_set(&ctx->triggering, 1);
 
        reg = readl(timing_base + TRIGCON);
-       reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+       reg |= (TRGMODE_ENABLE | SWTRGCMD_ENABLE);
        writel(reg, timing_base + TRIGCON);
 
        /*
@@ -856,11 +892,15 @@ static void fimd_trigger(struct device *dev)
 static void fimd_te_handler(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
+       u32 trg_type = ctx->driver_data->trg_type;
 
        /* Checks the crtc is detached already from encoder */
        if (ctx->pipe < 0 || !ctx->drm_dev)
                return;
 
+       if (trg_type == I80_HW_TRG)
+               goto out;
+
        /*
         * If there is a page flip request, triggers and handles the page flip
         * event so that current fb can be updated into panel GRAM.
@@ -868,6 +908,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
        if (atomic_add_unless(&ctx->win_updated, -1, 0))
                fimd_trigger(ctx->dev);
 
+out:
        /* Wakes up vsync event queue */
        if (atomic_read(&ctx->wait_vsync_event)) {
                atomic_set(&ctx->wait_vsync_event, 0);
@@ -878,21 +919,12 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
                drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
-static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
 {
-       struct fimd_context *ctx = crtc->ctx;
-       u32 val;
+       struct fimd_context *ctx = container_of(clk, struct fimd_context,
+                                               dp_clk);
+       u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
 
-       /*
-        * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
-        * clock. On these SoCs the bootloader may enable it but any
-        * power domain off/on will reset it to disable state.
-        */
-       if (ctx->driver_data != &exynos5_fimd_driver_data ||
-           ctx->driver_data != &exynos5420_fimd_driver_data)
-               return;
-
-       val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
        writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
@@ -902,13 +934,11 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
-       .wait_for_vblank = fimd_wait_for_vblank,
        .atomic_begin = fimd_atomic_begin,
        .update_plane = fimd_update_plane,
        .disable_plane = fimd_disable_plane,
        .atomic_flush = fimd_atomic_flush,
        .te_handler = fimd_te_handler,
-       .clock_enable = fimd_dp_clock_enable,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -987,6 +1017,11 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
        if (IS_ERR(ctx->crtc))
                return PTR_ERR(ctx->crtc);
 
+       if (ctx->driver_data->has_dp_clk) {
+               ctx->dp_clk.enable = fimd_dp_clock_enable;
+               ctx->crtc->pipe_clk = &ctx->dp_clk;
+       }
+
        if (ctx->encoder)
                exynos_dpi_bind(drm_dev, ctx->encoder);
 
@@ -1035,7 +1070,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        ctx->dev = dev;
        ctx->suspended = true;
-       ctx->driver_data = drm_fimd_get_driver_data(pdev);
+       ctx->driver_data = of_device_get_match_data(dev);
 
        if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
                ctx->vidcon1 |= VIDCON1_INV_VDEN;
index 9869d70..a0def0b 100644 (file)
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
        } else
                val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
 
-       regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+       ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
        if (ret)
                DRM_ERROR("mic: Failed to read system register\n");
 }
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
                                                        "samsung,disp-syscon");
        if (IS_ERR(mic->sysreg)) {
                DRM_ERROR("mic: Failed to get system register.\n");
+               ret = PTR_ERR(mic->sysreg);
                goto err;
        }
 
index d862272..50185ac 100644 (file)
 
 #include <drm/drmP.h>
 
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
 }
 
 static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
 {
        struct drm_plane_state *state = &exynos_state->base;
-       struct drm_crtc *crtc = exynos_state->base.crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_existing_crtc_state(state->state, crtc);
+       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        unsigned int src_x, src_y;
index f18fbe4..404367a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
@@ -696,7 +697,6 @@ static int rotator_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct rot_context *rot;
        struct exynos_drm_ippdrv *ippdrv;
-       const struct of_device_id *match;
        int ret;
 
        if (!dev->of_node) {
@@ -708,13 +708,8 @@ static int rotator_probe(struct platform_device *pdev)
        if (!rot)
                return -ENOMEM;
 
-       match = of_match_node(exynos_rotator_match, dev->of_node);
-       if (!match) {
-               dev_err(dev, "failed to match node\n");
-               return -ENODEV;
-       }
-       rot->limit_tbl = (struct rot_limit_table *)match->data;
-
+       rot->limit_tbl = (struct rot_limit_table *)
+                               of_device_get_match_data(dev);
        rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        rot->regs = devm_ioremap_resource(dev, rot->regs_res);
        if (IS_ERR(rot->regs))
index e148d72..0f87acb 100644 (file)
@@ -7,9 +7,9 @@
  *
  * Based on drivers/media/video/s5p-tv/hdmi_drv.c
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
  *
  */
 
 /* AVI header and aspect ratio */
 #define HDMI_AVI_VERSION               0x02
-#define HDMI_AVI_LENGTH                0x0D
+#define HDMI_AVI_LENGTH                        0x0d
 
 /* AUI header info */
-#define HDMI_AUI_VERSION       0x01
-#define HDMI_AUI_LENGTH        0x0A
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8
-#define AVI_4_3_CENTER_RATIO   0x9
-#define AVI_16_9_CENTER_RATIO  0xa
+#define HDMI_AUI_VERSION               0x01
+#define HDMI_AUI_LENGTH                        0x0a
+
+/* AVI active format aspect ratio */
+#define AVI_SAME_AS_PIC_ASPECT_RATIO   0x08
+#define AVI_4_3_CENTER_RATIO           0x09
+#define AVI_16_9_CENTER_RATIO          0x0a
 
 enum hdmi_type {
        HDMI_TYPE13,
@@ -90,11 +92,34 @@ static const char * const supply[] = {
        "vdd_pll",
 };
 
+struct hdmiphy_config {
+       int pixel_clock;
+       u8 conf[32];
+};
+
+struct hdmiphy_configs {
+       int count;
+       const struct hdmiphy_config *data;
+};
+
+struct string_array_spec {
+       int count;
+       const char * const *data;
+};
+
+#define INIT_ARRAY_SPEC(a) { .count = ARRAY_SIZE(a), .data = a }
+
 struct hdmi_driver_data {
        unsigned int type;
-       const struct hdmiphy_config *phy_confs;
-       unsigned int phy_conf_count;
        unsigned int is_apb_phy:1;
+       unsigned int has_sysreg:1;
+       struct hdmiphy_configs phy_confs;
+       struct string_array_spec clk_gates;
+       /*
+        * Array of triplets (p_off, p_on, clock), where p_off and p_on are
+        * required parents of clock when HDMI-PHY is respectively off or on.
+        */
+       struct string_array_spec clk_muxes;
 };
 
 struct hdmi_context {
@@ -116,11 +141,9 @@ struct hdmi_context {
        struct gpio_desc                *hpd_gpio;
        int                             irq;
        struct regmap                   *pmureg;
-       struct clk                      *hdmi;
-       struct clk                      *sclk_hdmi;
-       struct clk                      *sclk_pixel;
-       struct clk                      *sclk_hdmiphy;
-       struct clk                      *mout_hdmi;
+       struct regmap                   *sysreg;
+       struct clk                      **clk_gates;
+       struct clk                      **clk_muxes;
        struct regulator_bulk_data      regul_bulk[ARRAY_SIZE(supply)];
        struct regulator                *reg_hdmi_en;
 };
@@ -135,12 +158,6 @@ static inline struct hdmi_context *connector_to_hdmi(struct drm_connector *c)
        return container_of(c, struct hdmi_context, connector);
 }
 
-struct hdmiphy_config {
-       int pixel_clock;
-       u8 conf[32];
-};
-
-/* list of phy config settings */
 static const struct hdmiphy_config hdmiphy_v13_configs[] = {
        {
                .pixel_clock = 27000000,
@@ -501,25 +518,136 @@ static const struct hdmiphy_config hdmiphy_5420_configs[] = {
        },
 };
 
-static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
+static const struct hdmiphy_config hdmiphy_5433_configs[] = {
+       {
+               .pixel_clock = 27000000,
+               .conf = {
+                       0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
+                       0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+                       0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 27027000,
+               .conf = {
+                       0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
+                       0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
+                       0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 40000000,
+               .conf = {
+                       0x01, 0x51, 0x32, 0x55, 0x01, 0x00, 0x88, 0x02,
+                       0x4d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 50000000,
+               .conf = {
+                       0x01, 0x51, 0x34, 0x40, 0x64, 0x09, 0x88, 0xc3,
+                       0x3d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 65000000,
+               .conf = {
+                       0x01, 0x51, 0x36, 0x31, 0x40, 0x10, 0x04, 0xc6,
+                       0x2e, 0xe8, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 74176000,
+               .conf = {
+                       0x01, 0x51, 0x3E, 0x35, 0x5B, 0xDE, 0x88, 0x42,
+                       0x53, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 74250000,
+               .conf = {
+                       0x01, 0x51, 0x3E, 0x35, 0x40, 0xF0, 0x88, 0xC2,
+                       0x52, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 108000000,
+               .conf = {
+                       0x01, 0x51, 0x2d, 0x15, 0x01, 0x00, 0x88, 0x02,
+                       0x72, 0x52, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+                       0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+               },
+       },
+       {
+               .pixel_clock = 148500000,
+               .conf = {
+                       0x01, 0x51, 0x1f, 0x00, 0x40, 0xf8, 0x88, 0xc1,
+                       0x52, 0x52, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+                       0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+                       0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
+               },
+       },
+};
+
+static const char * const hdmi_clk_gates4[] = {
+       "hdmi", "sclk_hdmi"
+};
+
+static const char * const hdmi_clk_muxes4[] = {
+       "sclk_pixel", "sclk_hdmiphy", "mout_hdmi"
+};
+
+static const char * const hdmi_clk_gates5433[] = {
+       "hdmi_pclk", "hdmi_i_pclk", "i_tmds_clk", "i_pixel_clk", "i_spdif_clk"
+};
+
+static const char * const hdmi_clk_muxes5433[] = {
+       "oscclk", "tmds_clko", "tmds_clko_user",
+       "oscclk", "pixel_clko", "pixel_clko_user"
+};
+
+static const struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+       .type           = HDMI_TYPE13,
+       .phy_confs      = INIT_ARRAY_SPEC(hdmiphy_v13_configs),
+       .clk_gates      = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+       .clk_muxes      = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
+};
+
+static const struct hdmi_driver_data exynos4212_hdmi_driver_data = {
        .type           = HDMI_TYPE14,
-       .phy_confs      = hdmiphy_5420_configs,
-       .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs),
-       .is_apb_phy     = 1,
+       .phy_confs      = INIT_ARRAY_SPEC(hdmiphy_v14_configs),
+       .clk_gates      = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+       .clk_muxes      = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
 };
 
-static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+static const struct hdmi_driver_data exynos5420_hdmi_driver_data = {
        .type           = HDMI_TYPE14,
-       .phy_confs      = hdmiphy_v14_configs,
-       .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs),
-       .is_apb_phy     = 0,
+       .is_apb_phy     = 1,
+       .phy_confs      = INIT_ARRAY_SPEC(hdmiphy_5420_configs),
+       .clk_gates      = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+       .clk_muxes      = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
 };
 
-static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
-       .type           = HDMI_TYPE13,
-       .phy_confs      = hdmiphy_v13_configs,
-       .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
-       .is_apb_phy     = 0,
+static const struct hdmi_driver_data exynos5433_hdmi_driver_data = {
+       .type           = HDMI_TYPE14,
+       .is_apb_phy     = 1,
+       .has_sysreg     = 1,
+       .phy_confs      = INIT_ARRAY_SPEC(hdmiphy_5433_configs),
+       .clk_gates      = INIT_ARRAY_SPEC(hdmi_clk_gates5433),
+       .clk_muxes      = INIT_ARRAY_SPEC(hdmi_clk_muxes5433),
 };
 
 static inline u32 hdmi_map_reg(struct hdmi_context *hdata, u32 reg_id)
@@ -585,266 +713,52 @@ static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
        }
 }
 
-static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
+static int hdmi_clk_enable_gates(struct hdmi_context *hdata)
 {
-#define DUMPREG(reg_id) \
-       DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
-       readl(hdata->regs + reg_id))
-       DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_INTC_FLAG);
-       DUMPREG(HDMI_INTC_CON);
-       DUMPREG(HDMI_HPD_STATUS);
-       DUMPREG(HDMI_V13_PHY_RSTOUT);
-       DUMPREG(HDMI_V13_PHY_VPLL);
-       DUMPREG(HDMI_V13_PHY_CMU);
-       DUMPREG(HDMI_V13_CORE_RSTOUT);
-
-       DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_CON_0);
-       DUMPREG(HDMI_CON_1);
-       DUMPREG(HDMI_CON_2);
-       DUMPREG(HDMI_SYS_STATUS);
-       DUMPREG(HDMI_V13_PHY_STATUS);
-       DUMPREG(HDMI_STATUS_EN);
-       DUMPREG(HDMI_HPD);
-       DUMPREG(HDMI_MODE_SEL);
-       DUMPREG(HDMI_V13_HPD_GEN);
-       DUMPREG(HDMI_V13_DC_CONTROL);
-       DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
-
-       DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_H_BLANK_0);
-       DUMPREG(HDMI_H_BLANK_1);
-       DUMPREG(HDMI_V13_V_BLANK_0);
-       DUMPREG(HDMI_V13_V_BLANK_1);
-       DUMPREG(HDMI_V13_V_BLANK_2);
-       DUMPREG(HDMI_V13_H_V_LINE_0);
-       DUMPREG(HDMI_V13_H_V_LINE_1);
-       DUMPREG(HDMI_V13_H_V_LINE_2);
-       DUMPREG(HDMI_VSYNC_POL);
-       DUMPREG(HDMI_INT_PRO_MODE);
-       DUMPREG(HDMI_V13_V_BLANK_F_0);
-       DUMPREG(HDMI_V13_V_BLANK_F_1);
-       DUMPREG(HDMI_V13_V_BLANK_F_2);
-       DUMPREG(HDMI_V13_H_SYNC_GEN_0);
-       DUMPREG(HDMI_V13_H_SYNC_GEN_1);
-       DUMPREG(HDMI_V13_H_SYNC_GEN_2);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
-       DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
-
-       DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_TG_CMD);
-       DUMPREG(HDMI_TG_H_FSZ_L);
-       DUMPREG(HDMI_TG_H_FSZ_H);
-       DUMPREG(HDMI_TG_HACT_ST_L);
-       DUMPREG(HDMI_TG_HACT_ST_H);
-       DUMPREG(HDMI_TG_HACT_SZ_L);
-       DUMPREG(HDMI_TG_HACT_SZ_H);
-       DUMPREG(HDMI_TG_V_FSZ_L);
-       DUMPREG(HDMI_TG_V_FSZ_H);
-       DUMPREG(HDMI_TG_VSYNC_L);
-       DUMPREG(HDMI_TG_VSYNC_H);
-       DUMPREG(HDMI_TG_VSYNC2_L);
-       DUMPREG(HDMI_TG_VSYNC2_H);
-       DUMPREG(HDMI_TG_VACT_ST_L);
-       DUMPREG(HDMI_TG_VACT_ST_H);
-       DUMPREG(HDMI_TG_VACT_SZ_L);
-       DUMPREG(HDMI_TG_VACT_SZ_H);
-       DUMPREG(HDMI_TG_FIELD_CHG_L);
-       DUMPREG(HDMI_TG_FIELD_CHG_H);
-       DUMPREG(HDMI_TG_VACT_ST2_L);
-       DUMPREG(HDMI_TG_VACT_ST2_H);
-       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
-       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
-       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
-       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
-       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
-       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
-       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
-       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-#undef DUMPREG
+       int i, ret;
+
+       for (i = 0; i < hdata->drv_data->clk_gates.count; ++i) {
+               ret = clk_prepare_enable(hdata->clk_gates[i]);
+               if (!ret)
+                       continue;
+
+               dev_err(hdata->dev, "Cannot enable clock '%s', %d\n",
+                       hdata->drv_data->clk_gates.data[i], ret);
+               while (i--)
+                       clk_disable_unprepare(hdata->clk_gates[i]);
+               return ret;
+       }
+
+       return 0;
 }
 
-static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+static void hdmi_clk_disable_gates(struct hdmi_context *hdata)
 {
-       int i;
+       int i = hdata->drv_data->clk_gates.count;
 
-#define DUMPREG(reg_id) \
-       DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
-       readl(hdata->regs + reg_id))
-
-       DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_INTC_CON);
-       DUMPREG(HDMI_INTC_FLAG);
-       DUMPREG(HDMI_HPD_STATUS);
-       DUMPREG(HDMI_INTC_CON_1);
-       DUMPREG(HDMI_INTC_FLAG_1);
-       DUMPREG(HDMI_PHY_STATUS_0);
-       DUMPREG(HDMI_PHY_STATUS_PLL);
-       DUMPREG(HDMI_PHY_CON_0);
-       DUMPREG(HDMI_V14_PHY_RSTOUT);
-       DUMPREG(HDMI_PHY_VPLL);
-       DUMPREG(HDMI_PHY_CMU);
-       DUMPREG(HDMI_CORE_RSTOUT);
-
-       DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_CON_0);
-       DUMPREG(HDMI_CON_1);
-       DUMPREG(HDMI_CON_2);
-       DUMPREG(HDMI_SYS_STATUS);
-       DUMPREG(HDMI_PHY_STATUS_0);
-       DUMPREG(HDMI_STATUS_EN);
-       DUMPREG(HDMI_HPD);
-       DUMPREG(HDMI_MODE_SEL);
-       DUMPREG(HDMI_ENC_EN);
-       DUMPREG(HDMI_DC_CONTROL);
-       DUMPREG(HDMI_VIDEO_PATTERN_GEN);
-
-       DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_H_BLANK_0);
-       DUMPREG(HDMI_H_BLANK_1);
-       DUMPREG(HDMI_V2_BLANK_0);
-       DUMPREG(HDMI_V2_BLANK_1);
-       DUMPREG(HDMI_V1_BLANK_0);
-       DUMPREG(HDMI_V1_BLANK_1);
-       DUMPREG(HDMI_V_LINE_0);
-       DUMPREG(HDMI_V_LINE_1);
-       DUMPREG(HDMI_H_LINE_0);
-       DUMPREG(HDMI_H_LINE_1);
-       DUMPREG(HDMI_HSYNC_POL);
-
-       DUMPREG(HDMI_VSYNC_POL);
-       DUMPREG(HDMI_INT_PRO_MODE);
-       DUMPREG(HDMI_V_BLANK_F0_0);
-       DUMPREG(HDMI_V_BLANK_F0_1);
-       DUMPREG(HDMI_V_BLANK_F1_0);
-       DUMPREG(HDMI_V_BLANK_F1_1);
-
-       DUMPREG(HDMI_H_SYNC_START_0);
-       DUMPREG(HDMI_H_SYNC_START_1);
-       DUMPREG(HDMI_H_SYNC_END_0);
-       DUMPREG(HDMI_H_SYNC_END_1);
-
-       DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
-       DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
-       DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
-       DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
-
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
-
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
-
-       DUMPREG(HDMI_V_BLANK_F2_0);
-       DUMPREG(HDMI_V_BLANK_F2_1);
-       DUMPREG(HDMI_V_BLANK_F3_0);
-       DUMPREG(HDMI_V_BLANK_F3_1);
-       DUMPREG(HDMI_V_BLANK_F4_0);
-       DUMPREG(HDMI_V_BLANK_F4_1);
-       DUMPREG(HDMI_V_BLANK_F5_0);
-       DUMPREG(HDMI_V_BLANK_F5_1);
-
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
-
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
-       DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
-
-       DUMPREG(HDMI_VACT_SPACE_1_0);
-       DUMPREG(HDMI_VACT_SPACE_1_1);
-       DUMPREG(HDMI_VACT_SPACE_2_0);
-       DUMPREG(HDMI_VACT_SPACE_2_1);
-       DUMPREG(HDMI_VACT_SPACE_3_0);
-       DUMPREG(HDMI_VACT_SPACE_3_1);
-       DUMPREG(HDMI_VACT_SPACE_4_0);
-       DUMPREG(HDMI_VACT_SPACE_4_1);
-       DUMPREG(HDMI_VACT_SPACE_5_0);
-       DUMPREG(HDMI_VACT_SPACE_5_1);
-       DUMPREG(HDMI_VACT_SPACE_6_0);
-       DUMPREG(HDMI_VACT_SPACE_6_1);
-
-       DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_TG_CMD);
-       DUMPREG(HDMI_TG_H_FSZ_L);
-       DUMPREG(HDMI_TG_H_FSZ_H);
-       DUMPREG(HDMI_TG_HACT_ST_L);
-       DUMPREG(HDMI_TG_HACT_ST_H);
-       DUMPREG(HDMI_TG_HACT_SZ_L);
-       DUMPREG(HDMI_TG_HACT_SZ_H);
-       DUMPREG(HDMI_TG_V_FSZ_L);
-       DUMPREG(HDMI_TG_V_FSZ_H);
-       DUMPREG(HDMI_TG_VSYNC_L);
-       DUMPREG(HDMI_TG_VSYNC_H);
-       DUMPREG(HDMI_TG_VSYNC2_L);
-       DUMPREG(HDMI_TG_VSYNC2_H);
-       DUMPREG(HDMI_TG_VACT_ST_L);
-       DUMPREG(HDMI_TG_VACT_ST_H);
-       DUMPREG(HDMI_TG_VACT_SZ_L);
-       DUMPREG(HDMI_TG_VACT_SZ_H);
-       DUMPREG(HDMI_TG_FIELD_CHG_L);
-       DUMPREG(HDMI_TG_FIELD_CHG_H);
-       DUMPREG(HDMI_TG_VACT_ST2_L);
-       DUMPREG(HDMI_TG_VACT_ST2_H);
-       DUMPREG(HDMI_TG_VACT_ST3_L);
-       DUMPREG(HDMI_TG_VACT_ST3_H);
-       DUMPREG(HDMI_TG_VACT_ST4_L);
-       DUMPREG(HDMI_TG_VACT_ST4_H);
-       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
-       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
-       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
-       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
-       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
-       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
-       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
-       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-       DUMPREG(HDMI_TG_3D);
-
-       DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
-       DUMPREG(HDMI_AVI_CON);
-       DUMPREG(HDMI_AVI_HEADER0);
-       DUMPREG(HDMI_AVI_HEADER1);
-       DUMPREG(HDMI_AVI_HEADER2);
-       DUMPREG(HDMI_AVI_CHECK_SUM);
-       DUMPREG(HDMI_VSI_CON);
-       DUMPREG(HDMI_VSI_HEADER0);
-       DUMPREG(HDMI_VSI_HEADER1);
-       DUMPREG(HDMI_VSI_HEADER2);
-       for (i = 0; i < 7; ++i)
-               DUMPREG(HDMI_VSI_DATA(i));
-
-#undef DUMPREG
+       while (i--)
+               clk_disable_unprepare(hdata->clk_gates[i]);
 }
 
-static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
 {
-       if (hdata->drv_data->type == HDMI_TYPE13)
-               hdmi_v13_regs_dump(hdata, prefix);
-       else
-               hdmi_v14_regs_dump(hdata, prefix);
+       struct device *dev = hdata->dev;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < hdata->drv_data->clk_muxes.count; i += 3) {
+               struct clk **c = &hdata->clk_muxes[i];
+
+               ret = clk_set_parent(c[2], c[to_phy]);
+               if (!ret)
+                       continue;
+
+               dev_err(dev, "Cannot set clock parent of '%s' to '%s', %d\n",
+                       hdata->drv_data->clk_muxes.data[i + 2],
+                       hdata->drv_data->clk_muxes.data[i + to_phy], ret);
+       }
+
+       return ret;
 }
 
 static u8 hdmi_chksum(struct hdmi_context *hdata,
@@ -993,10 +907,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
 {
+       const struct hdmiphy_configs *confs = &hdata->drv_data->phy_confs;
        int i;
 
-       for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
-               if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
+       for (i = 0; i < confs->count; i++)
+               if (confs->data[i].pixel_clock == pixel_clock)
                        return i;
 
        DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1078,13 +993,11 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
 
        mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
-       /* just return if user desired mode exists. */
        if (mode_ok == MODE_OK)
                return true;
 
        /*
-        * otherwise, find the most suitable mode among modes and change it
-        * to adjusted_mode.
+        * Find the most suitable mode and copy it to adjusted_mode.
         */
        list_for_each_entry(m, &connector->modes, head) {
                mode_ok = hdmi_mode_valid(connector, m);
@@ -1129,15 +1042,15 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
        switch (bits_per_sample) {
        case 20:
                data_num = 2;
-               bit_ch  = 1;
+               bit_ch = 1;
                break;
        case 24:
                data_num = 3;
-               bit_ch  = 1;
+               bit_ch = 1;
                break;
        default:
                data_num = 1;
-               bit_ch  = 0;
+               bit_ch = 0;
                break;
        }
 
@@ -1230,13 +1143,12 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
        /* choose HDMI mode */
        hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
                HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
-       /* Apply Video preable and Guard band in HDMI mode only */
+       /* apply video pre-amble and guard band in HDMI mode only */
        hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
        /* disable bluescreen */
        hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
 
        if (hdata->dvi_mode) {
-               /* choose DVI mode */
                hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
                                HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
                hdmi_reg_writeb(hdata, HDMI_CON_2,
@@ -1308,7 +1220,7 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 
        val = (m->hsync_start - m->hdisplay - 2);
        val |= ((m->hsync_end - m->hdisplay - 2) << 10);
-       val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
+       val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
        hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
 
        /*
@@ -1319,7 +1231,6 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 
        /* Following values & calculations differ for different type of modes */
        if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-               /* Interlaced Mode */
                val = ((m->vsync_end - m->vdisplay) / 2);
                val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
                hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
@@ -1348,8 +1259,6 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 
                hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
        } else {
-               /* Progressive Mode */
-
                val = m->vtotal;
                val |= (m->vtotal - m->vdisplay) << 11;
                hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
@@ -1365,21 +1274,12 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
                hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
                                m->vtotal - m->vdisplay);
                hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
-               hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
        }
 
-       /* Timing generator registers */
        hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
        hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
        hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
        hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
-       hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
-       hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 }
 
 static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
@@ -1390,7 +1290,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
        hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
        hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
        hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
-                       (m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
+                       (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
        hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
                        (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
        hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
@@ -1404,7 +1304,6 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
 
        /* Following values & calculations differ for different type of modes */
        if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-               /* Interlaced Mode */
                hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
                        (m->vsync_end - m->vdisplay) / 2);
                hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1437,7 +1336,6 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
                hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
                hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
        } else {
-               /* Progressive Mode */
                hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
                        m->vsync_end - m->vdisplay);
                hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1454,15 +1352,8 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
                hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
                                m->vtotal - m->vdisplay);
                hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
-               hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
-               hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
-               hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
-               hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
-               hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
-               hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
        }
 
-       /* Following values & calculations are same irrespective of mode type */
        hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
                        m->hsync_start - m->hdisplay - 2);
        hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
@@ -1486,16 +1377,12 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
        hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
        hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
 
-       /* Timing generator registers */
        hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
        hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
        hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
        hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
-       hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
-       hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
+       if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+               hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
 }
 
 static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1505,62 +1392,65 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
        else
                hdmi_v14_mode_apply(hdata);
 
-       hdmiphy_wait_for_pll(hdata);
-
-       clk_set_parent(hdata->mout_hdmi, hdata->sclk_hdmiphy);
-
-       /* enable HDMI and timing generator */
        hdmi_start(hdata, true);
 }
 
 static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 {
-       clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
-
-       /* reset hdmiphy */
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, 1);
+       usleep_range(10000, 12000);
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, 1);
+       usleep_range(10000, 12000);
        hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
        usleep_range(10000, 12000);
-       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
+       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
        usleep_range(10000, 12000);
 }
 
+static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
+{
+       u8 v = enable ? HDMI_PHY_ENABLE_MODE_SET : HDMI_PHY_DISABLE_MODE_SET;
+
+       if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+               writel(v, hdata->regs_hdmiphy + HDMIPHY5433_MODE_SET_DONE);
+}
+
 static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 {
        int ret;
-       int i;
+       const u8 *phy_conf;
 
-       /* pixel clock */
-       i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
-       if (i < 0) {
+       ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
+       if (ret < 0) {
                DRM_ERROR("failed to find hdmiphy conf\n");
                return;
        }
+       phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
+
+       hdmi_clk_set_parents(hdata, false);
+
+       hdmiphy_conf_reset(hdata);
 
-       ret = hdmiphy_reg_write_buf(hdata, 0,
-                       hdata->drv_data->phy_confs[i].conf, 32);
+       hdmiphy_enable_mode_set(hdata, true);
+       ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
        if (ret) {
                DRM_ERROR("failed to configure hdmiphy\n");
                return;
        }
-
+       hdmiphy_enable_mode_set(hdata, false);
+       hdmi_clk_set_parents(hdata, true);
        usleep_range(10000, 12000);
+       hdmiphy_wait_for_pll(hdata);
 }
 
 static void hdmi_conf_apply(struct hdmi_context *hdata)
 {
-       hdmiphy_conf_reset(hdata);
        hdmiphy_conf_apply(hdata);
-
        hdmi_start(hdata, false);
        hdmi_conf_init(hdata);
-
        hdmi_audio_init(hdata);
-
-       /* setting core registers */
        hdmi_mode_apply(hdata);
        hdmi_audio_control(hdata, true);
-
-       hdmi_regs_dump(hdata, "start");
 }
 
 static void hdmi_mode_set(struct drm_encoder *encoder,
@@ -1579,6 +1469,15 @@ static void hdmi_mode_set(struct drm_encoder *encoder,
        hdata->cea_video_id = drm_match_cea_mode(mode);
 }
 
+static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
+{
+       if (!hdata->sysreg)
+               return;
+
+       regmap_update_bits(hdata->sysreg, EXYNOS5433_SYSREG_DISP_HDMI_PHY,
+                          SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
+}
+
 static void hdmi_enable(struct drm_encoder *encoder)
 {
        struct hdmi_context *hdata = encoder_to_hdmi(encoder);
@@ -1591,10 +1490,13 @@ static void hdmi_enable(struct drm_encoder *encoder)
        if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
                DRM_DEBUG_KMS("failed to enable regulator bulk\n");
 
-       /* set pmu hdmiphy control bit to enable hdmiphy */
        regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
                        PMU_HDMI_PHY_ENABLE_BIT, 1);
 
+       hdmi_set_refclk(hdata, true);
+
+       hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, HDMI_PHY_POWER_OFF_EN);
+
        hdmi_conf_apply(hdata);
 
        hdata->powered = true;
@@ -1623,12 +1525,14 @@ static void hdmi_disable(struct drm_encoder *encoder)
        if (funcs && funcs->disable)
                (*funcs->disable)(crtc);
 
-       /* HDMI System Disable */
        hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
 
        cancel_delayed_work(&hdata->hotplug_work);
 
-       /* reset pmu hdmiphy control bit to disable hdmiphy */
+       hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, HDMI_PHY_POWER_OFF_EN);
+
+       hdmi_set_refclk(hdata, false);
+
        regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
                        PMU_HDMI_PHY_ENABLE_BIT, 0);
 
@@ -1670,6 +1574,57 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static int hdmi_clks_get(struct hdmi_context *hdata,
+                        const struct string_array_spec *names,
+                        struct clk **clks)
+{
+       struct device *dev = hdata->dev;
+       int i;
+
+       for (i = 0; i < names->count; ++i) {
+               struct clk *clk = devm_clk_get(dev, names->data[i]);
+
+               if (IS_ERR(clk)) {
+                       int ret = PTR_ERR(clk);
+
+                       dev_err(dev, "Cannot get clock %s, %d\n",
+                               names->data[i], ret);
+
+                       return ret;
+               }
+
+               clks[i] = clk;
+       }
+
+       return 0;
+}
+
+static int hdmi_clk_init(struct hdmi_context *hdata)
+{
+       const struct hdmi_driver_data *drv_data = hdata->drv_data;
+       int count = drv_data->clk_gates.count + drv_data->clk_muxes.count;
+       struct device *dev = hdata->dev;
+       struct clk **clks;
+       int ret;
+
+       if (!count)
+               return 0;
+
+       clks = devm_kzalloc(dev, sizeof(*clks) * count, GFP_KERNEL);
+       if (!clks)
+       return -ENOMEM;
+
+       hdata->clk_gates = clks;
+       hdata->clk_muxes = clks + drv_data->clk_gates.count;
+
+       ret = hdmi_clks_get(hdata, &drv_data->clk_gates, hdata->clk_gates);
+       if (ret)
+               return ret;
+
+       return hdmi_clks_get(hdata, &drv_data->clk_muxes, hdata->clk_muxes);
+}
+
+
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
        struct device *dev = hdata->dev;
@@ -1688,39 +1643,14 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
                DRM_ERROR("failed to get GPIO irq\n");
                return  hdata->irq;
        }
-       /* get clocks, power */
-       hdata->hdmi = devm_clk_get(dev, "hdmi");
-       if (IS_ERR(hdata->hdmi)) {
-               DRM_ERROR("failed to get clock 'hdmi'\n");
-               ret = PTR_ERR(hdata->hdmi);
-               goto fail;
-       }
-       hdata->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
-       if (IS_ERR(hdata->sclk_hdmi)) {
-               DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
-               ret = PTR_ERR(hdata->sclk_hdmi);
-               goto fail;
-       }
-       hdata->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
-       if (IS_ERR(hdata->sclk_pixel)) {
-               DRM_ERROR("failed to get clock 'sclk_pixel'\n");
-               ret = PTR_ERR(hdata->sclk_pixel);
-               goto fail;
-       }
-       hdata->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
-       if (IS_ERR(hdata->sclk_hdmiphy)) {
-               DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
-               ret = PTR_ERR(hdata->sclk_hdmiphy);
-               goto fail;
-       }
-       hdata->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
-       if (IS_ERR(hdata->mout_hdmi)) {
-               DRM_ERROR("failed to get clock 'mout_hdmi'\n");
-               ret = PTR_ERR(hdata->mout_hdmi);
-               goto fail;
-       }
 
-       clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
+       ret = hdmi_clk_init(hdata);
+       if (ret)
+               return ret;
+
+       ret = hdmi_clk_set_parents(hdata, false);
+       if (ret)
+               return ret;
 
        for (i = 0; i < ARRAY_SIZE(supply); ++i) {
                hdata->regul_bulk[i].supply = supply[i];
@@ -1745,9 +1675,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
                DRM_ERROR("failed to enable hdmi-en regulator\n");
 
        return ret;
-fail:
-       DRM_ERROR("HDMI resource init - failed\n");
-       return ret;
 }
 
 static struct of_device_id hdmi_match_types[] = {
@@ -1760,6 +1687,9 @@ static struct of_device_id hdmi_match_types[] = {
        }, {
                .compatible = "samsung,exynos5420-hdmi",
                .data = &exynos5420_hdmi_driver_data,
+       }, {
+               .compatible = "samsung,exynos5433-hdmi",
+               .data = &exynos5433_hdmi_driver_data,
        }, {
                /* end node */
        }
@@ -1830,7 +1760,6 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
 static int hdmi_probe(struct platform_device *pdev)
 {
        struct device_node *ddc_node, *phy_node;
-       const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct hdmi_context *hdata;
        struct resource *res;
@@ -1840,11 +1769,7 @@ static int hdmi_probe(struct platform_device *pdev)
        if (!hdata)
                return -ENOMEM;
 
-       match = of_match_device(hdmi_match_types, dev);
-       if (!match)
-               return -ENODEV;
-
-       hdata->drv_data = match->data;
+       hdata->drv_data = of_device_get_match_data(dev);
 
        platform_set_drvdata(pdev, hdata);
 
@@ -1867,7 +1792,6 @@ static int hdmi_probe(struct platform_device *pdev)
        if (ddc_node)
                goto out_get_ddc_adpt;
 
-       /* DDC i2c driver */
        ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
        if (!ddc_node) {
                DRM_ERROR("Failed to find ddc node in device tree\n");
@@ -1885,7 +1809,6 @@ out_get_ddc_adpt:
        if (phy_node)
                goto out_get_phy_port;
 
-       /* hdmiphy i2c driver */
        phy_node = of_parse_phandle(dev->of_node, "phy", 0);
        if (!phy_node) {
                DRM_ERROR("Failed to find hdmiphy node in device tree\n");
@@ -1929,6 +1852,16 @@ out_get_phy_port:
                goto err_hdmiphy;
        }
 
+       if (hdata->drv_data->has_sysreg) {
+               hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+                               "samsung,sysreg-phandle");
+               if (IS_ERR(hdata->sysreg)) {
+                       DRM_ERROR("sysreg regmap lookup failed.\n");
+                       ret = -EPROBE_DEFER;
+                       goto err_hdmiphy;
+               }
+       }
+
        pm_runtime_enable(dev);
 
        ret = component_add(&pdev->dev, &hdmi_component_ops);
@@ -1975,8 +1908,7 @@ static int exynos_hdmi_suspend(struct device *dev)
 {
        struct hdmi_context *hdata = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(hdata->sclk_hdmi);
-       clk_disable_unprepare(hdata->hdmi);
+       hdmi_clk_disable_gates(hdata);
 
        return 0;
 }
@@ -1986,17 +1918,9 @@ static int exynos_hdmi_resume(struct device *dev)
        struct hdmi_context *hdata = dev_get_drvdata(dev);
        int ret;
 
-       ret = clk_prepare_enable(hdata->hdmi);
-       if (ret < 0) {
-               DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
-               return ret;
-       }
-       ret = clk_prepare_enable(hdata->sclk_hdmi);
-       if (ret < 0) {
-               DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
-                         ret);
+       ret = hdmi_clk_enable_gates(hdata);
+       if (ret < 0)
                return ret;
-       }
 
        return 0;
 }
index 0a5a600..74a4269 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/component.h>
 
 #include <drm/exynos_drm.h>
@@ -103,8 +104,6 @@ struct mixer_context {
 
        struct mixer_resources  mixer_res;
        enum mixer_version_id   mxr_ver;
-       wait_queue_head_t       wait_vsync_queue;
-       atomic_t                wait_vsync_event;
 };
 
 struct mixer_drv_data {
@@ -787,12 +786,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
                        exynos_drm_crtc_finish_update(ctx->crtc, plane);
                }
-
-               /* set wait vsync event to zero and wake up queue. */
-               if (atomic_read(&ctx->wait_vsync_event)) {
-                       atomic_set(&ctx->wait_vsync_event, 0);
-                       wake_up(&ctx->wait_vsync_queue);
-               }
        }
 
 out:
@@ -1027,34 +1020,6 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
        mixer_vsync_set_update(mixer_ctx, true);
 }
 
-static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
-{
-       struct mixer_context *mixer_ctx = crtc->ctx;
-       int err;
-
-       if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
-               return;
-
-       err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
-       if (err < 0) {
-               DRM_DEBUG_KMS("failed to acquire vblank counter\n");
-               return;
-       }
-
-       atomic_set(&mixer_ctx->wait_vsync_event, 1);
-
-       /*
-        * wait for MIXER to signal VSYNC interrupt or return after
-        * timeout which is set to 50ms (refresh rate of 20).
-        */
-       if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
-                               !atomic_read(&mixer_ctx->wait_vsync_event),
-                               HZ/20))
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-
-       drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
-}
-
 static void mixer_enable(struct exynos_drm_crtc *crtc)
 {
        struct mixer_context *ctx = crtc->ctx;
@@ -1065,6 +1030,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        pm_runtime_get_sync(ctx->dev);
 
+       exynos_drm_pipe_clk_enable(crtc, true);
+
        mixer_vsync_set_update(ctx, false);
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -1094,6 +1061,8 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
        for (i = 0; i < MIXER_WIN_NR; i++)
                mixer_disable_plane(crtc, &ctx->planes[i]);
 
+       exynos_drm_pipe_clk_enable(crtc, false);
+
        pm_runtime_put(ctx->dev);
 
        clear_bit(MXR_BIT_POWERED, &ctx->flags);
@@ -1126,7 +1095,6 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
        .disable                = mixer_disable,
        .enable_vblank          = mixer_enable_vblank,
        .disable_vblank         = mixer_disable_vblank,
-       .wait_for_vblank        = mixer_wait_for_vblank,
        .atomic_begin           = mixer_atomic_begin,
        .update_plane           = mixer_update_plane,
        .disable_plane          = mixer_disable_plane,
@@ -1155,18 +1123,6 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
        .has_sclk = 1,
 };
 
-static const struct platform_device_id mixer_driver_types[] = {
-       {
-               .name           = "s5p-mixer",
-               .driver_data    = (unsigned long)&exynos4210_mxr_drv_data,
-       }, {
-               .name           = "exynos5-mixer",
-               .driver_data    = (unsigned long)&exynos5250_mxr_drv_data,
-       }, {
-               /* end node */
-       }
-};
-
 static struct of_device_id mixer_match_types[] = {
        {
                .compatible = "samsung,exynos4210-mixer",
@@ -1243,7 +1199,7 @@ static const struct component_ops mixer_component_ops = {
 static int mixer_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct mixer_drv_data *drv;
+       const struct mixer_drv_data *drv;
        struct mixer_context *ctx;
        int ret;
 
@@ -1253,23 +1209,13 @@ static int mixer_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       if (dev->of_node) {
-               const struct of_device_id *match;
-
-               match = of_match_node(mixer_match_types, dev->of_node);
-               drv = (struct mixer_drv_data *)match->data;
-       } else {
-               drv = (struct mixer_drv_data *)
-                       platform_get_device_id(pdev)->driver_data;
-       }
+       drv = of_device_get_match_data(dev);
 
        ctx->pdev = pdev;
        ctx->dev = dev;
        ctx->vp_enabled = drv->is_vp_enabled;
        ctx->has_sclk = drv->has_sclk;
        ctx->mxr_ver = drv->version;
-       init_waitqueue_head(&ctx->wait_vsync_queue);
-       atomic_set(&ctx->wait_vsync_event, 0);
 
        platform_set_drvdata(pdev, ctx);
 
@@ -1355,5 +1301,4 @@ struct platform_driver mixer_driver = {
        },
        .probe = mixer_probe,
        .remove = mixer_remove,
-       .id_table       = mixer_driver_types,
 };
index 8c891e5..169667a 100644 (file)
 #define HDMI_TG_VACT_ST4_L             HDMI_TG_BASE(0x0070)
 #define HDMI_TG_VACT_ST4_H             HDMI_TG_BASE(0x0074)
 #define HDMI_TG_3D                     HDMI_TG_BASE(0x00F0)
+#define HDMI_TG_DECON_EN               HDMI_TG_BASE(0x01e0)
 
 /* HDMI PHY Registers Offsets*/
-#define HDMIPHY_POWER          (0x74 >> 2)
-#define HDMIPHY_MODE_SET_DONE          (0x7c >> 2)
+#define HDMIPHY_POWER                  0x74
+#define HDMIPHY_MODE_SET_DONE          0x7c
+#define HDMIPHY5433_MODE_SET_DONE      0x84
 
 /* HDMI PHY Values */
 #define HDMI_PHY_POWER_ON              0x80
 #define PMU_HDMI_PHY_CONTROL           0x700
 #define PMU_HDMI_PHY_ENABLE_BIT                BIT(0)
 
+#define EXYNOS5433_SYSREG_DISP_HDMI_PHY        0x1008
+#define SYSREG_HDMI_REFCLK_INT_CLK     1
+
 #endif /* SAMSUNG_REGS_HDMI_H */
index c78cf3f..b9c714d 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_FSL_DCU
        tristate "DRM Support for Freescale DCU"
-       depends on DRM && OF && ARM
+       depends on DRM && OF && ARM && COMMON_CLK
        select BACKLIGHT_CLASS_DEVICE
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
new file mode 100644 (file)
index 0000000..558c61b
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# HiSilicon DRM device configuration.
+# Please keep this list sorted alphabetically
+
+source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
new file mode 100644 (file)
index 0000000..e3f6d49
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for HiSilicon DRM drivers.
+# Please keep this list sorted alphabetically
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
new file mode 100644 (file)
index 0000000..ea0df61
--- /dev/null
@@ -0,0 +1,18 @@
+config DRM_HISI_KIRIN
+       tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
+       depends on DRM && OF && ARM64
+       select DRM_KMS_HELPER
+       select DRM_GEM_CMA_HELPER
+       select DRM_KMS_CMA_HELPER
+       help
+         Choose this option if you have a HiSilicon Kirin chipset (hi6220).
+         If M is selected the module will be called kirin-drm.
+
+config HISI_KIRIN_DW_DSI
+       tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
+       depends on DRM_HISI_KIRIN
+       select DRM_MIPI_DSI
+       help
+        This selects support for HiSilicon Kirin SoC specific extensions for
+        the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
+        a hi6220-based SoC, you should select this option.
diff --git a/drivers/gpu/drm/hisilicon/kirin/Makefile b/drivers/gpu/drm/hisilicon/kirin/Makefile
new file mode 100644 (file)
index 0000000..cdf6158
--- /dev/null
@@ -0,0 +1,6 @@
+kirin-drm-y := kirin_drm_drv.o \
+              kirin_drm_ade.o
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o
+
+obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
new file mode 100644 (file)
index 0000000..bfbc215
--- /dev/null
@@ -0,0 +1,857 @@
+/*
+ * DesignWare MIPI DSI Host Controller v1.02 driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *     Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *     Xinliang Liu <xinliang.liu@linaro.org>
+ *     Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_of.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "dw_dsi_reg.h"
+
+#define MAX_TX_ESC_CLK         10
+#define ROUND(x, y)            ((x) / (y) + \
+                               ((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
+#define PHY_REF_CLK_RATE       19200000
+#define PHY_REF_CLK_PERIOD_PS  (1000000000 / (PHY_REF_CLK_RATE / 1000))
+
+#define encoder_to_dsi(encoder) \
+       container_of(encoder, struct dw_dsi, encoder)
+#define host_to_dsi(host) \
+       container_of(host, struct dw_dsi, host)
+
+struct mipi_phy_params {
+       u32 clk_t_lpx;
+       u32 clk_t_hs_prepare;
+       u32 clk_t_hs_zero;
+       u32 clk_t_hs_trial;
+       u32 clk_t_wakeup;
+       u32 data_t_lpx;
+       u32 data_t_hs_prepare;
+       u32 data_t_hs_zero;
+       u32 data_t_hs_trial;
+       u32 data_t_ta_go;
+       u32 data_t_ta_get;
+       u32 data_t_wakeup;
+       u32 hstx_ckg_sel;
+       u32 pll_fbd_div5f;
+       u32 pll_fbd_div1f;
+       u32 pll_fbd_2p;
+       u32 pll_enbwt;
+       u32 pll_fbd_p;
+       u32 pll_fbd_s;
+       u32 pll_pre_div1p;
+       u32 pll_pre_p;
+       u32 pll_vco_750M;
+       u32 pll_lpf_rs;
+       u32 pll_lpf_cs;
+       u32 clklp2hs_time;
+       u32 clkhs2lp_time;
+       u32 lp2hs_time;
+       u32 hs2lp_time;
+       u32 clk_to_data_delay;
+       u32 data_to_clk_delay;
+       u32 lane_byte_clk_kHz;
+       u32 clk_division;
+};
+
+struct dsi_hw_ctx {
+       void __iomem *base;
+       struct clk *pclk;
+};
+
+struct dw_dsi {
+       struct drm_encoder encoder;
+       struct drm_bridge *bridge;
+       struct mipi_dsi_host host;
+       struct drm_display_mode cur_mode;
+       struct dsi_hw_ctx *ctx;
+       struct mipi_phy_params phy;
+
+       u32 lanes;
+       enum mipi_dsi_pixel_format format;
+       unsigned long mode_flags;
+       bool enable;
+};
+
+struct dsi_data {
+       struct dw_dsi dsi;
+       struct dsi_hw_ctx ctx;
+};
+
+struct dsi_phy_range {
+       u32 min_range_kHz;
+       u32 max_range_kHz;
+       u32 pll_vco_750M;
+       u32 hstx_ckg_sel;
+};
+
+static const struct dsi_phy_range dphy_range_info[] = {
+       {   46875,    62500,   1,    7 },
+       {   62500,    93750,   0,    7 },
+       {   93750,   125000,   1,    6 },
+       {  125000,   187500,   0,    6 },
+       {  187500,   250000,   1,    5 },
+       {  250000,   375000,   0,    5 },
+       {  375000,   500000,   1,    4 },
+       {  500000,   750000,   0,    4 },
+       {  750000,  1000000,   1,    0 },
+       { 1000000,  1500000,   0,    0 }
+};
+
+/*
+ * dsi_calc_phy_rate - find the lowest supported D-PHY rate >= req_kHz
+ * @req_kHz: requested lane rate in kHz
+ * @phy: output - PLL dividers and range selections are filled in here
+ *
+ * Steps candidate rates upwards in 10kHz increments until the PLL
+ * divider search produces an output frequency at or above the request.
+ * Returns the achieved rate in kHz, or 0 when the request falls outside
+ * the supported ranges in dphy_range_info[].
+ */
+static u32 dsi_calc_phy_rate(u32 req_kHz, struct mipi_phy_params *phy)
+{
+       u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+       u32 tmp_kHz = req_kHz;
+       u32 i = 0;
+       u32 q_pll = 1;
+       u32 m_pll = 0;
+       u32 n_pll = 0;
+       u32 r_pll = 1;
+       u32 m_n = 0;
+       u32 m_n_int = 0;
+       u32 f_kHz = 0;
+       u64 temp;
+
+       /*
+        * Find a rate >= req_kHz.
+        */
+       do {
+               f_kHz = tmp_kHz;
+
+               /* pick the frequency range this candidate falls into */
+               for (i = 0; i < ARRAY_SIZE(dphy_range_info); i++)
+                       if (f_kHz >= dphy_range_info[i].min_range_kHz &&
+                           f_kHz <= dphy_range_info[i].max_range_kHz)
+                               break;
+
+               if (i == ARRAY_SIZE(dphy_range_info)) {
+                       DRM_ERROR("%dkHz out of range\n", f_kHz);
+                       return 0;
+               }
+
+               phy->pll_vco_750M = dphy_range_info[i].pll_vco_750M;
+               phy->hstx_ckg_sel = dphy_range_info[i].hstx_ckg_sel;
+
+               if (phy->hstx_ckg_sel <= 7 &&
+                   phy->hstx_ckg_sel >= 4)
+                       q_pll = 0x10 >> (7 - phy->hstx_ckg_sel);
+
+               /* m_n_int.m_n ~= f * q * Tref, with one fractional digit */
+               temp = f_kHz * (u64)q_pll * (u64)ref_clk_ps;
+               m_n_int = temp / (u64)1000000000;
+               m_n = (temp % (u64)1000000000) / (u64)100000000;
+
+               if (m_n_int % 2 == 0) {
+                       if (m_n * 6 >= 50) {
+                               n_pll = 2;
+                               m_pll = (m_n_int + 1) * n_pll;
+                       } else if (m_n * 6 >= 30) {
+                               n_pll = 3;
+                               m_pll = m_n_int * n_pll + 2;
+                       } else {
+                               n_pll = 1;
+                               m_pll = m_n_int * n_pll;
+                       }
+               } else {
+                       if (m_n * 6 >= 50) {
+                               n_pll = 1;
+                               m_pll = (m_n_int + 1) * n_pll;
+                       } else if (m_n * 6 >= 30) {
+                               n_pll = 1;
+                               m_pll = (m_n_int + 1) * n_pll;
+                       } else if (m_n * 6 >= 10) {
+                               n_pll = 3;
+                               m_pll = m_n_int * n_pll + 1;
+                       } else {
+                               n_pll = 2;
+                               m_pll = m_n_int * n_pll;
+                       }
+               }
+
+               if (n_pll == 1) {
+                       phy->pll_fbd_p = 0;
+                       phy->pll_pre_div1p = 1;
+               } else {
+                       phy->pll_fbd_p = n_pll;
+                       phy->pll_pre_div1p = 0;
+               }
+
+               /*
+                * NOTE(review): pll_fbd_2p is zeroed by the caller and never
+                * assigned before this check, so r_pll always stays 1 here —
+                * confirm that is intended.
+                */
+               if (phy->pll_fbd_2p <= 7 && phy->pll_fbd_2p >= 4)
+                       r_pll = 0x10 >> (7 - phy->pll_fbd_2p);
+
+               if (m_pll == 2) {
+                       phy->pll_pre_p = 0;
+                       phy->pll_fbd_s = 0;
+                       phy->pll_fbd_div1f = 0;
+                       phy->pll_fbd_div5f = 1;
+               } else if (m_pll >= 2 * 2 * r_pll && m_pll <= 2 * 4 * r_pll) {
+                       phy->pll_pre_p = m_pll / (2 * r_pll);
+                       phy->pll_fbd_s = 0;
+                       phy->pll_fbd_div1f = 1;
+                       phy->pll_fbd_div5f = 0;
+               } else if (m_pll >= 2 * 5 * r_pll && m_pll <= 2 * 150 * r_pll) {
+                       if (((m_pll / (2 * r_pll)) % 2) == 0) {
+                               phy->pll_pre_p =
+                                       (m_pll / (2 * r_pll)) / 2 - 1;
+                               phy->pll_fbd_s =
+                                       (m_pll / (2 * r_pll)) % 2 + 2;
+                       } else {
+                               phy->pll_pre_p =
+                                       (m_pll / (2 * r_pll)) / 2;
+                               phy->pll_fbd_s =
+                                       (m_pll / (2 * r_pll)) % 2;
+                       }
+                       phy->pll_fbd_div1f = 0;
+                       phy->pll_fbd_div5f = 0;
+               } else {
+                       phy->pll_pre_p = 0;
+                       phy->pll_fbd_s = 0;
+                       phy->pll_fbd_div1f = 0;
+                       phy->pll_fbd_div5f = 1;
+               }
+
+               f_kHz = (u64)1000000000 * (u64)m_pll /
+                       ((u64)ref_clk_ps * (u64)n_pll * (u64)q_pll);
+
+               if (f_kHz >= req_kHz)
+                       break;
+
+               tmp_kHz += 10;
+
+       } while (true);
+
+       return f_kHz;
+}
+
+/*
+ * dsi_get_phy_params - derive D-PHY timing parameters for a lane rate
+ * @phy_req_kHz: requested lane rate in kHz
+ * @phy: filled with PLL settings and lane timings; left zeroed when no
+ *       valid rate can be found
+ *
+ * The constants below presumably encode the MIPI D-PHY timing
+ * requirements in terms of UI (bit period) — TODO confirm against spec.
+ */
+static void dsi_get_phy_params(u32 phy_req_kHz,
+                              struct mipi_phy_params *phy)
+{
+       u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+       u32 phy_rate_kHz;
+       u32 ui;
+
+       memset(phy, 0, sizeof(*phy));
+
+       phy_rate_kHz = dsi_calc_phy_rate(phy_req_kHz, phy);
+       if (!phy_rate_kHz)
+               return;
+
+       /* UI (bit period) in ns: 1e6 / f[kHz] */
+       ui = 1000000 / phy_rate_kHz;
+
+       phy->clk_t_lpx = ROUND(50, 8 * ui);
+       phy->clk_t_hs_prepare = ROUND(133, 16 * ui) - 1;
+
+       phy->clk_t_hs_zero = ROUND(262, 8 * ui);
+       phy->clk_t_hs_trial = 2 * (ROUND(60, 8 * ui) - 1);
+       /*
+        * NOTE(review): here the "- 1" sits inside ROUND()'s divisor,
+        * unlike the other timings where it is applied to the result —
+        * verify the parenthesization is intended.
+        */
+       phy->clk_t_wakeup = ROUND(1000000, (ref_clk_ps / 1000) - 1);
+       if (phy->clk_t_wakeup > 0xff)
+               phy->clk_t_wakeup = 0xff;
+       phy->data_t_wakeup = phy->clk_t_wakeup;
+       phy->data_t_lpx = phy->clk_t_lpx;
+       phy->data_t_hs_prepare = ROUND(125 + 10 * ui, 16 * ui) - 1;
+       phy->data_t_hs_zero = ROUND(105 + 6 * ui, 8 * ui);
+       phy->data_t_hs_trial = 2 * (ROUND(60 + 4 * ui, 8 * ui) - 1);
+       phy->data_t_ta_go = 3;
+       phy->data_t_ta_get = 4;
+
+       phy->pll_enbwt = 1;
+       phy->clklp2hs_time = ROUND(407, 8 * ui) + 12;
+       phy->clkhs2lp_time = ROUND(105 + 12 * ui, 8 * ui);
+       phy->lp2hs_time = ROUND(240 + 12 * ui, 8 * ui) + 1;
+       phy->hs2lp_time = phy->clkhs2lp_time;
+       phy->clk_to_data_delay = 1 + phy->clklp2hs_time;
+       phy->data_to_clk_delay = ROUND(60 + 52 * ui, 8 * ui) +
+                               phy->clkhs2lp_time;
+
+       /* one byte-lane clock per 8 bits on the wire */
+       phy->lane_byte_clk_kHz = phy_rate_kHz / 8;
+       phy->clk_division =
+               DIV_ROUND_UP(phy->lane_byte_clk_kHz, MAX_TX_ESC_CLK);
+}
+
+/*
+ * Map a MIPI DSI pixel format to the DPI_COLOR_CODING register value.
+ *
+ * TODO: only RGB888 is supported for now; every format, including the
+ * explicit MIPI_DSI_FMT_RGB888 case, maps to DSI_24BITS_1.
+ */
+static u32 dsi_get_dpi_color_coding(enum mipi_dsi_pixel_format format)
+{
+       switch (format) {
+       case MIPI_DSI_FMT_RGB888:
+       default:
+               return DSI_24BITS_1;
+       }
+}
+
+/*
+ * dsi phy reg write function: write @val into D-PHY test register @reg
+ * through the two-step TST_CTRL0/TST_CTRL1 latch handshake (address is
+ * latched first, then the value; each latch is clocked by the 0x02/0x00
+ * pulse on TST_CTRL0).
+ */
+static void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
+{
+       u32 reg_write = 0x10000 + reg;
+
+       /*
+        * latch reg first
+        */
+       writel(reg_write, base + PHY_TST_CTRL1);
+       writel(0x02, base + PHY_TST_CTRL0);
+       writel(0x00, base + PHY_TST_CTRL0);
+
+       /*
+        * then latch value
+        */
+       writel(val, base + PHY_TST_CTRL1);
+       writel(0x02, base + PHY_TST_CTRL0);
+       writel(0x00, base + PHY_TST_CTRL0);
+}
+
+/*
+ * dsi_set_phy_timer - program lane count, clock division and the
+ * lp<->hs / clk<->data switching timings computed by dsi_get_phy_params().
+ */
+static void dsi_set_phy_timer(void __iomem *base,
+                             struct mipi_phy_params *phy,
+                             u32 lanes)
+{
+       u32 val;
+
+       /*
+        * Set lane value and phy stop wait time.
+        */
+       val = (lanes - 1) | (PHY_STOP_WAIT_TIME << 8);
+       writel(val, base + PHY_IF_CFG);
+
+       /*
+        * Set phy clk division.
+        */
+       val = readl(base + CLKMGR_CFG) | phy->clk_division;
+       writel(val, base + CLKMGR_CFG);
+
+       /*
+        * Set lp and hs switching params.
+        */
+       dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), phy->hs2lp_time);
+       dw_update_bits(base + PHY_TMR_CFG, 16, MASK(8), phy->lp2hs_time);
+       dw_update_bits(base + PHY_TMR_LPCLK_CFG, 16, MASK(10),
+                      phy->clkhs2lp_time);
+       dw_update_bits(base + PHY_TMR_LPCLK_CFG, 0, MASK(10),
+                      phy->clklp2hs_time);
+       dw_update_bits(base + CLK_DATA_TMR_CFG, 8, MASK(8),
+                      phy->data_to_clk_delay);
+       dw_update_bits(base + CLK_DATA_TMR_CFG, 0, MASK(8),
+                      phy->clk_to_data_delay);
+}
+
+/*
+ * dsi_set_mipi_phy - program all D-PHY timing/PLL registers and bring
+ * the PHY out of reset, then wait (up to ~100us) for the PHY clock to
+ * become ready.
+ *
+ * Fix: the ready-wait loop used "while (delay_count--)" on a u32, which
+ * wraps the counter to UINT_MAX when the loop times out — so the timeout
+ * warning below could never fire, and it fired spuriously when the PHY
+ * became ready on the very last iteration.
+ */
+static void dsi_set_mipi_phy(void __iomem *base,
+                            struct mipi_phy_params *phy,
+                            u32 lanes)
+{
+       u32 delay_count;
+       u32 val;
+       u32 i;
+
+       /* phy timer setting */
+       dsi_set_phy_timer(base, phy, lanes);
+
+       /*
+        * Reset to clean up phy tst params.
+        */
+       writel(0, base + PHY_RSTZ);
+       writel(0, base + PHY_TST_CTRL0);
+       writel(1, base + PHY_TST_CTRL0);
+       writel(0, base + PHY_TST_CTRL0);
+
+       /*
+        * Clock lane timing control setting: TLPX, THS-PREPARE,
+        * THS-ZERO, THS-TRAIL, TWAKEUP.
+        */
+       dsi_phy_tst_set(base, CLK_TLPX, phy->clk_t_lpx);
+       dsi_phy_tst_set(base, CLK_THS_PREPARE, phy->clk_t_hs_prepare);
+       dsi_phy_tst_set(base, CLK_THS_ZERO, phy->clk_t_hs_zero);
+       dsi_phy_tst_set(base, CLK_THS_TRAIL, phy->clk_t_hs_trial);
+       dsi_phy_tst_set(base, CLK_TWAKEUP, phy->clk_t_wakeup);
+
+       /*
+        * Data lane timing control setting: TLPX, THS-PREPARE,
+        * THS-ZERO, THS-TRAIL, TTA-GO, TTA-GET, TWAKEUP.
+        */
+       for (i = 0; i < lanes; i++) {
+               dsi_phy_tst_set(base, DATA_TLPX(i), phy->data_t_lpx);
+               dsi_phy_tst_set(base, DATA_THS_PREPARE(i),
+                               phy->data_t_hs_prepare);
+               dsi_phy_tst_set(base, DATA_THS_ZERO(i), phy->data_t_hs_zero);
+               dsi_phy_tst_set(base, DATA_THS_TRAIL(i), phy->data_t_hs_trial);
+               dsi_phy_tst_set(base, DATA_TTA_GO(i), phy->data_t_ta_go);
+               dsi_phy_tst_set(base, DATA_TTA_GET(i), phy->data_t_ta_get);
+               dsi_phy_tst_set(base, DATA_TWAKEUP(i), phy->data_t_wakeup);
+       }
+
+       /*
+        * physical configuration: I, pll I, pll II, pll III,
+        * pll IV, pll V.
+        */
+       dsi_phy_tst_set(base, PHY_CFG_I, phy->hstx_ckg_sel);
+       val = (phy->pll_fbd_div5f << 5) + (phy->pll_fbd_div1f << 4) +
+                               (phy->pll_fbd_2p << 1) + phy->pll_enbwt;
+       dsi_phy_tst_set(base, PHY_CFG_PLL_I, val);
+       dsi_phy_tst_set(base, PHY_CFG_PLL_II, phy->pll_fbd_p);
+       dsi_phy_tst_set(base, PHY_CFG_PLL_III, phy->pll_fbd_s);
+       val = (phy->pll_pre_div1p << 7) + phy->pll_pre_p;
+       dsi_phy_tst_set(base, PHY_CFG_PLL_IV, val);
+       val = (5 << 5) + (phy->pll_vco_750M << 4) + (phy->pll_lpf_rs << 2) +
+               phy->pll_lpf_cs;
+       dsi_phy_tst_set(base, PHY_CFG_PLL_V, val);
+
+       /* release the PHY: clock enable, then shutdownz, then rstz */
+       writel(PHY_ENABLECLK, base + PHY_RSTZ);
+       udelay(1);
+       writel(PHY_ENABLECLK | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+       udelay(1);
+       writel(PHY_ENABLECLK | PHY_UNRSTZ | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+       usleep_range(1000, 1500);
+
+       /*
+        * wait for phy's clock ready
+        *
+        * NOTE(review): this accepts EITHER status bit 0 or bit 2 being
+        * set, while the timeout message implies both phylock and
+        * phystopstateclklane are expected — confirm the intended check.
+        */
+       delay_count = 100;
+       while (delay_count) {
+               val = readl(base + PHY_STATUS);
+               if ((BIT(0) | BIT(2)) & val)
+                       break;
+
+               udelay(1);
+               delay_count--;
+       }
+
+       if (!delay_count)
+               DRM_INFO("phylock and phystopstateclklane is not ready.\n");
+}
+
+/*
+ * dsi_set_mode_timing - program the DPI/video timing registers from a
+ * drm_display_mode.
+ *
+ * Fix: the second debug line printed "vtol=" for the vertical total;
+ * corrected to "vtot=" to match the htot line.
+ */
+static void dsi_set_mode_timing(void __iomem *base,
+                               u32 lane_byte_clk_kHz,
+                               struct drm_display_mode *mode,
+                               enum mipi_dsi_pixel_format format)
+{
+       u32 hfp, hbp, hsw, vfp, vbp, vsw;
+       u32 hline_time;
+       u32 hsa_time;
+       u32 hbp_time;
+       u32 pixel_clk_kHz;
+       int htot, vtot;
+       u32 val;
+       u64 tmp;
+
+       val = dsi_get_dpi_color_coding(format);
+       writel(val, base + DPI_COLOR_CODING);
+
+       /* bit2: hsync polarity, bit1: vsync polarity (1 = active low) */
+       val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? 1 : 0) << 2;
+       val |= (mode->flags & DRM_MODE_FLAG_NVSYNC ? 1 : 0) << 1;
+       writel(val, base + DPI_CFG_POL);
+
+       /*
+        * The DSI IP accepts vertical timing using lines as normal,
+        * but horizontal timing is a mixture of pixel-clocks for the
+        * active region and byte-lane clocks for the blanking-related
+        * timings.  hfp is specified as the total hline_time in byte-
+        * lane clocks minus hsa, hbp and active.
+        */
+       pixel_clk_kHz = mode->clock;
+       htot = mode->htotal;
+       vtot = mode->vtotal;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hbp = mode->htotal - mode->hsync_end;
+       hsw = mode->hsync_end - mode->hsync_start;
+       vfp = mode->vsync_start - mode->vdisplay;
+       vbp = mode->vtotal - mode->vsync_end;
+       vsw = mode->vsync_end - mode->vsync_start;
+       /* VID_VSA_LINES register field is limited to 15 lines */
+       if (vsw > 15) {
+               DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+               vsw = 15;
+       }
+
+       hsa_time = (hsw * lane_byte_clk_kHz) / pixel_clk_kHz;
+       hbp_time = (hbp * lane_byte_clk_kHz) / pixel_clk_kHz;
+       tmp = (u64)htot * (u64)lane_byte_clk_kHz;
+       hline_time = DIV_ROUND_UP(tmp, pixel_clk_kHz);
+
+       /* all specified in byte-lane clocks */
+       writel(hsa_time, base + VID_HSA_TIME);
+       writel(hbp_time, base + VID_HBP_TIME);
+       writel(hline_time, base + VID_HLINE_TIME);
+
+       writel(vsw, base + VID_VSA_LINES);
+       writel(vbp, base + VID_VBP_LINES);
+       writel(vfp, base + VID_VFP_LINES);
+       writel(mode->vdisplay, base + VID_VACTIVE_LINES);
+       writel(mode->hdisplay, base + VID_PKT_SIZE);
+
+       DRM_DEBUG_DRIVER("htot=%d, hfp=%d, hbp=%d, hsw=%d\n",
+                        htot, hfp, hbp, hsw);
+       DRM_DEBUG_DRIVER("vtot=%d, vfp=%d, vbp=%d, vsw=%d\n",
+                        vtot, vfp, vbp, vsw);
+       DRM_DEBUG_DRIVER("hsa_time=%d, hbp_time=%d, hline_time=%d\n",
+                        hsa_time, hbp_time, hline_time);
+}
+
+/*
+ * dsi_set_video_mode - select the video-mode transmission type from the
+ * DSI device mode flags, enable HS clock requests and switch the
+ * controller into video mode.
+ */
+static void dsi_set_video_mode(void __iomem *base, unsigned long flags)
+{
+       u32 val;
+       u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+               MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+       u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
+               MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+       u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
+
+       /*
+        * choose video mode type; anything that is not one of the two
+        * non-burst variants falls back to burst mode
+        */
+       if ((flags & mode_mask) == non_burst_sync_pulse)
+               val = DSI_NON_BURST_SYNC_PULSES;
+       else if ((flags & mode_mask) == non_burst_sync_event)
+               val = DSI_NON_BURST_SYNC_EVENTS;
+       else
+               val = DSI_BURST_SYNC_PULSES_1;
+       writel(val, base + VID_MODE_CFG);
+
+       writel(PHY_TXREQUESTCLKHS, base + LPCLK_CTRL);
+       writel(DSI_VIDEO_MODE, base + MODE_CFG);
+}
+
+/*
+ * dsi_mipi_init - full controller bring-up for the cached mode: compute
+ * PHY parameters, reset the core, program PHY/timing/video-mode
+ * registers in order, then power the core back up.
+ */
+static void dsi_mipi_init(struct dw_dsi *dsi)
+{
+       struct dsi_hw_ctx *ctx = dsi->ctx;
+       struct mipi_phy_params *phy = &dsi->phy;
+       struct drm_display_mode *mode = &dsi->cur_mode;
+       u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+       void __iomem *base = ctx->base;
+       u32 dphy_req_kHz;
+
+       /*
+        * count phy params: required lane rate is the pixel bandwidth
+        * divided across the lanes
+        */
+       dphy_req_kHz = mode->clock * bpp / dsi->lanes;
+       dsi_get_phy_params(dphy_req_kHz, phy);
+
+       /* reset Core */
+       writel(RESET, base + PWR_UP);
+
+       /* set dsi phy params */
+       dsi_set_mipi_phy(base, phy, dsi->lanes);
+
+       /* set dsi mode timing */
+       dsi_set_mode_timing(base, phy->lane_byte_clk_kHz, mode, dsi->format);
+
+       /* set dsi video mode */
+       dsi_set_video_mode(base, dsi->mode_flags);
+
+       /* dsi wake up */
+       writel(POWERUP, base + PWR_UP);
+
+       DRM_DEBUG_DRIVER("lanes=%d, pixel_clk=%d kHz, bytes_freq=%d kHz\n",
+                        dsi->lanes, mode->clock, phy->lane_byte_clk_kHz);
+}
+
+/*
+ * dsi_encoder_disable - power down core, HS clock and PHY, then gate
+ * the peripheral clock.  No-op if already disabled.
+ */
+static void dsi_encoder_disable(struct drm_encoder *encoder)
+{
+       struct dw_dsi *dsi = encoder_to_dsi(encoder);
+       struct dsi_hw_ctx *ctx = dsi->ctx;
+       void __iomem *base = ctx->base;
+
+       if (!dsi->enable)
+               return;
+
+       writel(0, base + PWR_UP);
+       writel(0, base + LPCLK_CTRL);
+       writel(0, base + PHY_RSTZ);
+       clk_disable_unprepare(ctx->pclk);
+
+       dsi->enable = false;
+}
+
+/*
+ * dsi_encoder_enable - ungate the peripheral clock and run the full
+ * controller init for the cached mode.  No-op if already enabled.
+ */
+static void dsi_encoder_enable(struct drm_encoder *encoder)
+{
+       struct dw_dsi *dsi = encoder_to_dsi(encoder);
+       struct dsi_hw_ctx *ctx = dsi->ctx;
+       int ret;
+
+       if (dsi->enable)
+               return;
+
+       ret = clk_prepare_enable(ctx->pclk);
+       if (ret) {
+               DRM_ERROR("fail to enable pclk: %d\n", ret);
+               return;
+       }
+
+       dsi_mipi_init(dsi);
+
+       dsi->enable = true;
+}
+
+static void dsi_encoder_mode_set(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adj_mode)
+{
+       struct dw_dsi *dsi = encoder_to_dsi(encoder);
+
+       /* cache the adjusted mode; consumed later by dsi_mipi_init() */
+       drm_mode_copy(&dsi->cur_mode, adj_mode);
+}
+
+static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
+                                   struct drm_crtc_state *crtc_state,
+                                   struct drm_connector_state *conn_state)
+{
+       /* do nothing: no encoder-specific constraints to validate yet */
+       return 0;
+}
+
+/* atomic helper callbacks for the DSI encoder */
+static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
+       .atomic_check   = dsi_encoder_atomic_check,
+       .mode_set       = dsi_encoder_mode_set,
+       .enable         = dsi_encoder_enable,
+       .disable        = dsi_encoder_disable
+};
+
+/* no driver-private state to tear down beyond the core cleanup */
+static const struct drm_encoder_funcs dw_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * dw_drm_encoder_init - register the DSI encoder with the DRM device,
+ * deriving its possible CRTCs from the device tree.
+ *
+ * Returns 0 on success, -EINVAL when no CRTC can be found, or the
+ * drm_encoder_init() error.
+ */
+static int dw_drm_encoder_init(struct device *dev,
+                              struct drm_device *drm_dev,
+                              struct drm_encoder *encoder)
+{
+       int ret;
+       u32 crtc_mask = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+
+       if (!crtc_mask) {
+               DRM_ERROR("failed to find crtc mask\n");
+               return -EINVAL;
+       }
+
+       encoder->possible_crtcs = crtc_mask;
+       ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
+                              DRM_MODE_ENCODER_DSI, NULL);
+       if (ret) {
+               DRM_ERROR("failed to init dsi encoder\n");
+               return ret;
+       }
+
+       drm_encoder_helper_add(encoder, &dw_encoder_helper_funcs);
+
+       return 0;
+}
+
+/*
+ * dsi_host_attach - capture the attached panel/bridge's lane count,
+ * pixel format and mode flags.  Only the lane count (1..4) is
+ * validated here.
+ */
+static int dsi_host_attach(struct mipi_dsi_host *host,
+                          struct mipi_dsi_device *mdsi)
+{
+       struct dw_dsi *dsi = host_to_dsi(host);
+
+       if (mdsi->lanes < 1 || mdsi->lanes > 4) {
+               DRM_ERROR("dsi device params invalid\n");
+               return -EINVAL;
+       }
+
+       dsi->lanes = mdsi->lanes;
+       dsi->format = mdsi->format;
+       dsi->mode_flags = mdsi->mode_flags;
+
+       return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+                          struct mipi_dsi_device *mdsi)
+{
+       /* do nothing: attach only caches values, nothing to undo */
+       return 0;
+}
+
+/* mipi_dsi_host callbacks (no transfer op: video mode only) */
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+       .attach = dsi_host_attach,
+       .detach = dsi_host_detach,
+};
+
+/*
+ * dsi_host_init - register this controller as a MIPI DSI host so panel
+ * or bridge devices can attach.  Returns 0 or the registration error.
+ */
+static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
+{
+       struct mipi_dsi_host *host = &dsi->host;
+       int ret;
+
+       host->dev = dev;
+       host->ops = &dsi_host_ops;
+       ret = mipi_dsi_host_register(host);
+       if (ret) {
+               DRM_ERROR("failed to register dsi host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * dsi_bridge_init - link the external bridge (found in dsi_parse_dt)
+ * to the DSI encoder and attach it to the DRM device.
+ */
+static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
+{
+       struct drm_encoder *encoder = &dsi->encoder;
+       struct drm_bridge *bridge = dsi->bridge;
+       int ret;
+
+       /* associate the bridge to dsi encoder */
+       encoder->bridge = bridge;
+       bridge->encoder = encoder;
+
+       ret = drm_bridge_attach(dev, bridge);
+       if (ret) {
+               DRM_ERROR("failed to attach external bridge\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * dsi_bind - component bind: init encoder, register the DSI host and
+ * attach the external bridge.
+ *
+ * Fix: the original returned early on failure without undoing the
+ * earlier steps, leaking the encoder registration (and the DSI host on
+ * the bridge-init path).  Unwind with the usual goto-cleanup pattern.
+ */
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+       struct dsi_data *ddata = dev_get_drvdata(dev);
+       struct dw_dsi *dsi = &ddata->dsi;
+       struct drm_device *drm_dev = data;
+       int ret;
+
+       ret = dw_drm_encoder_init(dev, drm_dev, &dsi->encoder);
+       if (ret)
+               return ret;
+
+       ret = dsi_host_init(dev, dsi);
+       if (ret)
+               goto err_encoder_cleanup;
+
+       ret = dsi_bridge_init(drm_dev, dsi);
+       if (ret)
+               goto err_host_unregister;
+
+       return 0;
+
+err_host_unregister:
+       mipi_dsi_host_unregister(&dsi->host);
+err_encoder_cleanup:
+       drm_encoder_cleanup(&dsi->encoder);
+
+       return ret;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+       /* do nothing */
+}
+
+/* component framework callbacks */
+static const struct component_ops dsi_ops = {
+       .bind   = dsi_bind,
+       .unbind = dsi_unbind,
+};
+
+/*
+ * dsi_parse_dt - resolve the external bridge, peripheral clock and
+ * register region from the device tree.
+ *
+ * Fix: of_node_put() was called on @endpoint and @bridge_node *before*
+ * the nodes were used, dropping the references too early (use after
+ * put).  Each reference is now released only after its last use.
+ *
+ * Returns 0 on success, -ENODEV for missing DT nodes, -EPROBE_DEFER
+ * while the bridge driver has not registered yet, or a devm_* error.
+ */
+static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
+{
+       struct dsi_hw_ctx *ctx = dsi->ctx;
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *endpoint, *bridge_node;
+       struct drm_bridge *bridge;
+       struct resource *res;
+
+       /*
+        * Get the endpoint node. In our case, dsi has one output port1
+        * to which the external HDMI bridge is connected.
+        */
+       endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+       if (!endpoint) {
+               DRM_ERROR("no valid endpoint node\n");
+               return -ENODEV;
+       }
+
+       bridge_node = of_graph_get_remote_port_parent(endpoint);
+       of_node_put(endpoint);
+       if (!bridge_node) {
+               DRM_ERROR("no valid bridge node\n");
+               return -ENODEV;
+       }
+
+       bridge = of_drm_find_bridge(bridge_node);
+       of_node_put(bridge_node);
+       if (!bridge) {
+               DRM_INFO("wait for external HDMI bridge driver.\n");
+               return -EPROBE_DEFER;
+       }
+       dsi->bridge = bridge;
+
+       ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(ctx->pclk)) {
+               DRM_ERROR("failed to get pclk clock\n");
+               return PTR_ERR(ctx->pclk);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ctx->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ctx->base)) {
+               DRM_ERROR("failed to remap dsi io region\n");
+               return PTR_ERR(ctx->base);
+       }
+
+       return 0;
+}
+
+/*
+ * dsi_probe - allocate driver data, parse the device tree and register
+ * with the component framework (bind happens via dsi_ops).
+ */
+static int dsi_probe(struct platform_device *pdev)
+{
+       struct dsi_data *data;
+       struct dw_dsi *dsi;
+       struct dsi_hw_ctx *ctx;
+       int ret;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               DRM_ERROR("failed to allocate dsi data.\n");
+               return -ENOMEM;
+       }
+       dsi = &data->dsi;
+       ctx = &data->ctx;
+       dsi->ctx = ctx;
+
+       ret = dsi_parse_dt(pdev, dsi);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, data);
+
+       return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_remove(struct platform_device *pdev)
+{
+       /* all other resources are devm-managed */
+       component_del(&pdev->dev, &dsi_ops);
+
+       return 0;
+}
+
+static const struct of_device_id dsi_of_match[] = {
+       {.compatible = "hisilicon,hi6220-dsi"},
+       { }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+       .probe = dsi_probe,
+       .remove = dsi_remove,
+       .driver = {
+               .name = "dw-dsi",
+               .of_match_table = dsi_of_match,
+       },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("DesignWare MIPI DSI Host Controller v1.02 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
new file mode 100644 (file)
index 0000000..18808fc
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DW_DSI_REG_H__
+#define __DW_DSI_REG_H__
+
+#define MASK(x)                                (BIT(x) - 1)
+
+/*
+ * regs
+ */
+#define PWR_UP                  0x04  /* Core power-up */
+#define RESET                   0
+#define POWERUP                 BIT(0)
+#define PHY_IF_CFG              0xA4  /* D-PHY interface configuration */
+#define CLKMGR_CFG              0x08  /* the internal clock dividers */
+#define PHY_RSTZ                0xA0  /* D-PHY reset control */
+#define PHY_ENABLECLK           BIT(2)
+#define PHY_UNRSTZ              BIT(1)
+#define PHY_UNSHUTDOWNZ         BIT(0)
+#define PHY_TST_CTRL0           0xB4  /* D-PHY test interface control 0 */
+#define PHY_TST_CTRL1           0xB8  /* D-PHY test interface control 1 */
+#define CLK_TLPX                0x10
+#define CLK_THS_PREPARE         0x11
+#define CLK_THS_ZERO            0x12
+#define CLK_THS_TRAIL           0x13
+#define CLK_TWAKEUP             0x14
+#define DATA_TLPX(x)            (0x20 + ((x) << 4))
+#define DATA_THS_PREPARE(x)     (0x21 + ((x) << 4))
+#define DATA_THS_ZERO(x)        (0x22 + ((x) << 4))
+#define DATA_THS_TRAIL(x)       (0x23 + ((x) << 4))
+#define DATA_TTA_GO(x)          (0x24 + ((x) << 4))
+#define DATA_TTA_GET(x)         (0x25 + ((x) << 4))
+#define DATA_TWAKEUP(x)         (0x26 + ((x) << 4))
+#define PHY_CFG_I               0x60
+#define PHY_CFG_PLL_I           0x63
+#define PHY_CFG_PLL_II          0x64
+#define PHY_CFG_PLL_III         0x65
+#define PHY_CFG_PLL_IV          0x66
+#define PHY_CFG_PLL_V           0x67
+#define DPI_COLOR_CODING        0x10  /* DPI color coding */
+#define DPI_CFG_POL             0x14  /* DPI polarity configuration */
+#define VID_HSA_TIME            0x48  /* Horizontal Sync Active time */
+#define VID_HBP_TIME            0x4C  /* Horizontal Back Porch time */
+#define VID_HLINE_TIME          0x50  /* Line time */
+#define VID_VSA_LINES           0x54  /* Vertical Sync Active period */
+#define VID_VBP_LINES           0x58  /* Vertical Back Porch period */
+#define VID_VFP_LINES           0x5C  /* Vertical Front Porch period */
+#define VID_VACTIVE_LINES       0x60  /* Vertical resolution */
+#define VID_PKT_SIZE            0x3C  /* Video packet size */
+#define VID_MODE_CFG            0x38  /* Video mode configuration */
+#define PHY_TMR_CFG             0x9C  /* Data lanes timing configuration */
+#define BTA_TO_CNT              0x8C  /* Response timeout definition */
+#define PHY_TMR_LPCLK_CFG       0x98  /* clock lane timing configuration */
+#define CLK_DATA_TMR_CFG        0xCC
+#define LPCLK_CTRL              0x94  /* Low-power in clock lane */
+#define PHY_TXREQUESTCLKHS      BIT(0)
+#define MODE_CFG                0x34  /* Video or Command mode selection */
+#define PHY_STATUS              0xB0  /* D-PHY PPI status interface */
+
+#define        PHY_STOP_WAIT_TIME      0x30
+
+/*
+ * regs relevant enum
+ */
+enum dpi_color_coding {
+       DSI_24BITS_1 = 5,
+};
+
+enum dsi_video_mode_type {
+       DSI_NON_BURST_SYNC_PULSES = 0,
+       DSI_NON_BURST_SYNC_EVENTS,
+       DSI_BURST_SYNC_PULSES_1,
+       DSI_BURST_SYNC_PULSES_2
+};
+
+enum dsi_work_mode {
+       DSI_VIDEO_MODE = 0,
+       DSI_COMMAND_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
+/*
+ * dw_update_bits - read-modify-write a @mask-wide field at @bit_start
+ * within the 32-bit register at @addr, leaving all other bits intact.
+ * Not atomic: callers must ensure the register is not written
+ * concurrently.
+ */
+static inline void dw_update_bits(void __iomem *addr, u32 bit_start,
+                                 u32 mask, u32 val)
+{
+       u32 tmp, orig;
+
+       orig = readl(addr);
+       tmp = orig & ~(mask << bit_start);
+       tmp |= (val & mask) << bit_start;
+       writel(tmp, addr);
+}
+
+#endif /* __DW_DSI_REG_H__ */
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
new file mode 100644 (file)
index 0000000..4cf281b
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_ADE_REG_H__
+#define __KIRIN_ADE_REG_H__
+
+/*
+ * ADE Registers
+ */
+#define MASK(x)                                (BIT(x) - 1)
+
+#define ADE_CTRL                       0x0004
+#define FRM_END_START_OFST             0
+#define FRM_END_START_MASK             MASK(2)
+#define AUTO_CLK_GATE_EN_OFST          0
+#define AUTO_CLK_GATE_EN               BIT(0)
+#define ADE_DISP_SRC_CFG               0x0018
+#define ADE_CTRL1                      0x008C
+#define ADE_EN                         0x0100
+#define ADE_DISABLE                    0
+#define ADE_ENABLE                     1
+/* reset and reload regs */
+#define ADE_SOFT_RST_SEL(x)            (0x0078 + (x) * 0x4)
+#define ADE_RELOAD_DIS(x)              (0x00AC + (x) * 0x4)
+#define RDMA_OFST                      0
+#define CLIP_OFST                      15
+#define SCL_OFST                       21
+#define CTRAN_OFST                     24
+#define OVLY_OFST                      37 /* 32+5 */
+/* channel regs */
+#define RD_CH_CTRL(x)                  (0x1004 + (x) * 0x80)
+#define RD_CH_ADDR(x)                  (0x1008 + (x) * 0x80)
+#define RD_CH_SIZE(x)                  (0x100C + (x) * 0x80)
+#define RD_CH_STRIDE(x)                        (0x1010 + (x) * 0x80)
+#define RD_CH_SPACE(x)                 (0x1014 + (x) * 0x80)
+#define RD_CH_EN(x)                    (0x1020 + (x) * 0x80)
+/* overlay regs */
+#define ADE_OVLY1_TRANS_CFG            0x002C
+#define ADE_OVLY_CTL                   0x0098
+#define ADE_OVLY_CH_XY0(x)             (0x2004 + (x) * 4)
+#define ADE_OVLY_CH_XY1(x)             (0x2024 + (x) * 4)
+#define ADE_OVLY_CH_CTL(x)             (0x204C + (x) * 4)
+#define ADE_OVLY_OUTPUT_SIZE(x)                (0x2070 + (x) * 8)
+#define OUTPUT_XSIZE_OFST              16
+#define ADE_OVLYX_CTL(x)               (0x209C + (x) * 4)
+#define CH_OVLY_SEL_OFST(x)            ((x) * 4)
+#define CH_OVLY_SEL_MASK               MASK(2)
+#define CH_OVLY_SEL_VAL(x)             ((x) + 1)
+#define CH_ALP_MODE_OFST               0
+#define CH_ALP_SEL_OFST                        2
+#define CH_UNDER_ALP_SEL_OFST          4
+#define CH_EN_OFST                     6
+#define CH_ALP_GBL_OFST                        15
+#define CH_SEL_OFST                    28
+/* ctran regs */
+#define ADE_CTRAN_DIS(x)               (0x5004 + (x) * 0x100)
+#define CTRAN_BYPASS_ON                        1
+#define CTRAN_BYPASS_OFF               0
+#define ADE_CTRAN_IMAGE_SIZE(x)                (0x503C + (x) * 0x100)
+/* clip regs */
+#define ADE_CLIP_DISABLE(x)            (0x6800 + (x) * 0x100)
+#define ADE_CLIP_SIZE0(x)              (0x6804 + (x) * 0x100)
+#define ADE_CLIP_SIZE1(x)              (0x6808 + (x) * 0x100)
+
+/*
+ * LDI Registers
+ */
+#define LDI_HRZ_CTRL0                  0x7400
+#define HBP_OFST                       20
+#define LDI_HRZ_CTRL1                  0x7404
+#define LDI_VRT_CTRL0                  0x7408
+#define VBP_OFST                       20
+#define LDI_VRT_CTRL1                  0x740C
+#define LDI_PLR_CTRL                   0x7410
+#define FLAG_NVSYNC                    BIT(0)
+#define FLAG_NHSYNC                    BIT(1)
+#define FLAG_NPIXCLK                   BIT(2)
+#define FLAG_NDE                       BIT(3)
+#define LDI_DSP_SIZE                   0x7414
+#define VSIZE_OFST                     20
+#define LDI_INT_EN                     0x741C
+#define FRAME_END_INT_EN_OFST          1
+#define LDI_CTRL                       0x7420
+#define BPP_OFST                       3
+#define DATA_GATE_EN                   BIT(2)
+#define LDI_EN                         BIT(0)
+#define LDI_MSK_INT                    0x7428
+#define LDI_INT_CLR                    0x742C
+#define LDI_WORK_MODE                  0x7430
+#define LDI_HDMI_DSI_GT                        0x7434
+
+/*
+ * ADE media bus service regs
+ */
+#define ADE0_QOSGENERATOR_MODE         0x010C
+#define QOSGENERATOR_MODE_MASK         MASK(2)
+#define ADE0_QOSGENERATOR_EXTCONTROL   0x0118
+#define SOCKET_QOS_EN                  BIT(0)
+#define ADE1_QOSGENERATOR_MODE         0x020C
+#define ADE1_QOSGENERATOR_EXTCONTROL   0x0218
+
+/*
+ * ADE regs relevant enums
+ */
+/* when ADE register writes take effect (ADE_CTRL FRM_END_START field) */
+enum frame_end_start {
+       /* regs take effect in every vsync */
+       REG_EFFECTIVE_IN_VSYNC = 0,
+       /* regs take effect in first ade en and every frame end */
+       REG_EFFECTIVE_IN_ADEEN_FRMEND,
+       /* regs take effect in ade en immediately */
+       REG_EFFECTIVE_IN_ADEEN,
+       /* regs take effect in first vsync and every frame end */
+       REG_EFFECTIVE_IN_VSYNC_FRMEND
+};
+
+/* ADE hardware pixel format codes (see ade_get_format()) */
+enum ade_fb_format {
+       ADE_RGB_565 = 0,
+       ADE_BGR_565,
+       ADE_XRGB_8888,
+       ADE_XBGR_8888,
+       ADE_ARGB_8888,
+       ADE_ABGR_8888,
+       ADE_RGBA_8888,
+       ADE_BGRA_8888,
+       ADE_RGB_888,
+       ADE_BGR_888 = 9,
+       ADE_FORMAT_UNSUPPORT = 800
+};
+
+/* read-DMA channels; only one is wired up so far */
+enum ade_channel {
+       ADE_CH1 = 0,    /* channel 1 for primary plane */
+       ADE_CH_NUM
+};
+
+/* scaler units */
+enum ade_scale {
+       ADE_SCL1 = 0,
+       ADE_SCL2,
+       ADE_SCL3,
+       ADE_SCL_NUM
+};
+
+/* color transform (ctran/csc) units */
+enum ade_ctran {
+       ADE_CTRAN1 = 0,
+       ADE_CTRAN2,
+       ADE_CTRAN3,
+       ADE_CTRAN4,
+       ADE_CTRAN5,
+       ADE_CTRAN6,
+       ADE_CTRAN_NUM
+};
+
+/* overlay compositors */
+enum ade_overlay {
+       ADE_OVLY1 = 0,
+       ADE_OVLY2,
+       ADE_OVLY3,
+       ADE_OVLY_NUM
+};
+
+/* where a channel's alpha comes from */
+enum ade_alpha_mode {
+       ADE_ALP_GLOBAL = 0,
+       ADE_ALP_PIXEL,
+       ADE_ALP_PIXEL_AND_GLB
+};
+
+/* blending multiplier selection */
+enum ade_alpha_blending_mode {
+       ADE_ALP_MUL_COEFF_0 = 0,        /* alpha */
+       ADE_ALP_MUL_COEFF_1,            /* 1-alpha */
+       ADE_ALP_MUL_COEFF_2,            /* 0 */
+       ADE_ALP_MUL_COEFF_3             /* 1 */
+};
+
+/*
+ * LDI regs relevant enums
+ */
+/* pixel clock gate towards DSI (LDI_HDMI_DSI_GT) */
+enum dsi_pclk_en {
+       DSI_PCLK_ON = 0,
+       DSI_PCLK_OFF
+};
+
+/* LDI output bit depth (LDI_CTRL BPP field) */
+enum ldi_output_format {
+       LDI_OUT_RGB_565 = 0,
+       LDI_OUT_RGB_666,
+       LDI_OUT_RGB_888
+};
+
+/* LDI_WORK_MODE values */
+enum ldi_work_mode {
+       TEST_MODE = 0,
+       NORMAL_MODE
+};
+
+/* display source feeding the LDI (ADE_DISP_SRC_CFG) */
+enum ldi_input_source {
+       DISP_SRC_NONE = 0,
+       DISP_SRC_OVLY2,
+       DISP_SRC_DISP,
+       DISP_SRC_ROT,
+       DISP_SRC_SCL2
+};
+
+/*
+ * ADE media bus service relevant enums
+ */
+/* NOC QoS generator operating modes */
+enum qos_generator_mode {
+       FIXED_MODE = 0,
+       LIMITER_MODE,
+       BYPASS_MODE,
+       REGULATOR_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
+/*
+ * Replace the field (mask << bit_start) in the register at @addr with
+ * (val & mask) << bit_start, leaving all other bits untouched.
+ */
+static inline void ade_update_bits(void __iomem *addr, u32 bit_start,
+                                  u32 mask, u32 val)
+{
+       u32 cur = readl(addr);
+
+       cur &= ~(mask << bit_start);
+       cur |= (val & mask) << bit_start;
+       writel(cur, addr);
+}
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
new file mode 100644 (file)
index 0000000..fba6372
--- /dev/null
@@ -0,0 +1,1057 @@
+/*
+ * Hisilicon Hi6220 SoC ADE(Advanced Display Engine)'s crtc&plane driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *     Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *     Xinliang Liu <xinliang.liu@linaro.org>
+ *     Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <video/display_timing.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "kirin_drm_drv.h"
+#include "kirin_ade_reg.h"
+
+#define PRIMARY_CH     ADE_CH1 /* primary plane */
+#define OUT_OVLY       ADE_OVLY2 /* output overlay compositor */
+#define ADE_DEBUG      1
+
+#define to_ade_crtc(crtc) \
+       container_of(crtc, struct ade_crtc, base)
+
+#define to_ade_plane(plane) \
+       container_of(plane, struct ade_plane, base)
+
+/* hardware context shared by the crtc and all planes */
+struct ade_hw_ctx {
+       void __iomem  *base;            /* ADE MMIO register block */
+       struct regmap *noc_regmap;      /* media NOC syscon (QoS generators) */
+       struct clk *ade_core_clk;
+       struct clk *media_noc_clk;
+       struct clk *ade_pix_clk;        /* pixel clock, set from mode->clock */
+       struct reset_control *reset;
+       bool power_on;                  /* tracks ade_power_up()/_down() */
+       int irq;
+};
+
+/* crtc wrapper */
+struct ade_crtc {
+       struct drm_crtc base;
+       struct ade_hw_ctx *ctx;
+       bool enable;            /* scanout currently running */
+       u32 out_format;         /* written to the LDI_CTRL BPP field */
+};
+
+/* plane wrapper */
+struct ade_plane {
+       struct drm_plane base;
+       void *ctx;      /* points at the shared struct ade_hw_ctx */
+       u8 ch; /* channel */
+};
+
+/* everything allocated for one ADE instance */
+struct ade_data {
+       struct ade_crtc acrtc;
+       struct ade_plane aplane[ADE_CH_NUM];
+       struct ade_hw_ctx ctx;
+};
+
+/* ade-format info: maps a DRM fourcc to the hw format code */
+struct ade_format {
+       u32 pixel_format;
+       enum ade_fb_format ade_format;
+};
+
+static const struct ade_format ade_formats[] = {
+       /* 16bpp RGB: */
+       { DRM_FORMAT_RGB565, ADE_RGB_565 },
+       { DRM_FORMAT_BGR565, ADE_BGR_565 },
+       /* 24bpp RGB: */
+       { DRM_FORMAT_RGB888, ADE_RGB_888 },
+       { DRM_FORMAT_BGR888, ADE_BGR_888 },
+       /* 32bpp [A]RGB: */
+       { DRM_FORMAT_XRGB8888, ADE_XRGB_8888 },
+       { DRM_FORMAT_XBGR8888, ADE_XBGR_8888 },
+       { DRM_FORMAT_RGBA8888, ADE_RGBA_8888 },
+       { DRM_FORMAT_BGRA8888, ADE_BGRA_8888 },
+       { DRM_FORMAT_ARGB8888, ADE_ARGB_8888 },
+       { DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
+};
+
+/* fourcc list advertised for the primary plane's channel */
+static const u32 channel_formats1[] = {
+       /* channel 1,2,3,4 */
+       DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888
+};
+
+/*
+ * Hand back the fourcc list supported by channel @ch through @formats
+ * and return its length; returns 0 with *formats = NULL for an unknown
+ * channel.
+ */
+u32 ade_get_channel_formats(u8 ch, const u32 **formats)
+{
+       if (ch == ADE_CH1) {
+               *formats = channel_formats1;
+               return ARRAY_SIZE(channel_formats1);
+       }
+
+       DRM_ERROR("no this channel %d\n", ch);
+       *formats = NULL;
+       return 0;
+}
+
+/* convert from fourcc format to ade format */
+static u32 ade_get_format(u32 pixel_format)
+{
+       const struct ade_format *entry;
+       const struct ade_format *last = ade_formats + ARRAY_SIZE(ade_formats);
+
+       for (entry = ade_formats; entry < last; entry++)
+               if (entry->pixel_format == pixel_format)
+                       return entry->ade_format;
+
+       /* caller gets a sentinel it can check against */
+       DRM_ERROR("Not found pixel format!!fourcc_format= %d\n",
+                 pixel_format);
+       return ADE_FORMAT_UNSUPPORT;
+}
+
+/*
+ * Set (val != 0) or clear reload-disable bit @bit_num, which lives in
+ * one of the 32-bit ADE_RELOAD_DIS(0..1) registers.
+ */
+static void ade_update_reload_bit(void __iomem *base, u32 bit_num, u32 val)
+{
+       u32 bit_ofst, reg_num;
+
+       bit_ofst = bit_num % 32;
+       reg_num = bit_num / 32;
+
+       ade_update_bits(base + ADE_RELOAD_DIS(reg_num), bit_ofst,
+                       MASK(1), !!val);
+}
+
+/* Return the current value (0 or 1) of reload-disable bit @bit_num. */
+static u32 ade_read_reload_bit(void __iomem *base, u32 bit_num)
+{
+       u32 reg_num = bit_num / 32;
+       u32 bit_ofst = bit_num % 32;
+
+       return (readl(base + ADE_RELOAD_DIS(reg_num)) >> bit_ofst) & 1;
+}
+
+/*
+ * Baseline register setup, re-run after every power-up: enable the
+ * automatic clock gate, clear overlay/reset/reload state and select
+ * when register writes take effect.
+ *
+ * NOTE(review): MASK(32) expands to BIT(32) - 1; fine where BIT() is a
+ * 64-bit shift, UB on a 32-bit build — confirm the target guarantees
+ * a 64-bit BIT().
+ */
+static void ade_init(struct ade_hw_ctx *ctx)
+{
+       void __iomem *base = ctx->base;
+
+       /* enable clk gate */
+       ade_update_bits(base + ADE_CTRL1, AUTO_CLK_GATE_EN_OFST,
+                       AUTO_CLK_GATE_EN, ADE_ENABLE);
+       /* clear overlay */
+       writel(0, base + ADE_OVLY1_TRANS_CFG);
+       writel(0, base + ADE_OVLY_CTL);
+       writel(0, base + ADE_OVLYX_CTL(OUT_OVLY));
+       /* clear reset and reload regs */
+       writel(MASK(32), base + ADE_SOFT_RST_SEL(0));
+       writel(MASK(32), base + ADE_SOFT_RST_SEL(1));
+       writel(MASK(32), base + ADE_RELOAD_DIS(0));
+       writel(MASK(32), base + ADE_RELOAD_DIS(1));
+       /*
+        * for video mode, all the ade registers should
+        * become effective at frame end.
+        */
+       ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
+                       FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+}
+
+/*
+ * Program the pixel clock for @mode and report the rate actually
+ * achieved back through @adj_mode->clock (in kHz).
+ *
+ * A set_rate failure is only logged: mode_valid is expected to have
+ * rejected rates the clock cannot provide.
+ */
+static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adj_mode)
+{
+       u32 clk_Hz = mode->clock * 1000;
+       int ret;
+
+       /*
+        * Success should be guaranteed in mode_valid call back,
+        * so failure shouldn't happen here
+        */
+       ret = clk_set_rate(ctx->ade_pix_clk, clk_Hz);
+       if (ret)
+               /* clk_Hz is u32, so use %u (was %d: format mismatch) */
+               DRM_ERROR("failed to set pixel clk %uHz (%d)\n", clk_Hz, ret);
+       adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
+}
+
+/*
+ * Program the LDI (LCD interface) timing registers from @mode, size the
+ * output overlay compositor and ctran6 stage to match, then let
+ * ade_set_pix_clk() record the achieved pixel clock in @adj_mode.
+ */
+static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adj_mode)
+{
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       void __iomem *base = ctx->base;
+       u32 width = mode->hdisplay;
+       u32 height = mode->vdisplay;
+       u32 hfp, hbp, hsw, vfp, vbp, vsw;
+       u32 plr_flags;
+
+       /* sync polarity flags for LDI_PLR_CTRL */
+       plr_flags = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? FLAG_NVSYNC : 0;
+       plr_flags |= (mode->flags & DRM_MODE_FLAG_NHSYNC) ? FLAG_NHSYNC : 0;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hbp = mode->htotal - mode->hsync_end;
+       hsw = mode->hsync_end - mode->hsync_start;
+       vfp = mode->vsync_start - mode->vdisplay;
+       vbp = mode->vtotal - mode->vsync_end;
+       vsw = mode->vsync_end - mode->vsync_start;
+       /* vsync width is clamped to the hardware maximum of 15 lines */
+       if (vsw > 15) {
+               DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+               vsw = 15;
+       }
+
+       writel((hbp << HBP_OFST) | hfp, base + LDI_HRZ_CTRL0);
+        /* the configured value is actual value - 1 */
+       writel(hsw - 1, base + LDI_HRZ_CTRL1);
+       writel((vbp << VBP_OFST) | vfp, base + LDI_VRT_CTRL0);
+        /* the configured value is actual value - 1 */
+       writel(vsw - 1, base + LDI_VRT_CTRL1);
+        /* the configured value is actual value - 1 */
+       writel(((height - 1) << VSIZE_OFST) | (width - 1),
+              base + LDI_DSP_SIZE);
+       writel(plr_flags, base + LDI_PLR_CTRL);
+
+       /* set overlay compositor output size */
+       writel(((width - 1) << OUTPUT_XSIZE_OFST) | (height - 1),
+              base + ADE_OVLY_OUTPUT_SIZE(OUT_OVLY));
+
+       /* ctran6 setting */
+       writel(CTRAN_BYPASS_ON, base + ADE_CTRAN_DIS(ADE_CTRAN6));
+        /* the configured value is actual value - 1 */
+       writel(width * height - 1, base + ADE_CTRAN_IMAGE_SIZE(ADE_CTRAN6));
+       ade_update_reload_bit(base, CTRAN_OFST + ADE_CTRAN6, 0);
+
+       ade_set_pix_clk(ctx, mode, adj_mode);
+
+       DRM_DEBUG_DRIVER("set mode: %dx%d\n", width, height);
+}
+
+/*
+ * Power up the ADE: enable the media NOC clock, release the reset and
+ * enable the core clock, then re-run the register init.
+ *
+ * On any failure everything acquired so far is rolled back (the
+ * original code leaked the NOC clock — and later the reset deassert —
+ * on the error paths), so the caller can simply retry later.
+ */
+static int ade_power_up(struct ade_hw_ctx *ctx)
+{
+       int ret;
+
+       ret = clk_prepare_enable(ctx->media_noc_clk);
+       if (ret) {
+               DRM_ERROR("failed to enable media_noc_clk (%d)\n", ret);
+               return ret;
+       }
+
+       ret = reset_control_deassert(ctx->reset);
+       if (ret) {
+               DRM_ERROR("failed to deassert reset\n");
+               goto err_noc_clk;
+       }
+
+       ret = clk_prepare_enable(ctx->ade_core_clk);
+       if (ret) {
+               DRM_ERROR("failed to enable ade_core_clk (%d)\n", ret);
+               goto err_reset;
+       }
+
+       ade_init(ctx);
+       ctx->power_on = true;
+       return 0;
+
+err_reset:
+       reset_control_assert(ctx->reset);
+err_noc_clk:
+       clk_disable_unprepare(ctx->media_noc_clk);
+       return ret;
+}
+
+/*
+ * Reverse of ade_power_up(): quiesce the LDI output, gate the DSI pixel
+ * clock, then release clocks and assert the reset.
+ */
+static void ade_power_down(struct ade_hw_ctx *ctx)
+{
+       void __iomem *base = ctx->base;
+
+       writel(ADE_DISABLE, base + LDI_CTRL);
+       /* dsi pixel off */
+       writel(DSI_PCLK_OFF, base + LDI_HDMI_DSI_GT);
+
+       clk_disable_unprepare(ctx->ade_core_clk);
+       reset_control_assert(ctx->reset);
+       clk_disable_unprepare(ctx->media_noc_clk);
+       ctx->power_on = false;
+}
+
+/*
+ * Configure the media NOC QoS generators for both ADE ports: put each
+ * generator in bypass mode and enable socket QoS.
+ */
+static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+{
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       struct regmap *map = ctx->noc_regmap;
+
+       regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
+                          QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+       regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL,
+                          SOCKET_QOS_EN, SOCKET_QOS_EN);
+
+       regmap_update_bits(map, ADE1_QOSGENERATOR_MODE,
+                          QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+       regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL,
+                          SOCKET_QOS_EN, SOCKET_QOS_EN);
+}
+
+/*
+ * drm vblank-enable hook: make sure the hardware is powered, then
+ * unmask the LDI frame-end interrupt that serves as the vblank source.
+ *
+ * A power-up failure is now propagated (the original discarded it and
+ * went on to write an LDI register on powered-down hardware).
+ */
+static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+       struct kirin_drm_private *priv = dev->dev_private;
+       struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       void __iomem *base = ctx->base;
+       int ret;
+
+       if (!ctx->power_on) {
+               ret = ade_power_up(ctx);
+               if (ret)
+                       return ret;
+       }
+
+       ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+                       MASK(1), 1);
+
+       return 0;
+}
+
+/*
+ * drm vblank-disable hook: mask the LDI frame-end interrupt.  If the
+ * block is already powered down the interrupt is moot, so just log.
+ */
+static void ade_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+       struct kirin_drm_private *priv = dev->dev_private;
+       struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       void __iomem *base = ctx->base;
+
+       if (!ctx->power_on) {
+               DRM_ERROR("power is down! vblank disable fail\n");
+               return;
+       }
+
+       ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+                       MASK(1), 0);
+}
+
+/*
+ * LDI interrupt handler: on a frame-end interrupt, clear the status bit
+ * and forward a vblank event to the DRM core.
+ *
+ * NOTE(review): IRQ_HANDLED is returned even when no known status bit
+ * is set — fine for an exclusive interrupt line; revisit if the line is
+ * ever shared.
+ */
+static irqreturn_t ade_irq_handler(int irq, void *data)
+{
+       struct ade_crtc *acrtc = data;
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       struct drm_crtc *crtc = &acrtc->base;
+       void __iomem *base = ctx->base;
+       u32 status;
+
+       status = readl(base + LDI_MSK_INT);
+       DRM_DEBUG_VBL("LDI IRQ: status=0x%X\n", status);
+
+       /* vblank irq */
+       if (status & BIT(FRAME_END_INT_EN_OFST)) {
+               ade_update_bits(base + LDI_INT_CLR, FRAME_END_INT_EN_OFST,
+                               MASK(1), 1);
+               drm_crtc_handle_vblank(crtc);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Bring up the display path: output overlay compositor -> ADE -> LDI,
+ * then gate the pixel clock through to the DSI.
+ */
+static void ade_display_enable(struct ade_crtc *acrtc)
+{
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       void __iomem *base = ctx->base;
+       u32 out_fmt = acrtc->out_format;
+
+       /* enable output overlay compositor */
+       writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
+       ade_update_reload_bit(base, OVLY_OFST + OUT_OVLY, 0);
+
+       /* display source setting */
+       writel(DISP_SRC_OVLY2, base + ADE_DISP_SRC_CFG);
+
+       /* enable ade */
+       writel(ADE_ENABLE, base + ADE_EN);
+       /* enable ldi */
+       writel(NORMAL_MODE, base + LDI_WORK_MODE);
+       writel((out_fmt << BPP_OFST) | DATA_GATE_EN | LDI_EN,
+              base + LDI_CTRL);
+       /* dsi pixel on */
+       writel(DSI_PCLK_ON, base + LDI_HDMI_DSI_GT);
+}
+
+#if ADE_DEBUG
+/* debug: log the read-DMA registers of channel @ch (0-based) */
+static void ade_rdma_dump_regs(void __iomem *base, u32 ch)
+{
+       u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+       u32 val;
+
+       reg_ctrl = RD_CH_CTRL(ch);
+       reg_addr = RD_CH_ADDR(ch);
+       reg_size = RD_CH_SIZE(ch);
+       reg_stride = RD_CH_STRIDE(ch);
+       reg_space = RD_CH_SPACE(ch);
+       reg_en = RD_CH_EN(ch);
+
+       val = ade_read_reload_bit(base, RDMA_OFST + ch);
+       DRM_DEBUG_DRIVER("[rdma%d]: reload(%d)\n", ch + 1, val);
+       val = readl(base + reg_ctrl);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_ctrl(0x%08x)\n", ch + 1, val);
+       val = readl(base + reg_addr);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_addr(0x%08x)\n", ch + 1, val);
+       val = readl(base + reg_size);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_size(0x%08x)\n", ch + 1, val);
+       val = readl(base + reg_stride);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_stride(0x%08x)\n", ch + 1, val);
+       val = readl(base + reg_space);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_space(0x%08x)\n", ch + 1, val);
+       val = readl(base + reg_en);
+       DRM_DEBUG_DRIVER("[rdma%d]: reg_en(0x%08x)\n", ch + 1, val);
+}
+
+/* debug: log the clip-stage registers of channel @ch (0-based) */
+static void ade_clip_dump_regs(void __iomem *base, u32 ch)
+{
+       u32 val;
+
+       val = ade_read_reload_bit(base, CLIP_OFST + ch);
+       DRM_DEBUG_DRIVER("[clip%d]: reload(%d)\n", ch + 1, val);
+       val = readl(base + ADE_CLIP_DISABLE(ch));
+       DRM_DEBUG_DRIVER("[clip%d]: reg_clip_disable(0x%08x)\n", ch + 1, val);
+       val = readl(base + ADE_CLIP_SIZE0(ch));
+       DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size0(0x%08x)\n", ch + 1, val);
+       val = readl(base + ADE_CLIP_SIZE1(ch));
+       DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size1(0x%08x)\n", ch + 1, val);
+}
+
+/* debug: log the overlay-routing registers of the (only) overlay channel */
+static void ade_compositor_routing_dump_regs(void __iomem *base, u32 ch)
+{
+       u8 ovly_ch = 0; /* TODO: Only primary plane now */
+       u32 val;
+
+       val = readl(base + ADE_OVLY_CH_XY0(ovly_ch));
+       DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy0(0x%08x)\n", ovly_ch, val);
+       val = readl(base + ADE_OVLY_CH_XY1(ovly_ch));
+       DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy1(0x%08x)\n", ovly_ch, val);
+       val = readl(base + ADE_OVLY_CH_CTL(ovly_ch));
+       DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_ctl(0x%08x)\n", ovly_ch, val);
+}
+
+/*
+ * debug: log the state of overlay compositor @comp.  A dump routine
+ * must only read the hardware.
+ */
+static void ade_dump_overlay_compositor_regs(void __iomem *base, u32 comp)
+{
+       u32 val;
+
+       val = ade_read_reload_bit(base, OVLY_OFST + comp);
+       DRM_DEBUG_DRIVER("[overlay%d]: reload(%d)\n", comp + 1, val);
+       /*
+        * was a stray writel(ADE_ENABLE, ...): it modified the hardware
+        * from a debug path and then printed a stale val
+        */
+       val = readl(base + ADE_OVLYX_CTL(comp));
+       DRM_DEBUG_DRIVER("[overlay%d]: reg_ctl(0x%08x)\n", comp + 1, val);
+       val = readl(base + ADE_OVLY_CTL);
+       DRM_DEBUG_DRIVER("ovly_ctl(0x%08x)\n", val);
+}
+
+/* debug: walk every channel and the output compositor, logging their regs */
+static void ade_dump_regs(void __iomem *base)
+{
+       u32 ch;
+
+       /* per-channel rdma, clip and compositor-routing registers */
+       for (ch = 0; ch < ADE_CH_NUM; ch++) {
+               ade_rdma_dump_regs(base, ch);
+               ade_clip_dump_regs(base, ch);
+               ade_compositor_routing_dump_regs(base, ch);
+       }
+
+       /* output overlay compositor registers */
+       ade_dump_overlay_compositor_regs(base, OUT_OVLY);
+}
+#else
+static void ade_dump_regs(void __iomem *base) { }
+#endif
+
+/*
+ * crtc helper .enable: power up the hardware if needed, program the NOC
+ * QoS and start scanout.  A power-up failure silently leaves the crtc
+ * disabled (the helper callback cannot return an error).
+ */
+static void ade_crtc_enable(struct drm_crtc *crtc)
+{
+       struct ade_crtc *acrtc = to_ade_crtc(crtc);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       int ret;
+
+       if (acrtc->enable)
+               return;
+
+       if (!ctx->power_on) {
+               ret = ade_power_up(ctx);
+               if (ret)
+                       return;
+       }
+
+       ade_set_medianoc_qos(acrtc);
+       ade_display_enable(acrtc);
+       ade_dump_regs(ctx->base);
+       acrtc->enable = true;
+}
+
+/* crtc helper .disable: power the whole block down */
+static void ade_crtc_disable(struct drm_crtc *crtc)
+{
+       struct ade_crtc *acrtc = to_ade_crtc(crtc);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+
+       if (!acrtc->enable)
+               return;
+
+       ade_power_down(ctx);
+       acrtc->enable = false;
+}
+
+/* crtc helper .atomic_check: no hardware constraints to validate yet */
+static int ade_crtc_atomic_check(struct drm_crtc *crtc,
+                                struct drm_crtc_state *state)
+{
+       /* do nothing */
+       return 0;
+}
+
+/*
+ * crtc helper .mode_set_nofb: program the LDI timing for the new mode.
+ * NOTE(review): an ade_power_up() failure is discarded here, after
+ * which the timing writes go to powered-down hardware — confirm this
+ * is harmless on this SoC.
+ */
+static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+       struct ade_crtc *acrtc = to_ade_crtc(crtc);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       struct drm_display_mode *mode = &crtc->state->mode;
+       struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
+
+       if (!ctx->power_on)
+               (void)ade_power_up(ctx);
+       ade_ldi_set_mode(acrtc, mode, adj_mode);
+}
+
+/*
+ * crtc helper .atomic_begin: make sure the block is powered before the
+ * plane hooks start writing registers.
+ */
+static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_state)
+{
+       struct ade_crtc *acrtc = to_ade_crtc(crtc);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+
+       if (!ctx->power_on)
+               (void)ade_power_up(ctx);
+}
+
+/*
+ * crtc helper .atomic_flush: latch the register updates queued by the
+ * plane hooks by re-writing ADE_EN.
+ */
+static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_state)
+
+{
+       struct ade_crtc *acrtc = to_ade_crtc(crtc);
+       struct ade_hw_ctx *ctx = acrtc->ctx;
+       void __iomem *base = ctx->base;
+
+       /* only crtc is enabled regs take effect */
+       if (acrtc->enable) {
+               ade_dump_regs(base);
+               /* flush ade registers */
+               writel(ADE_ENABLE, base + ADE_EN);
+       }
+}
+
+/* crtc modeset-helper callbacks */
+static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+       .enable         = ade_crtc_enable,
+       .disable        = ade_crtc_disable,
+       .atomic_check   = ade_crtc_atomic_check,
+       .mode_set_nofb  = ade_crtc_mode_set_nofb,
+       .atomic_begin   = ade_crtc_atomic_begin,
+       .atomic_flush   = ade_crtc_atomic_flush,
+};
+
+/* crtc base funcs: the atomic helpers do the heavy lifting */
+static const struct drm_crtc_funcs ade_crtc_funcs = {
+       .destroy        = drm_crtc_cleanup,
+       .set_config     = drm_atomic_helper_set_config,
+       .page_flip      = drm_atomic_helper_page_flip,
+       .reset          = drm_atomic_helper_crtc_reset,
+       .set_property = drm_atomic_helper_crtc_set_property,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Register the crtc with its primary plane and helper funcs.  The OF
+ * "port" node is stored in crtc->port so drm_of_find_possible_crtcs()
+ * can later match encoders against it.
+ */
+static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                        struct drm_plane *plane)
+{
+       struct kirin_drm_private *priv = dev->dev_private;
+       struct device_node *port;
+       int ret;
+
+       /* set crtc port so that
+        * drm_of_find_possible_crtcs call works
+        */
+       port = of_get_child_by_name(dev->dev->of_node, "port");
+       if (!port) {
+               DRM_ERROR("no port node found in %s\n",
+                         dev->dev->of_node->full_name);
+               return -EINVAL;
+       }
+       /*
+        * Keep the of_node reference while crtc->port holds the pointer;
+        * the original put it here, leaving crtc->port dangling.
+        */
+       crtc->port = port;
+
+       ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+                                       &ade_crtc_funcs, NULL);
+       if (ret) {
+               DRM_ERROR("failed to init crtc.\n");
+               crtc->port = NULL;
+               of_node_put(port);
+               return ret;
+       }
+
+       drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
+       priv->crtc[drm_crtc_index(crtc)] = crtc;
+
+       return 0;
+}
+
+/*
+ * Program read-DMA for channel @ch to scan out @fb starting at source
+ * line @y, @in_h lines tall, in ADE hw format @fmt.
+ */
+static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
+                        u32 ch, u32 y, u32 in_h, u32 fmt)
+{
+       struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+       u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+       u32 stride = fb->pitches[0];
+       u32 addr = (u32)obj->paddr + y * stride;
+
+       DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
+                        ch + 1, y, in_h, stride, (u32)obj->paddr);
+       DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
+                        addr, fb->width, fb->height, fmt,
+                        drm_get_format_name(fb->pixel_format));
+
+       /* get reg offset */
+       reg_ctrl = RD_CH_CTRL(ch);
+       reg_addr = RD_CH_ADDR(ch);
+       reg_size = RD_CH_SIZE(ch);
+       reg_stride = RD_CH_STRIDE(ch);
+       reg_space = RD_CH_SPACE(ch);
+       reg_en = RD_CH_EN(ch);
+
+       /*
+        * TODO: set rotation
+        */
+       /* format field occupies bits 20:16 of the ctrl register */
+       writel((fmt << 16) & 0x1f0000, base + reg_ctrl);
+       writel(addr, base + reg_addr);
+       /* NOTE(review): size reg takes (lines << 16) | stride here, not
+        * the pixel width — confirm against the hw manual */
+       writel((in_h << 16) | stride, base + reg_size);
+       writel(stride, base + reg_stride);
+       writel(in_h * stride, base + reg_space);
+       writel(ADE_ENABLE, base + reg_en);
+       ade_update_reload_bit(base, RDMA_OFST + ch, 0);
+}
+
+/* Stop read-DMA on channel @ch and arm its reload-disable bit. */
+static void ade_rdma_disable(void __iomem *base, u32 ch)
+{
+       writel(0, base + RD_CH_EN(ch));
+       ade_update_reload_bit(base, RDMA_OFST + ch, 1);
+}
+
+/*
+ * Program the clip stage of channel @ch: crop the @fb_w-wide source
+ * down to the @in_w columns starting at @x.  When the full width is
+ * used the stage is bypassed.
+ */
+static void ade_clip_set(void __iomem *base, u32 ch, u32 fb_w, u32 x,
+                        u32 in_w, u32 in_h)
+{
+       u32 disable_val;
+       u32 clip_left;
+       u32 clip_right;
+
+       /*
+        * clip width, no need to clip height
+        */
+       if (fb_w == in_w) { /* bypass */
+               disable_val = 1;
+               clip_left = 0;
+               clip_right = 0;
+       } else {
+               disable_val = 0;
+               clip_left = x;
+               clip_right = fb_w - (x + in_w) - 1;
+       }
+
+       DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
+                        ch + 1, clip_left, clip_right);
+
+       writel(disable_val, base + ADE_CLIP_DISABLE(ch));
+       writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
+       writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
+       ade_update_reload_bit(base, CLIP_OFST + ch, 0);
+}
+
+/* Bypass the clip stage of channel @ch and arm its reload-disable bit. */
+static void ade_clip_disable(void __iomem *base, u32 ch)
+{
+       writel(1, base + ADE_CLIP_DISABLE(ch));
+       ade_update_reload_bit(base, CLIP_OFST + ch, 1);
+}
+
+/* True for ADE formats that carry a per-pixel alpha component. */
+static bool has_Alpha_channel(int format)
+{
+       return format == ADE_ARGB_8888 || format == ADE_ABGR_8888 ||
+              format == ADE_RGBA_8888 || format == ADE_BGRA_8888;
+}
+
+/*
+ * Derive the overlay blending setup for a channel from its pixel
+ * format and global alpha: the alpha source (@alp_mode) plus the fixed
+ * multiplier selections for this layer (@alp_sel, 1) and the layer
+ * underneath (@under_alp_sel, 0).
+ */
+static void ade_get_blending_params(u32 fmt, u8 glb_alpha, u8 *alp_mode,
+                                   u8 *alp_sel, u8 *under_alp_sel)
+{
+       if (has_Alpha_channel(fmt))
+               *alp_mode = (glb_alpha < 255) ? ADE_ALP_PIXEL_AND_GLB :
+                                               ADE_ALP_PIXEL;
+       else
+               *alp_mode = ADE_ALP_GLOBAL;
+
+       *alp_sel = ADE_ALP_MUL_COEFF_3; /* 1 */
+       *under_alp_sel = ADE_ALP_MUL_COEFF_2; /* 0 */
+}
+
+/*
+ * Route channel @ch into the output overlay compositor: program the
+ * destination rectangle (@x0,@y0)+(in_w x in_h), the blending controls
+ * derived from @fmt, and connect the channel to OUT_OVLY.
+ */
+static void ade_compositor_routing_set(void __iomem *base, u8 ch,
+                                      u32 x0, u32 y0,
+                                      u32 in_w, u32 in_h, u32 fmt)
+{
+       u8 ovly_ch = 0; /* TODO: This is the zpos, only one plane now */
+       u8 glb_alpha = 255;
+       u32 x1 = x0 + in_w - 1;
+       u32 y1 = y0 + in_h - 1;
+       u32 val;
+       u8 alp_sel;
+       u8 under_alp_sel;
+       u8 alp_mode;
+
+       ade_get_blending_params(fmt, glb_alpha, &alp_mode, &alp_sel,
+                               &under_alp_sel);
+
+       /* overlay routing setting
+        */
+       writel(x0 << 16 | y0, base + ADE_OVLY_CH_XY0(ovly_ch));
+       writel(x1 << 16 | y1, base + ADE_OVLY_CH_XY1(ovly_ch));
+       val = (ch + 1) << CH_SEL_OFST | BIT(CH_EN_OFST) |
+               alp_sel << CH_ALP_SEL_OFST |
+               under_alp_sel << CH_UNDER_ALP_SEL_OFST |
+               glb_alpha << CH_ALP_GBL_OFST |
+               alp_mode << CH_ALP_MODE_OFST;
+       writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
+       /* connect this plane/channel to overlay2 compositor */
+       ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+                       CH_OVLY_SEL_MASK, CH_OVLY_SEL_VAL(OUT_OVLY));
+}
+
+/* Detach the channel from the output compositor and disable its overlay */
+static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
+{
+       u8 ovly_ch = 0; /* TODO: Only primary plane now */
+
+       /* disable this plane/channel */
+       ade_update_bits(base + ADE_OVLY_CH_CTL(ovly_ch), CH_EN_OFST,
+                       MASK(1), 0);
+       /* dis-connect this plane/channel of overlay2 compositor */
+       ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+                       CH_OVLY_SEL_MASK, 0);
+}
+
+/*
+ * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
+ *
+ * Program every stage of the channel pipeline for the given plane
+ * update: read DMA, clipping and compositor routing (scaling and
+ * color transform are not implemented yet).
+ */
+static void ade_update_channel(struct ade_plane *aplane,
+                              struct drm_framebuffer *fb, int crtc_x,
+                              int crtc_y, unsigned int crtc_w,
+                              unsigned int crtc_h, u32 src_x,
+                              u32 src_y, u32 src_w, u32 src_h)
+{
+       struct ade_hw_ctx *ctx = aplane->ctx;
+       void __iomem *base = ctx->base;
+       u32 fmt = ade_get_format(fb->pixel_format);
+       u32 ch = aplane->ch;
+       u32 in_w;
+       u32 in_h;
+
+       DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
+                        ch + 1, src_x, src_y, src_w, src_h,
+                        crtc_x, crtc_y, crtc_w, crtc_h);
+
+       /* 1) DMA setting */
+       in_w = src_w;
+       in_h = src_h;
+       ade_rdma_set(base, fb, ch, src_y, in_h, fmt);
+
+       /* 2) clip setting */
+       ade_clip_set(base, ch, fb->width, src_x, in_w, in_h);
+
+       /* 3) TODO: scale setting for overlay planes */
+
+       /* 4) TODO: ctran/csc setting for overlay planes */
+
+       /* 5) compositor routing setting */
+       ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
+}
+
+/* Tear down every pipeline stage programmed by ade_update_channel(). */
+static void ade_disable_channel(struct ade_plane *aplane)
+{
+       struct ade_hw_ctx *ctx = aplane->ctx;
+       void __iomem *base = ctx->base;
+       u32 ch = aplane->ch;
+
+       DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
+
+       /* disable read DMA */
+       ade_rdma_disable(base, ch);
+
+       /* disable clip */
+       ade_clip_disable(base, ch);
+
+       /* disable compositor routing */
+       ade_compositor_routing_disable(base, ch);
+}
+
+/* No per-commit fb preparation needed (CMA buffers are always pinned). */
+static int ade_plane_prepare_fb(struct drm_plane *plane,
+                               const struct drm_plane_state *new_state)
+{
+       /* do nothing */
+       return 0;
+}
+
+/* Counterpart of ade_plane_prepare_fb(); nothing to release. */
+static void ade_plane_cleanup_fb(struct drm_plane *plane,
+                                const struct drm_plane_state *old_state)
+{
+       /* do nothing */
+}
+
+/*
+ * Validate a plane state update: the pixel format must be supported,
+ * scaling is not supported (source size must equal crtc size), and the
+ * plane must lie entirely inside both the framebuffer and the mode.
+ * Returns 0 if the state is acceptable, a negative errno otherwise.
+ */
+static int ade_plane_atomic_check(struct drm_plane *plane,
+                                 struct drm_plane_state *state)
+{
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_crtc_state *crtc_state;
+       u32 src_x = state->src_x >> 16; /* src coords are 16.16 fixed point */
+       u32 src_y = state->src_y >> 16;
+       u32 src_w = state->src_w >> 16;
+       u32 src_h = state->src_h >> 16;
+       int crtc_x = state->crtc_x;
+       int crtc_y = state->crtc_y;
+       u32 crtc_w = state->crtc_w;
+       u32 crtc_h = state->crtc_h;
+       u32 fmt;
+
+       /* plane being disabled: nothing to validate */
+       if (!crtc || !fb)
+               return 0;
+
+       fmt = ade_get_format(fb->pixel_format);
+       if (fmt == ADE_FORMAT_UNSUPPORT)
+               return -EINVAL;
+
+       crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
+
+       if (src_w != crtc_w || src_h != crtc_h) {
+               DRM_ERROR("Scale not support!!!\n");
+               return -EINVAL;
+       }
+
+       if (src_x + src_w > fb->width ||
+           src_y + src_h > fb->height)
+               return -EINVAL;
+
+       if (crtc_x < 0 || crtc_y < 0)
+               return -EINVAL;
+
+       if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
+           crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Commit the new plane state to hardware via the channel pipeline. */
+static void ade_plane_atomic_update(struct drm_plane *plane,
+                                   struct drm_plane_state *old_state)
+{
+       struct drm_plane_state  *state  = plane->state;
+       struct ade_plane *aplane = to_ade_plane(plane);
+
+       /* src coords are 16.16 fixed point; convert to integer pixels */
+       ade_update_channel(aplane, state->fb, state->crtc_x, state->crtc_y,
+                          state->crtc_w, state->crtc_h,
+                          state->src_x >> 16, state->src_y >> 16,
+                          state->src_w >> 16, state->src_h >> 16);
+}
+
+/* Disable the plane's hardware channel on atomic disable. */
+static void ade_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct ade_plane *aplane = to_ade_plane(plane);
+
+       ade_disable_channel(aplane);
+}
+
+/* Atomic helper callbacks for ADE planes. */
+static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
+       .prepare_fb = ade_plane_prepare_fb,
+       .cleanup_fb = ade_plane_cleanup_fb,
+       .atomic_check = ade_plane_atomic_check,
+       .atomic_update = ade_plane_atomic_update,
+       .atomic_disable = ade_plane_atomic_disable,
+};
+
+/*
+ * Plane funcs are pure atomic helpers and never modified at runtime,
+ * so the table can live in read-only memory.
+ */
+static const struct drm_plane_funcs ade_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .set_property = drm_atomic_helper_plane_set_property,
+       .destroy = drm_plane_cleanup,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Initialize one ADE plane: look up the channel's supported formats
+ * and register it as a universal DRM plane of the given @type.
+ * Returns 0 on success or a negative errno.
+ */
+static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
+                         enum drm_plane_type type)
+{
+       const u32 *fmts;
+       u32 fmts_cnt;
+       int ret;
+
+       /*
+        * Get the channel's format list.  The original checked a never
+        * assigned 'ret' here, silently accepting an empty format list;
+        * an unusable channel must be rejected instead.
+        */
+       fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
+       if (!fmts_cnt)
+               return -EINVAL;
+
+       ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
+                                      fmts, fmts_cnt, type, NULL);
+       if (ret) {
+               DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
+               return ret;
+       }
+
+       drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
+
+       return 0;
+}
+
+/*
+ * Parse ADE resources from the device tree: register space, reset
+ * control, NOC syscon regmap, vblank irq and the core/noc/pixel clocks.
+ * Returns 0 on success or a negative errno.
+ */
+static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+{
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = pdev->dev.of_node;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ctx->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ctx->base)) {
+               DRM_ERROR("failed to remap ade io base\n");
+               return PTR_ERR(ctx->base);
+       }
+
+       ctx->reset = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(ctx->reset))
+               return PTR_ERR(ctx->reset);
+
+       ctx->noc_regmap =
+               syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
+       if (IS_ERR(ctx->noc_regmap)) {
+               DRM_ERROR("failed to get noc regmap\n");
+               return PTR_ERR(ctx->noc_regmap);
+       }
+
+       ctx->irq = platform_get_irq(pdev, 0);
+       if (ctx->irq < 0) {
+               DRM_ERROR("failed to get irq\n");
+               /* propagate the real error (e.g. -EPROBE_DEFER) */
+               return ctx->irq;
+       }
+
+       /* devm_clk_get() returns an ERR_PTR() on failure, never NULL */
+       ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
+       if (IS_ERR(ctx->ade_core_clk)) {
+               DRM_ERROR("failed to parse clk ADE_CORE\n");
+               return PTR_ERR(ctx->ade_core_clk);
+       }
+
+       ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
+       if (IS_ERR(ctx->media_noc_clk)) {
+               DRM_ERROR("failed to parse clk CODEC_JPEG\n");
+               return PTR_ERR(ctx->media_noc_clk);
+       }
+
+       ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
+       if (IS_ERR(ctx->ade_pix_clk)) {
+               DRM_ERROR("failed to parse clk ADE_PIX\n");
+               return PTR_ERR(ctx->ade_pix_clk);
+       }
+
+       return 0;
+}
+
+/*
+ * kirin_dc_ops.init callback: allocate the driver data, parse DT
+ * resources, create planes and the crtc, and hook up the vblank irq.
+ * Returns 0 on success or a negative errno (devm resources are
+ * released automatically on failure).
+ */
+static int ade_drm_init(struct drm_device *dev)
+{
+       struct platform_device *pdev = dev->platformdev;
+       struct ade_data *ade;
+       struct ade_hw_ctx *ctx;
+       struct ade_crtc *acrtc;
+       struct ade_plane *aplane;
+       enum drm_plane_type type;
+       int ret;
+       int i;
+
+       ade = devm_kzalloc(dev->dev, sizeof(*ade), GFP_KERNEL);
+       if (!ade) {
+               DRM_ERROR("failed to alloc ade_data\n");
+               return -ENOMEM;
+       }
+       platform_set_drvdata(pdev, ade);
+
+       ctx = &ade->ctx;
+       acrtc = &ade->acrtc;
+       acrtc->ctx = ctx;
+       acrtc->out_format = LDI_OUT_RGB_888;
+
+       ret = ade_dts_parse(pdev, ctx);
+       if (ret)
+               return ret;
+
+       /*
+        * plane init
+        * TODO: Now only support primary plane, overlay planes
+        * need to do.
+        */
+       for (i = 0; i < ADE_CH_NUM; i++) {
+               aplane = &ade->aplane[i];
+               aplane->ch = i;
+               aplane->ctx = ctx;
+               type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
+                       DRM_PLANE_TYPE_OVERLAY;
+
+               ret = ade_plane_init(dev, aplane, type);
+               if (ret)
+                       return ret;
+       }
+
+       /* crtc init */
+       ret = ade_crtc_init(dev, &acrtc->base, &ade->aplane[PRIMARY_CH].base);
+       if (ret)
+               return ret;
+
+       /* vblank irq init */
+       ret = devm_request_irq(dev->dev, ctx->irq, ade_irq_handler,
+                              IRQF_SHARED, dev->driver->name, acrtc);
+       if (ret)
+               return ret;
+       /* install the vblank hooks on the shared drm_driver at runtime */
+       dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
+       dev->driver->enable_vblank = ade_enable_vblank;
+       dev->driver->disable_vblank = ade_disable_vblank;
+
+       return 0;
+}
+
+/* kirin_dc_ops.cleanup callback: undo ade_drm_init() (devm handles
+ * the rest of the resources).
+ */
+static void ade_drm_cleanup(struct drm_device *dev)
+{
+       struct platform_device *pdev = dev->platformdev;
+       struct ade_data *ade = platform_get_drvdata(pdev);
+       struct drm_crtc *crtc = &ade->acrtc.base;
+
+       drm_crtc_cleanup(crtc);
+}
+
+/* Display controller ops exported to the kirin master driver. */
+const struct kirin_dc_ops ade_dc_ops = {
+       .init = ade_drm_init,
+       .cleanup = ade_drm_cleanup
+};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
new file mode 100644 (file)
index 0000000..e102c9e
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * Hisilicon Kirin SoCs drm master driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *     Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *     Xinliang Liu <xinliang.liu@linaro.org>
+ *     Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "kirin_drm_drv.h"
+
+static struct kirin_dc_ops *dc_ops;
+
+/*
+ * Tear down everything kirin_drm_kms_init() set up, in reverse order:
+ * fbdev emulation, poll/vblank infrastructure, the display controller
+ * and the mode config.  Always returns 0.
+ */
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+       struct kirin_drm_private *priv = dev->dev_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+       if (priv->fbdev) {
+               drm_fbdev_cma_fini(priv->fbdev);
+               priv->fbdev = NULL;
+       }
+#endif
+       drm_kms_helper_poll_fini(dev);
+       drm_vblank_cleanup(dev);
+       dc_ops->cleanup(dev);
+       drm_mode_config_cleanup(dev);
+       devm_kfree(dev->dev, priv);
+       dev->dev_private = NULL;
+
+       return 0;
+}
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+/*
+ * Output poll callback: create the CMA fbdev lazily on the first
+ * hotplug event, forward subsequent events to the existing fbdev.
+ */
+static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
+{
+       struct kirin_drm_private *priv = dev->dev_private;
+
+       if (priv->fbdev) {
+               drm_fbdev_cma_hotplug_event(priv->fbdev);
+       } else {
+               priv->fbdev = drm_fbdev_cma_init(dev, 32,
+                               dev->mode_config.num_crtc,
+                               dev->mode_config.num_connector);
+               if (IS_ERR(priv->fbdev))
+                       priv->fbdev = NULL;
+       }
+}
+#endif
+
+/* Mode config callbacks: CMA framebuffers plus stock atomic helpers. */
+static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
+       .fb_create = drm_fb_cma_create,
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+       .output_poll_changed = kirin_fbdev_output_poll_changed,
+#endif
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+/* Set the mode-config limits (2048x2048 max) and callback table. */
+static void kirin_drm_mode_config_init(struct drm_device *dev)
+{
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.max_width = 2048;
+       dev->mode_config.max_height = 2048;
+
+       dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+}
+
+/*
+ * Bring up KMS: allocate driver private data, init mode config, the
+ * display controller and all sub-components, then vblank and hpd
+ * polling.  Returns 0 on success; unwinds in reverse order on error.
+ */
+static int kirin_drm_kms_init(struct drm_device *dev)
+{
+       struct kirin_drm_private *priv;
+       int ret;
+
+       priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev->dev_private = priv;
+       dev_set_drvdata(dev->dev, dev);
+
+       /* dev->mode_config initialization */
+       drm_mode_config_init(dev);
+       kirin_drm_mode_config_init(dev);
+
+       /* display controller init */
+       ret = dc_ops->init(dev);
+       if (ret)
+               goto err_mode_config_cleanup;
+
+       /* bind and init sub drivers */
+       ret = component_bind_all(dev->dev, dev);
+       if (ret) {
+               DRM_ERROR("failed to bind all component.\n");
+               goto err_dc_cleanup;
+       }
+
+       /* vblank init */
+       ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+       if (ret) {
+               DRM_ERROR("failed to initialize vblank.\n");
+               goto err_unbind_all;
+       }
+       /* with irq_enabled = true, we can use the vblank feature. */
+       dev->irq_enabled = true;
+
+       /* reset all the states of crtc/plane/encoder/connector */
+       drm_mode_config_reset(dev);
+
+       /* init kms poll for handling hpd */
+       drm_kms_helper_poll_init(dev);
+
+       /* force detection after connectors init */
+       (void)drm_helper_hpd_irq_event(dev);
+
+       return 0;
+
+err_unbind_all:
+       component_unbind_all(dev->dev, dev);
+err_dc_cleanup:
+       dc_ops->cleanup(dev);
+err_mode_config_cleanup:
+       drm_mode_config_cleanup(dev);
+       devm_kfree(dev->dev, priv);
+       dev->dev_private = NULL;
+
+       return ret;
+}
+
+/* Standard DRM file operations with CMA GEM mmap. */
+static const struct file_operations kirin_drm_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .release        = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = drm_compat_ioctl,
+#endif
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .llseek         = no_llseek,
+       .mmap           = drm_gem_cma_mmap,
+};
+
+/* Dumb-buffer create: thin wrapper over the CMA helper (no extra
+ * pitch/size alignment needed on this hardware).
+ */
+static int kirin_gem_cma_dumb_create(struct drm_file *file,
+                                    struct drm_device *dev,
+                                    struct drm_mode_create_dumb *args)
+{
+       return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+/*
+ * DRM driver descriptor.  Deliberately non-const: the display
+ * controller's init callback patches in the vblank hooks at runtime.
+ */
+static struct drm_driver kirin_drm_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+                                 DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+       .fops                   = &kirin_drm_fops,
+       .set_busid              = drm_platform_set_busid,
+
+       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_vm_ops             = &drm_gem_cma_vm_ops,
+       .dumb_create            = kirin_gem_cma_dumb_create,
+       .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
+       .dumb_destroy           = drm_gem_dumb_destroy,
+
+       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
+       .gem_prime_export       = drm_gem_prime_export,
+       .gem_prime_import       = drm_gem_prime_import,
+       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+       .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+       .gem_prime_vmap         = drm_gem_cma_prime_vmap,
+       .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
+       .gem_prime_mmap         = drm_gem_cma_prime_mmap,
+
+       .name                   = "kirin",
+       .desc                   = "Hisilicon Kirin SoCs' DRM Driver",
+       .date                   = "20150718",
+       .major                  = 1,
+       .minor                  = 0,
+};
+
+/* Component match callback: compare by OF node pointer. */
+static int compare_of(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+/*
+ * Register every connector with userspace; on failure, unregister the
+ * connectors registered so far and return the error.  The mode_config
+ * mutex guards the connector list iteration.
+ */
+static int kirin_drm_connectors_register(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       struct drm_connector *failed_connector;
+       int ret;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_for_each_connector(connector, dev) {
+               ret = drm_connector_register(connector);
+               if (ret) {
+                       failed_connector = connector;
+                       goto err;
+               }
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return 0;
+
+err:
+       /* unwind: unregister everything before the failed connector */
+       drm_for_each_connector(connector, dev) {
+               if (failed_connector == connector)
+                       break;
+               drm_connector_unregister(connector);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
+
+/*
+ * Component master bind: allocate and register the drm_device, init
+ * KMS and register connectors.  Unwinds fully on any failure.
+ */
+static int kirin_drm_bind(struct device *dev)
+{
+       struct drm_driver *driver = &kirin_drm_driver;
+       struct drm_device *drm_dev;
+       int ret;
+
+       drm_dev = drm_dev_alloc(driver, dev);
+       if (!drm_dev)
+               return -ENOMEM;
+
+       drm_dev->platformdev = to_platform_device(dev);
+
+       ret = kirin_drm_kms_init(drm_dev);
+       if (ret)
+               goto err_drm_dev_unref;
+
+       ret = drm_dev_register(drm_dev, 0);
+       if (ret)
+               goto err_kms_cleanup;
+
+       /* connectors should be registered after drm device register */
+       ret = kirin_drm_connectors_register(drm_dev);
+       if (ret)
+               goto err_drm_dev_unregister;
+
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, drm_dev->primary->index);
+
+       return 0;
+
+err_drm_dev_unregister:
+       drm_dev_unregister(drm_dev);
+err_kms_cleanup:
+       kirin_drm_kms_cleanup(drm_dev);
+err_drm_dev_unref:
+       drm_dev_unref(drm_dev);
+
+       return ret;
+}
+
+/* Component master unbind: release the drm device stored in drvdata. */
+static void kirin_drm_unbind(struct device *dev)
+{
+       drm_put_dev(dev_get_drvdata(dev));
+}
+
+/* Component framework master callbacks. */
+static const struct component_master_ops kirin_drm_ops = {
+       .bind = kirin_drm_bind,
+       .unbind = kirin_drm_unbind,
+};
+
+/*
+ * Walk the OF graph to find the single remote device (encoder)
+ * connected to the display controller.  Returns the remote node
+ * (reference already dropped -- callers use it for pointer comparison
+ * only) or an ERR_PTR() on failure.
+ */
+static struct device_node *kirin_get_remote_node(struct device_node *np)
+{
+       struct device_node *endpoint, *remote;
+
+       /* get the first endpoint, in our case only one remote node
+        * is connected to display controller.
+        */
+       endpoint = of_graph_get_next_endpoint(np, NULL);
+       if (!endpoint) {
+               DRM_ERROR("no valid endpoint node\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       remote = of_graph_get_remote_port_parent(endpoint);
+       /*
+        * Drop the endpoint reference only after it has been used;
+        * the original put it before passing it to
+        * of_graph_get_remote_port_parent().
+        */
+       of_node_put(endpoint);
+       if (!remote) {
+               DRM_ERROR("no valid remote node\n");
+               return ERR_PTR(-ENODEV);
+       }
+       of_node_put(remote);
+
+       if (!of_device_is_available(remote)) {
+               DRM_ERROR("not available for remote node\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       return remote;
+}
+
+/*
+ * Platform probe: pick the display controller ops from the OF match
+ * data, resolve the remote encoder node and register the component
+ * master.  Returns 0 on success or a negative errno.
+ */
+static int kirin_drm_platform_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct component_match *match = NULL;
+       struct device_node *remote;
+
+       dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
+       if (!dc_ops) {
+               DRM_ERROR("failed to get dt id data\n");
+               return -EINVAL;
+       }
+
+       remote = kirin_get_remote_node(np);
+       if (IS_ERR(remote))
+               return PTR_ERR(remote);
+
+       component_match_add(dev, &match, compare_of, remote);
+
+       /* the trailing 'return 0;' after this was unreachable -- removed */
+       return component_master_add_with_match(dev, &kirin_drm_ops, match);
+}
+
+/* Platform remove: drop the component master and clear the dc ops. */
+static int kirin_drm_platform_remove(struct platform_device *pdev)
+{
+       component_master_del(&pdev->dev, &kirin_drm_ops);
+       dc_ops = NULL;
+       return 0;
+}
+
+/* OF match table; .data carries the per-SoC display controller ops. */
+static const struct of_device_id kirin_drm_dt_ids[] = {
+       { .compatible = "hisilicon,hi6220-ade",
+         .data = &ade_dc_ops,
+       },
+       { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
+
+/* Platform driver glue and module metadata. */
+static struct platform_driver kirin_drm_platform_driver = {
+       .probe = kirin_drm_platform_probe,
+       .remove = kirin_drm_platform_remove,
+       .driver = {
+               .name = "kirin-drm",
+               .of_match_table = kirin_drm_dt_ids,
+       },
+};
+
+module_platform_driver(kirin_drm_platform_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("hisilicon Kirin SoCs' DRM master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
new file mode 100644 (file)
index 0000000..1a07caf
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_DRM_DRV_H__
+#define __KIRIN_DRM_DRV_H__
+
+#define MAX_CRTC       2
+
+/* display controller init/cleanup ops */
+struct kirin_dc_ops {
+       int (*init)(struct drm_device *dev);    /* bring up crtc/planes/irq */
+       void (*cleanup)(struct drm_device *dev); /* undo init */
+};
+
+/* Per-device driver data stored in drm_device.dev_private. */
+struct kirin_drm_private {
+       struct drm_crtc *crtc[MAX_CRTC];
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+       struct drm_fbdev_cma *fbdev;    /* lazily created on first hotplug */
+#endif
+};
+
+extern const struct kirin_dc_ops ade_dc_ops;
+
+#endif /* __KIRIN_DRM_DRV_H__ */
index 649a562..8f40410 100644 (file)
@@ -1,3 +1,20 @@
+config DRM_I915_WERROR
+        bool "Force GCC to throw an error instead of a warning when compiling"
+        # As this may inadvertently break the build, only allow the user
+        # to shoot oneself in the foot iff they aim really hard
+        depends on EXPERT
+        # We use the dependency on !COMPILE_TEST to not be enabled in
+        # allmodconfig or allyesconfig configurations
+        depends on !COMPILE_TEST
+        default n
+        help
+          Add -Werror to the build flags for (and only for) i915.ko.
+          Do not enable this unless you are writing code for the i915.ko module.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
 config DRM_I915_DEBUG
         bool "Enable additional driver debugging"
         depends on DRM_I915
@@ -10,3 +27,15 @@ config DRM_I915_DEBUG
 
           If in doubt, say "N".
 
+config DRM_I915_DEBUG_GEM
+        bool "Insert extra checks into the GEM internals"
+        default n
+        depends on DRM_I915_WERROR
+        help
+          Enable extra sanity checks (including BUGs) along the GEM driver
+          paths that may slow the system down and if hit hang the machine.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
index 7ffb51b..0b88ba0 100644 (file)
@@ -2,6 +2,8 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+
 # Please keep these build lists sorted!
 
 # core driver code
index 8593394..8b8d6f0 100644 (file)
@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
        return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+static const char get_active_flag(struct drm_i915_gem_object *obj)
 {
-       if (obj->pin_display)
-               return "p";
-       else
-               return " ";
+       return obj->active ? '*' : ' ';
+}
+
+static const char get_pin_flag(struct drm_i915_gem_object *obj)
+{
+       return obj->pin_display ? 'p' : ' ';
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+static const char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
        switch (obj->tiling_mode) {
        default:
-       case I915_TILING_NONE: return " ";
-       case I915_TILING_X: return "X";
-       case I915_TILING_Y: return "Y";
+       case I915_TILING_NONE: return ' ';
+       case I915_TILING_X: return 'X';
+       case I915_TILING_Y: return 'Y';
        }
 }
 
-static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+static inline const char get_global_flag(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+       return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+}
+
+static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+{
+       return obj->mapping ? 'M' : ' ';
 }
 
 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -136,12 +143,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
+       seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
                   &obj->base,
-                  obj->active ? "*" : " ",
+                  get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
+                  get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain);
@@ -435,6 +443,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mappable_count, purgeable_count;
        u64 size, mappable_size, purgeable_size;
+       unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
+       u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
        struct drm_i915_gem_object *obj;
        struct drm_file *file;
        struct i915_vma *vma;
@@ -468,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                size += obj->base.size, ++count;
                if (obj->madv == I915_MADV_DONTNEED)
                        purgeable_size += obj->base.size, ++purgeable_count;
+               if (obj->mapping) {
+                       pin_mapped_count++;
+                       pin_mapped_size += obj->base.size;
+                       if (obj->pages_pin_count == 0) {
+                               pin_mapped_purgeable_count++;
+                               pin_mapped_purgeable_size += obj->base.size;
+                       }
+               }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
@@ -485,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }
+               if (obj->mapping) {
+                       pin_mapped_count++;
+                       pin_mapped_size += obj->base.size;
+                       if (obj->pages_pin_count == 0) {
+                               pin_mapped_purgeable_count++;
+                               pin_mapped_purgeable_size += obj->base.size;
+                       }
+               }
        }
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
@@ -492,12 +518,20 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   mappable_count, mappable_size);
        seq_printf(m, "%u fault mappable objects, %llu bytes\n",
                   count, size);
+       seq_printf(m,
+                  "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
+                  pin_mapped_count, pin_mapped_purgeable_count,
+                  pin_mapped_size, pin_mapped_purgeable_size);
 
        seq_printf(m, "%llu [%llu] gtt total\n",
                   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
 
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct task_struct *task;
@@ -518,8 +552,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
        }
-
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->filelist_mutex);
 
        return 0;
 }
@@ -1216,12 +1249,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
 
                rpstat = I915_READ(GEN6_RPSTAT1);
-               rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
-               rpcurup = I915_READ(GEN6_RP_CUR_UP);
-               rpprevup = I915_READ(GEN6_RP_PREV_UP);
-               rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
-               rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
-               rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+               rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+               rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+               rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+               rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+               rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+               rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                if (IS_GEN9(dev))
                        cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
                else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1294,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
-               seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
-                          GEN6_CURICONT_MASK);
-               seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
-                          GEN6_CURBSYTAVG_MASK);
-               seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
-                          GEN6_CURBSYTAVG_MASK);
+               seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+                          rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
+               seq_printf(m, "RP CUR UP: %d (%dus)\n",
+                          rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
+               seq_printf(m, "RP PREV UP: %d (%dus)\n",
+                          rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           dev_priv->rps.up_threshold);
 
-               seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
-                          GEN6_CURIAVG_MASK);
-               seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
-                          GEN6_CURBSYTAVG_MASK);
-               seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
-                          GEN6_CURBSYTAVG_MASK);
+               seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+                          rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
+               seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+                          rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
+               seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+                          rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           dev_priv->rps.down_threshold);
 
@@ -1469,12 +1502,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_uncore_forcewake_domain *fw_domain;
-       int i;
 
        spin_lock_irq(&dev_priv->uncore.lock);
-       for_each_fw_domain(fw_domain, dev_priv, i) {
+       for_each_fw_domain(fw_domain, dev_priv) {
                seq_printf(m, "%s.wake_count = %u\n",
-                          intel_uncore_forcewake_domain_to_str(i),
+                          intel_uncore_forcewake_domain_to_str(fw_domain->id),
                           fw_domain->wake_count);
        }
        spin_unlock_irq(&dev_priv->uncore.lock);
@@ -2325,6 +2357,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        else if (INTEL_INFO(dev)->gen >= 6)
                gen6_ppgtt_info(m, dev);
 
+       mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;
@@ -2339,6 +2372,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }
+       mutex_unlock(&dev->filelist_mutex);
 
 out_put:
        intel_runtime_pm_put(dev_priv);
@@ -2374,6 +2408,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
                   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
                   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+       mutex_lock(&dev->filelist_mutex);
        spin_lock(&dev_priv->rps.client_lock);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2396,6 +2432,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
        seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
        spin_unlock(&dev_priv->rps.client_lock);
+       mutex_unlock(&dev->filelist_mutex);
 
        return 0;
 }
@@ -2405,10 +2442,11 @@ static int i915_llc(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       const bool edram = INTEL_GEN(dev_priv) > 8;
 
-       /* Size calculation for LLC is a bit of a pain. Ignore for now. */
        seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
-       seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+       seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
+                  intel_uncore_edram_size(dev_priv)/1024/1024);
 
        return 0;
 }
@@ -4723,7 +4761,7 @@ i915_wedged_get(void *data, u64 *val)
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       *val = atomic_read(&dev_priv->gpu_error.reset_counter);
+       *val = i915_terminally_wedged(&dev_priv->gpu_error);
 
        return 0;
 }
index b377753..5c76150 100644 (file)
@@ -257,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
        return 0;
 }
 
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define   DEVEN_MCHBAR_EN (1 << 28)
-
 /* Allocate space for the MCH regs if needed, return nonzero on error */
 static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -325,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
        dev_priv->mchbar_need_disable = false;
 
        if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+               pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -343,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)
 
        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+               pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -356,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-       u32 temp;
 
        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
-                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-                       temp &= ~DEVEN_MCHBAR_EN;
-                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+                       u32 deven_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+                                             &deven_val);
+                       deven_val &= ~DEVEN_MCHBAR_EN;
+                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+                                              deven_val);
                } else {
-                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-                       temp &= ~1;
-                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+                       u32 mchbar_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                             &mchbar_val);
+                       mchbar_val &= ~1;
+                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                              mchbar_val);
                }
        }
 
index 29b4e79..d37c0a6 100644 (file)
@@ -567,10 +567,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
        drm_modeset_unlock_all(dev);
 }
 
-static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume);
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 {
@@ -640,8 +639,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_display_set_init_power(dev_priv, false);
 
-       if (HAS_CSR(dev_priv))
-               flush_work(&dev_priv->csr.work);
+       intel_csr_ucode_suspend(dev_priv);
 
 out:
        enable_rpm_wakeref_asserts(dev_priv);
@@ -657,7 +655,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 
        disable_rpm_wakeref_asserts(dev_priv);
 
-       fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+       fw_csr = !IS_BROXTON(dev_priv) &&
+               suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
@@ -668,7 +667,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
        if (!fw_csr)
                intel_power_domains_suspend(dev_priv);
 
-       ret = intel_suspend_complete(dev_priv);
+       ret = 0;
+       if (IS_BROXTON(dev_priv))
+               bxt_enable_dc9(dev_priv);
+       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               hsw_enable_pc8(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               ret = vlv_suspend_complete(dev_priv);
 
        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -732,6 +737,8 @@ static int i915_drm_resume(struct drm_device *dev)
 
        disable_rpm_wakeref_asserts(dev_priv);
 
+       intel_csr_ucode_resume(dev_priv);
+
        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -802,7 +809,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       int ret;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -813,6 +820,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't anyway depend on them
+        * disabling/enabling the device.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
@@ -830,21 +867,25 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_early_sanitize(dev, true);
 
-       if (IS_BROXTON(dev))
-               ret = bxt_resume_prepare(dev_priv);
-       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+       if (IS_BROXTON(dev)) {
+               if (!dev_priv->suspended_to_idle)
+                       gen9_sanitize_dc_state(dev_priv);
+               bxt_disable_dc9(dev_priv);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_disable_pc8(dev_priv);
+       }
 
        intel_uncore_sanitize(dev);
 
-       if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+       if (IS_BROXTON(dev_priv) ||
+           !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                intel_power_domains_init_hw(dev_priv, true);
 
+       enable_rpm_wakeref_asserts(dev_priv);
+
 out:
        dev_priv->suspended_to_idle = false;
 
-       enable_rpm_wakeref_asserts(dev_priv);
-
        return ret;
 }
 
@@ -880,23 +921,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
 int i915_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       bool simulated;
+       struct i915_gpu_error *error = &dev_priv->gpu_error;
+       unsigned reset_counter;
        int ret;
 
        intel_reset_gt_powersave(dev);
 
        mutex_lock(&dev->struct_mutex);
 
-       i915_gem_reset(dev);
+       /* Clear any previous failed attempts at recovery. Time to try again. */
+       atomic_andnot(I915_WEDGED, &error->reset_counter);
 
-       simulated = dev_priv->gpu_error.stop_rings != 0;
+       /* Clear the reset-in-progress flag and increment the reset epoch. */
+       reset_counter = atomic_inc_return(&error->reset_counter);
+       if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+               ret = -EIO;
+               goto error;
+       }
+
+       i915_gem_reset(dev);
 
        ret = intel_gpu_reset(dev, ALL_ENGINES);
 
        /* Also reset the gpu hangman. */
-       if (simulated) {
+       if (error->stop_rings != 0) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-               dev_priv->gpu_error.stop_rings = 0;
+               error->stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
@@ -908,9 +958,11 @@ int i915_reset(struct drm_device *dev)
                pr_notice("drm/i915: Resetting chip after gpu hang\n");
 
        if (ret) {
-               DRM_ERROR("Failed to reset chip: %i\n", ret);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
+               if (ret != -ENODEV)
+                       DRM_ERROR("Failed to reset chip: %i\n", ret);
+               else
+                       DRM_DEBUG_DRIVER("GPU reset disabled\n");
+               goto error;
        }
 
        intel_overlay_reset(dev_priv);
@@ -929,20 +981,14 @@ int i915_reset(struct drm_device *dev)
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
-
-       /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-       dev_priv->gpu_error.reload_in_reset = true;
-
        ret = i915_gem_init_hw(dev);
-
-       dev_priv->gpu_error.reload_in_reset = false;
-
-       mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("Failed hw init on reset %d\n", ret);
-               return ret;
+               goto error;
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
@@ -953,6 +999,11 @@ int i915_reset(struct drm_device *dev)
                intel_enable_gt_powersave(dev);
 
        return 0;
+
+error:
+       atomic_or(I915_WEDGED, &error->reset_counter);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1059,44 +1110,6 @@ static int i915_pm_resume(struct device *dev)
        return i915_drm_resume(drm_dev);
 }
 
-static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
-{
-       hsw_enable_pc8(dev_priv);
-
-       return 0;
-}
-
-static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       /* TODO: when DC5 support is added disable DC5 here. */
-
-       broxton_ddi_phy_uninit(dev);
-       broxton_uninit_cdclk(dev);
-       bxt_enable_dc9(dev_priv);
-
-       return 0;
-}
-
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       /* TODO: when CSR FW support is added make sure the FW is loaded */
-
-       bxt_disable_dc9(dev_priv);
-
-       /*
-        * TODO: when DC5 support is added enable DC5 here if the CSR FW
-        * is available.
-        */
-       broxton_init_cdclk(dev);
-       broxton_ddi_phy_init(dev);
-
-       return 0;
-}
-
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1502,7 +1515,16 @@ static int intel_runtime_suspend(struct device *device)
        intel_suspend_gt_powersave(dev);
        intel_runtime_pm_disable_interrupts(dev_priv);
 
-       ret = intel_suspend_complete(dev_priv);
+       ret = 0;
+       if (IS_BROXTON(dev_priv)) {
+               bxt_display_core_uninit(dev_priv);
+               bxt_enable_dc9(dev_priv);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               hsw_enable_pc8(dev_priv);
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               ret = vlv_suspend_complete(dev_priv);
+       }
+
        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1576,12 +1598,17 @@ static int intel_runtime_resume(struct device *device)
        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);
 
-       if (IS_BROXTON(dev))
-               ret = bxt_resume_prepare(dev_priv);
-       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+       if (IS_BROXTON(dev)) {
+               bxt_disable_dc9(dev_priv);
+               bxt_display_core_init(dev_priv, true);
+               if (dev_priv->csr.dmc_payload &&
+                   (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+                       gen9_enable_dc5(dev_priv);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_disable_pc8(dev_priv);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                ret = vlv_resume_prepare(dev_priv, true);
+       }
 
        /*
         * No point of rolling back things in case of an error, as the best
@@ -1612,26 +1639,6 @@ static int intel_runtime_resume(struct device *device)
        return ret;
 }
 
-/*
- * This function implements common functionality of runtime and system
- * suspend sequence.
- */
-static int intel_suspend_complete(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       if (IS_BROXTON(dev_priv))
-               ret = bxt_suspend_complete(dev_priv);
-       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               ret = hsw_suspend_complete(dev_priv);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               ret = vlv_suspend_complete(dev_priv);
-       else
-               ret = 0;
-
-       return ret;
-}
-
 static const struct dev_pm_ops i915_pm_ops = {
        /*
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
index a9c8211..9d7b54e 100644 (file)
 #include <uapi/drm/i915_drm.h>
 #include <uapi/drm/drm_fourcc.h>
 
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
 #include <linux/backlight.h>
 #include <linux/hashtable.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
-#include "intel_guc.h"
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
 #include "intel_dpll_mgr.h"
+#include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160411"
+#define DRIVER_DATE            "20160425"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -634,6 +640,13 @@ enum forcewake_domains {
                         FORCEWAKE_MEDIA)
 };
 
+#define FW_REG_READ  (1)
+#define FW_REG_WRITE (2)
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg, unsigned int op);
+
 struct intel_uncore_funcs {
        void (*force_wake_get)(struct drm_i915_private *dev_priv,
                                                        enum forcewake_domains domains);
@@ -666,8 +679,9 @@ struct intel_uncore {
        struct intel_uncore_forcewake_domain {
                struct drm_i915_private *i915;
                enum forcewake_domain_id id;
+               enum forcewake_domains mask;
                unsigned wake_count;
-               struct timer_list timer;
+               struct hrtimer timer;
                i915_reg_t reg_set;
                u32 val_set;
                u32 val_clear;
@@ -680,14 +694,14 @@ struct intel_uncore {
 };
 
 /* Iterate over initialised fw domains */
-#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
-       for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
-            (i__) < FW_DOMAIN_ID_COUNT; \
-            (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
-               for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
+       for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+            (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
+            (domain__)++) \
+               for_each_if ((mask__) & (domain__)->mask)
 
-#define for_each_fw_domain(domain__, dev_priv__, i__) \
-       for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+#define for_each_fw_domain(domain__, dev_priv__) \
+       for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
 
 #define CSR_VERSION(major, minor)      ((major) << 16 | (minor))
 #define CSR_VERSION_MAJOR(version)     ((version) >> 16)
@@ -996,6 +1010,7 @@ struct intel_fbc_work;
 
 struct intel_gmbus {
        struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
        u32 force_bit;
        u32 reg0;
        i915_reg_t gpio_reg;
@@ -1385,9 +1400,6 @@ struct i915_gpu_error {
 
        /* For missed irq/seqno simulation. */
        unsigned int test_irq_rings;
-
-       /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset   */
-       bool reload_in_reset;
 };
 
 enum modeset_restore {
@@ -1444,6 +1456,7 @@ struct intel_vbt_data {
        unsigned int lvds_use_ssc:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
+       unsigned int panel_type:4;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
@@ -1863,7 +1876,7 @@ struct drm_i915_private {
        struct intel_l3_parity l3_parity;
 
        /* Cannot be determined by PCIID. You must always read a register. */
-       size_t ellc_size;
+       u32 edram_cap;
 
        /* gen6+ rps state */
        struct intel_gen6_power_mgmt rps;
@@ -1911,6 +1924,7 @@ struct drm_i915_private {
         * crappiness (can't read out DPLL_MD for pipes B & C).
         */
        u32 chv_dpll_md[I915_MAX_PIPES];
+       u32 bxt_phy_grc;
 
        u32 suspend_count;
        bool suspended_to_idle;
@@ -2237,6 +2251,7 @@ struct drm_i915_gem_request {
        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
        struct intel_engine_cs *engine;
+       unsigned reset_counter;
 
         /** GEM sequence number associated with the previous request,
          * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2317,7 +2332,6 @@ struct drm_i915_gem_request {
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file);
@@ -2487,6 +2501,7 @@ struct drm_i915_cmd_table {
        __p; \
 })
 #define INTEL_INFO(p)  (&__I915__(p)->info)
+#define INTEL_GEN(p)   (INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
 
@@ -2613,8 +2628,9 @@ struct drm_i915_cmd_table {
 #define HAS_VEBOX(dev)         (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)           (INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)         (INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev)         (__I915__(dev)->edram_cap & EDRAM_ENABLED)
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-                                __I915__(dev)->ellc_size)
+                                HAS_EDRAM(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
@@ -2631,8 +2647,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-                                                 IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+                                                IS_SKL_GT3(dev) || \
+                                                IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2667,7 +2684,7 @@ struct drm_i915_cmd_table {
 #define HAS_RUNTIME_PM(dev)    (IS_GEN6(dev) || IS_HASWELL(dev) || \
                                 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
                                 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-                                IS_KABYLAKE(dev))
+                                IS_KABYLAKE(dev) || IS_BROXTON(dev))
 #define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
 #define HAS_RC6p(dev)          (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
@@ -2791,6 +2808,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains domains);
 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
 static inline bool intel_vgpu_active(struct drm_device *dev)
 {
@@ -2869,7 +2888,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                        struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                                   struct drm_i915_gem_execbuffer2 *args,
                                   struct list_head *vmas);
@@ -3000,9 +3018,11 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
  * pages and then returns a contiguous mapping of the backing storage into
  * the kernel address space.
  *
- * The caller must hold the struct_mutex.
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
  *
- * Returns the pointer through which to access the backing storage.
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
  */
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
 
@@ -3069,23 +3089,45 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
-                                     bool interruptible);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+       return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+       return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+       return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+       return unlikely(reset & I915_WEDGED);
+}
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-       return unlikely(atomic_read(&error->reset_counter)
-                       & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+       return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+       return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-       return atomic_read(&error->reset_counter) & I915_WEDGED;
+       return __i915_terminally_wedged(i915_reset_counter(error));
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-       return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+       return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3118,7 +3160,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 #define i915_add_request_no_flush(req) \
        __i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
-                       unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct intel_rps_client *rps);
@@ -3455,6 +3496,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                         bool enable);
 extern int intel_opregion_notify_adapter(struct drm_device *dev,
                                         pci_power_t state);
+extern int intel_opregion_get_panel_type(struct drm_device *dev);
 #else
 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3470,6 +3512,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 {
        return 0;
 }
+static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+{
+       return -ENODEV;
+}
 #endif
 
 /* intel_acpi.c */
index f4abf3a..261a3ef 100644 (file)
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_mocs.h"
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-#define RQ_BUG_ON(expr)
-
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
        int ret;
 
-#define EXIT_COND (!i915_reset_in_progress(error) || \
-                  i915_terminally_wedged(error))
-       if (EXIT_COND)
+       if (!i915_reset_in_progress(error))
                return 0;
 
        /*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
-                                              EXIT_COND,
+                                              !i915_reset_in_progress(error),
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
+       } else {
+               return 0;
        }
-#undef EXIT_COND
-
-       return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (ret) {
+       if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
-               WARN_ON(ret != -EIO);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -1110,27 +1105,19 @@ put_rpm:
        return ret;
 }
 
-int
-i915_gem_check_wedge(struct i915_gpu_error *error,
-                    bool interruptible)
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
 {
-       if (i915_reset_in_progress(error)) {
+       if (__i915_terminally_wedged(reset_counter))
+               return -EIO;
+
+       if (__i915_reset_in_progress(reset_counter)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;
 
-               /* Recovery complete, but the reset failed ... */
-               if (i915_terminally_wedged(error))
-                       return -EIO;
-
-               /*
-                * Check if GPU Reset is in progress - we need intel_ring_begin
-                * to work properly to reinit the hw state while the gpu is
-                * still marked as reset-in-progress. Handle this with a flag.
-                */
-               if (!error->reload_in_reset)
-                       return -EAGAIN;
+               return -EAGAIN;
        }
 
        return 0;
@@ -1224,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1239,7 +1225,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-                       unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct intel_rps_client *rps)
@@ -1300,13 +1285,14 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                prepare_to_wait(&engine->irq_queue, &wait, state);
 
                /* We need to check whether any gpu reset happened in between
-                * the caller grabbing the seqno and now ... */
-               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
-                       /* ... but upgrade the -EAGAIN to an -EIO if the gpu
-                        * is truely gone. */
-                       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-                       if (ret == 0)
-                               ret = -EAGAIN;
+                * the request being submitted and now. If a reset has occurred,
+                * the request is effectively complete (we either are in the
+                * process of or have discarded the rendering and completely
+                * reset the GPU. The results of the request are lost and we
+                * are free to continue on with the original operation.
+                */
+               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+                       ret = 0;
                        break;
                }
 
@@ -1458,26 +1444,15 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
+       struct drm_i915_private *dev_priv = req->i915;
        bool interruptible;
        int ret;
 
-       BUG_ON(req == NULL);
-
-       dev = req->engine->dev;
-       dev_priv = dev->dev_private;
        interruptible = dev_priv->mm.interruptible;
 
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-       if (ret)
-               return ret;
+       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       ret = __i915_wait_request(req,
-                                 atomic_read(&dev_priv->gpu_error.reset_counter),
-                                 interruptible, NULL, NULL);
+       ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
                return ret;
 
@@ -1521,7 +1496,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 
                        i915_gem_object_retire__read(obj, i);
                }
-               RQ_BUG_ON(obj->active);
+               GEM_BUG_ON(obj->active);
        }
 
        return 0;
@@ -1552,7 +1527,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-       unsigned reset_counter;
        int ret, i, n = 0;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1561,12 +1535,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (!obj->active)
                return 0;
 
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
-       if (ret)
-               return ret;
-
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
        if (readonly) {
                struct drm_i915_gem_request *req;
 
@@ -1588,9 +1556,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        }
 
        mutex_unlock(&dev->struct_mutex);
+       ret = 0;
        for (i = 0; ret == 0 && i < n; i++)
-               ret = __i915_wait_request(requests[i], reset_counter, true,
-                                         NULL, rps);
+               ret = __i915_wait_request(requests[i], true, NULL, rps);
        mutex_lock(&dev->struct_mutex);
 
        for (i = 0; i < n; i++) {
@@ -1964,11 +1932,27 @@ out:
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
+       /* Serialisation between user GTT access and our code depends upon
+        * revoking the CPU's PTE whilst the mutex is held. The next user
+        * pagefault then has to wait until we release the mutex.
+        */
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
        if (!obj->fault_mappable)
                return;
 
        drm_vma_node_unmap(&obj->base.vma_node,
                           obj->base.dev->anon_inode->i_mapping);
+
+       /* Ensure that the CPU's PTE are revoked and there are not outstanding
+        * memory transactions from userspace before we return. The TLB
+        * flushing implied above by changing the PTE above *should* be
+        * sufficient, an extra barrier here just provides us with a bit
+        * of paranoid documentation about our requirement to serialise
+        * memory writes before touching registers / GSM.
+        */
+       wmb();
+
        obj->fault_mappable = false;
 }
 
@@ -2177,11 +2161,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (ret) {
+       if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
-               WARN_ON(ret != -EIO);
                i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
@@ -2470,8 +2453,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-       RQ_BUG_ON(obj->last_write_req == NULL);
-       RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
+       GEM_BUG_ON(obj->last_write_req == NULL);
+       GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2482,8 +2465,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 {
        struct i915_vma *vma;
 
-       RQ_BUG_ON(obj->last_read_req[ring] == NULL);
-       RQ_BUG_ON(!(obj->active & (1 << ring)));
+       GEM_BUG_ON(obj->last_read_req[ring] == NULL);
+       GEM_BUG_ON(!(obj->active & (1 << ring)));
 
        list_del_init(&obj->engine_list[ring]);
        i915_gem_request_assign(&obj->last_read_req[ring], NULL);
@@ -2743,6 +2726,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
                         struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
 
@@ -2751,6 +2735,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 
        *req_out = NULL;
 
+       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+        * and restart.
+        */
+       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+       if (ret)
+               return ret;
+
        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;
@@ -2762,6 +2754,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        kref_init(&req->ref);
        req->i915 = dev_priv;
        req->engine = engine;
+       req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
@@ -2791,7 +2784,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
                 * fully prepared. Thus it can be cleaned up using the proper
                 * free code.
                 */
-               i915_gem_request_cancel(req);
+               intel_ring_reserved_space_cancel(req->ringbuf);
+               i915_gem_request_unreference(req);
                return ret;
        }
 
@@ -2828,13 +2822,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        return err ? ERR_PTR(err) : req;
 }
 
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-       intel_ring_reserved_space_cancel(req->ringbuf);
-
-       i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
@@ -3140,11 +3127,9 @@ retire:
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
        struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-       unsigned reset_counter;
        int i, n = 0;
        int ret;
 
@@ -3178,7 +3163,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        }
 
        drm_gem_object_unreference(&obj->base);
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (obj->last_read_req[i] == NULL)
@@ -3191,7 +3175,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
        for (i = 0; i < n; i++) {
                if (ret == 0)
-                       ret = __i915_wait_request(req[i], reset_counter, true,
+                       ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
                i915_gem_request_unreference__unlocked(req[i]);
@@ -3223,7 +3207,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (!i915_semaphore_is_enabled(obj->base.dev)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
-                                         atomic_read(&i915->gpu_error.reset_counter),
                                          i915->mm.interruptible,
                                          NULL,
                                          &i915->rps.semaphores);
@@ -3344,9 +3327,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                return;
 
-       /* Wait for any direct GTT access to complete */
-       mb();
-
        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;
 
@@ -3451,12 +3431,9 @@ int i915_gpu_idle(struct drm_device *dev)
                                return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
-                       if (ret) {
-                               i915_gem_request_cancel(req);
-                               return ret;
-                       }
-
                        i915_add_request_no_flush(req);
+                       if (ret)
+                               return ret;
                }
 
                ret = intel_engine_idle(engine);
@@ -4179,16 +4156,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
-       unsigned reset_counter;
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
-       if (ret)
-               return ret;
+       /* ABI: return -EIO if already wedged */
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               return -EIO;
 
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4204,7 +4180,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
                target = request;
        }
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        if (target)
                i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
@@ -4212,7 +4187,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (target == NULL)
                return 0;
 
-       ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_request(target, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4372,7 +4347,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
-       BUG_ON(!vma);
        WARN_ON(vma->pin_count == 0);
        WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
@@ -4889,7 +4863,7 @@ i915_gem_init_hw(struct drm_device *dev)
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       if (dev_priv->ellc_size)
+       if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
        if (IS_HASWELL(dev))
@@ -4933,6 +4907,8 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto out;
        }
 
+       intel_mocs_init_l3cc_table(dev);
+
        /* We can't enable contexts until all firmware is loaded */
        if (HAS_GUC_UCODE(dev)) {
                ret = intel_guc_ucode_load(dev);
@@ -4958,34 +4934,33 @@ i915_gem_init_hw(struct drm_device *dev)
                req = i915_gem_request_alloc(engine, NULL);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       i915_gem_cleanup_engines(dev);
-                       goto out;
+                       break;
                }
 
                if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++)
-                               i915_gem_l3_remap(req, j);
+                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+                               ret = i915_gem_l3_remap(req, j);
+                               if (ret)
+                                       goto err_request;
+                       }
                }
 
                ret = i915_ppgtt_init_ring(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("PPGTT enable %s failed %d\n",
-                                 engine->name, ret);
-                       i915_gem_request_cancel(req);
-                       i915_gem_cleanup_engines(dev);
-                       goto out;
-               }
+               if (ret)
+                       goto err_request;
 
                ret = i915_gem_context_enable(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("Context enable %s failed %d\n",
+               if (ret)
+                       goto err_request;
+
+err_request:
+               i915_add_request_no_flush(req);
+               if (ret) {
+                       DRM_ERROR("Failed to enable %s, error=%d\n",
                                  engine->name, ret);
-                       i915_gem_request_cancel(req);
                        i915_gem_cleanup_engines(dev);
-                       goto out;
+                       break;
                }
-
-               i915_add_request_no_flush(req);
        }
 
 out:
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
new file mode 100644 (file)
index 0000000..8292e79
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+
+#ifdef CONFIG_DRM_I915_DEBUG_GEM
+#define GEM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define GEM_BUG_ON(expr)
+#endif
+
+#endif /* __I915_GEM_H__ */
index fe580cb..e5acc39 100644 (file)
@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev)
                struct intel_context *ctx;
 
                list_for_each_entry(ctx, &dev_priv->context_list, link)
-                       intel_lr_context_reset(dev, ctx);
+                       intel_lr_context_reset(dev_priv, ctx);
        }
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -539,7 +539,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
        len = 4;
        if (INTEL_INFO(engine->dev)->gen >= 7)
-               len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+               len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
        ret = intel_ring_begin(req, len);
        if (ret)
@@ -579,6 +579,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        if (INTEL_INFO(engine->dev)->gen >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
+                       i915_reg_t last_reg = {}; /* keep gcc quiet */
 
                        intel_ring_emit(engine,
                                        MI_LOAD_REGISTER_IMM(num_rings));
@@ -586,11 +587,19 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
                                if (signaller == engine)
                                        continue;
 
-                               intel_ring_emit_reg(engine,
-                                                   RING_PSMI_CTL(signaller->mmio_base));
+                               last_reg = RING_PSMI_CTL(signaller->mmio_base);
+                               intel_ring_emit_reg(engine, last_reg);
                                intel_ring_emit(engine,
                                                _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
+
+                       /* Insert a delay before the next switch! */
+                       intel_ring_emit(engine,
+                                       MI_STORE_REGISTER_MEM |
+                                       MI_SRM_LRM_GLOBAL_GTT);
+                       intel_ring_emit_reg(engine, last_reg);
+                       intel_ring_emit(engine, engine->scratch.gtt_offset);
+                       intel_ring_emit(engine, MI_NOOP);
                }
                intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
        }
@@ -600,50 +609,48 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *engine,
-                                     struct intel_context *from,
-                                     struct intel_context *to)
+static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+                                  struct intel_context *to)
 {
        if (to->remap_slice)
                return false;
 
-       if (to->ppgtt && from == to &&
+       if (!to->legacy_hw_ctx.initialized)
+               return false;
+
+       if (to->ppgtt &&
            !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
-               return true;
+               return false;
 
-       return false;
+       return to == engine->last_context;
 }
 
 static bool
 needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
        if (!to->ppgtt)
                return false;
 
-       if (INTEL_INFO(engine->dev)->gen < 8)
+       if (engine->last_context == to &&
+           !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+               return false;
+
+       if (engine->id != RCS)
                return true;
 
-       if (engine != &dev_priv->engine[RCS])
+       if (INTEL_INFO(engine->dev)->gen < 8)
                return true;
 
        return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
-                  u32 hw_flags)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
        if (!to->ppgtt)
                return false;
 
-       if (!IS_GEN8(engine->dev))
-               return false;
-
-       if (engine != &dev_priv->engine[RCS])
+       if (!IS_GEN8(to->i915))
                return false;
 
        if (hw_flags & MI_RESTORE_INHIBIT)
@@ -652,60 +659,33 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
        return false;
 }
 
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
        struct intel_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = req->i915;
-       struct intel_context *from = engine->last_context;
-       u32 hw_flags = 0;
-       bool uninitialized = false;
+       struct intel_context *from;
+       u32 hw_flags;
        int ret, i;
 
-       if (from != NULL && engine == &dev_priv->engine[RCS]) {
-               BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
-               BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
-       }
-
-       if (should_skip_switch(engine, from, to))
+       if (skip_rcs_switch(engine, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       if (engine == &dev_priv->engine[RCS]) {
-               ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-                                           get_context_alignment(engine->dev),
-                                           0);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+                                   get_context_alignment(engine->dev),
+                                   0);
+       if (ret)
+               return ret;
 
        /*
         * Pin can switch back to the default context if we end up calling into
         * evict_everything - as a last ditch gtt defrag effort that also
         * switches to the default context. Hence we need to reload from here.
+        *
+        * XXX: Doing so is painfully broken!
         */
        from = engine->last_context;
 
-       if (needs_pd_load_pre(engine, to)) {
-               /* Older GENs and non render rings still want the load first,
-                * "PP_DCLV followed by PP_DIR_BASE register through Load
-                * Register Immediate commands in Ring Buffer before submitting
-                * a context."*/
-               trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               if (ret)
-                       goto unpin_out;
-
-               /* Doing a PD load always reloads the page dirs */
-               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-       }
-
-       if (engine != &dev_priv->engine[RCS]) {
-               if (from)
-                       i915_gem_context_unreference(from);
-               goto done;
-       }
-
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
@@ -718,53 +698,37 @@ static int do_switch(struct drm_i915_gem_request *req)
        if (ret)
                goto unpin_out;
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
-               hw_flags |= MI_RESTORE_INHIBIT;
+       if (needs_pd_load_pre(engine, to)) {
+               /* Older GENs and non render rings still want the load first,
+                * "PP_DCLV followed by PP_DIR_BASE register through Load
+                * Register Immediate commands in Ring Buffer before submitting
+                * a context."*/
+               trace_switch_mm(engine, to);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               if (ret)
+                       goto unpin_out;
+       }
+
+       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
-       } else if (to->ppgtt &&
-                  (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
-               hw_flags |= MI_FORCE_RESTORE;
-               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-       }
+               hw_flags = MI_RESTORE_INHIBIT;
+       else if (to->ppgtt &&
+                intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+               hw_flags = MI_FORCE_RESTORE;
+       else
+               hw_flags = 0;
 
        /* We should never emit switch_mm more than once */
        WARN_ON(needs_pd_load_pre(engine, to) &&
-               needs_pd_load_post(engine, to, hw_flags));
-
-       ret = mi_set_context(req, hw_flags);
-       if (ret)
-               goto unpin_out;
+               needs_pd_load_post(to, hw_flags));
 
-       /* GEN8 does *not* require an explicit reload if the PDPs have been
-        * setup, and we do not wish to move them.
-        */
-       if (needs_pd_load_post(engine, to, hw_flags)) {
-               trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               /* The hardware context switch is emitted, but we haven't
-                * actually changed the state - so it's probably safe to bail
-                * here. Still, let the user know something dangerous has
-                * happened.
-                */
-               if (ret) {
-                       DRM_ERROR("Failed to change address space on context switch\n");
-                       goto unpin_out;
-               }
-       }
-
-       for (i = 0; i < MAX_L3_SLICES; i++) {
-               if (!(to->remap_slice & (1<<i)))
-                       continue;
-
-               ret = i915_gem_l3_remap(req, i);
-               /* If it failed, try again next round */
+       if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+               ret = mi_set_context(req, hw_flags);
                if (ret)
-                       DRM_DEBUG_DRIVER("L3 remapping failed\n");
-               else
-                       to->remap_slice &= ~(1<<i);
+                       goto unpin_out;
        }
 
        /* The backing object for the context is done after switching to the
@@ -789,27 +753,51 @@ static int do_switch(struct drm_i915_gem_request *req)
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
                i915_gem_context_unreference(from);
        }
-
-       uninitialized = !to->legacy_hw_ctx.initialized;
-       to->legacy_hw_ctx.initialized = true;
-
-done:
        i915_gem_context_reference(to);
        engine->last_context = to;
 
-       if (uninitialized) {
+       /* GEN8 does *not* require an explicit reload if the PDPs have been
+        * setup, and we do not wish to move them.
+        */
+       if (needs_pd_load_post(to, hw_flags)) {
+               trace_switch_mm(engine, to);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               /* The hardware context switch is emitted, but we haven't
+                * actually changed the state - so it's probably safe to bail
+                * here. Still, let the user know something dangerous has
+                * happened.
+                */
+               if (ret)
+                       return ret;
+       }
+
+       if (to->ppgtt)
+               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+
+       for (i = 0; i < MAX_L3_SLICES; i++) {
+               if (!(to->remap_slice & (1<<i)))
+                       continue;
+
+               ret = i915_gem_l3_remap(req, i);
+               if (ret)
+                       return ret;
+
+               to->remap_slice &= ~(1<<i);
+       }
+
+       if (!to->legacy_hw_ctx.initialized) {
                if (engine->init_context) {
                        ret = engine->init_context(req);
                        if (ret)
-                               DRM_ERROR("ring init context: %d\n", ret);
+                               return ret;
                }
+               to->legacy_hw_ctx.initialized = true;
        }
 
        return 0;
 
 unpin_out:
-       if (engine->id == RCS)
-               i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+       i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
        return ret;
 }
 
@@ -834,17 +822,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-               if (req->ctx != engine->last_context) {
-                       i915_gem_context_reference(req->ctx);
+       if (engine->id != RCS ||
+           req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+               struct intel_context *to = req->ctx;
+
+               if (needs_pd_load_pre(engine, to)) {
+                       int ret;
+
+                       trace_switch_mm(engine, to);
+                       ret = to->ppgtt->switch_mm(to->ppgtt, req);
+                       if (ret)
+                               return ret;
+
+                       /* Doing a PD load always reloads the page dirs */
+                       to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+               }
+
+               if (to != engine->last_context) {
+                       i915_gem_context_reference(to);
                        if (engine->last_context)
                                i915_gem_context_unreference(engine->last_context);
-                       engine->last_context = req->ctx;
+                       engine->last_context = to;
                }
+
                return 0;
        }
 
-       return do_switch(req);
+       return do_rcs_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
index 6ee4f00..6f4f2a6 100644 (file)
@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
        }
 }
 
-void
+static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
        /* Unconditionally force add_request to emit a full flush. */
@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, params->request);
-       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        ret = i915_gem_request_add_to_client(req, file);
        if (ret)
-               goto err_batch_unpin;
+               goto err_request;
 
        /*
         * Save assorted stuff away to pass through to *_submission().
@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->request                 = req;
 
        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+       i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
        /*
@@ -1657,14 +1658,6 @@ err:
        i915_gem_context_unreference(ctx);
        eb_destroy(eb);
 
-       /*
-        * If the request was created but not successfully submitted then it
-        * must be freed again. If it was submitted then it is being tracked
-        * on the active request list and no clean up is required here.
-        */
-       if (ret && !IS_ERR_OR_NULL(req))
-               i915_gem_request_cancel(req);
-
        mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
index c5cb049..0d666b3 100644 (file)
@@ -745,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
                        num_entries--;
                }
 
-               kunmap_px(ppgtt, pt);
+               kunmap_px(ppgtt, pt_vaddr);
 
                pte = 0;
                if (++pde == I915_PDES) {
@@ -905,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
        enum vgt_g2v_type msg;
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
        int i;
 
-       if (USES_FULL_48BIT_PPGTT(dev)) {
+       if (USES_FULL_48BIT_PPGTT(dev_priv)) {
                u64 daddr = px_dma(&ppgtt->pml4);
 
                I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -3172,7 +3171,8 @@ int i915_ggtt_init_hw(struct drm_device *dev)
        } else if (INTEL_INFO(dev)->gen < 8) {
                ggtt->probe = gen6_gmch_probe;
                ggtt->base.cleanup = gen6_gmch_remove;
-               if (IS_HASWELL(dev) && dev_priv->ellc_size)
+
+               if (HAS_EDRAM(dev))
                        ggtt->base.pte_encode = iris_pte_encode;
                else if (IS_HASWELL(dev))
                        ggtt->base.pte_encode = hsw_pte_encode;
index d46388f..425e721 100644 (file)
@@ -70,6 +70,10 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
+       /* Only shmemfs objects are backed by swap */
+       if (!obj->base.filp)
+               return false;
+
        /* Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
@@ -336,7 +340,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct drm_i915_gem_object *obj;
-       unsigned long pinned, bound, unbound, freed_pages;
+       unsigned long unevictable, bound, unbound, freed_pages;
 
        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;
@@ -347,33 +351,28 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
-       unbound = bound = pinned = 0;
+       unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-               if (!obj->base.filp) /* not backed by a freeable object */
-                       continue;
-
-               if (obj->pages_pin_count)
-                       pinned += obj->base.size;
+               if (!can_release_pages(obj))
+                       unevictable += obj->base.size >> PAGE_SHIFT;
                else
-                       unbound += obj->base.size;
+                       unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->base.filp)
-                       continue;
-
-               if (obj->pages_pin_count)
-                       pinned += obj->base.size;
+               if (!can_release_pages(obj))
+                       unevictable += obj->base.size >> PAGE_SHIFT;
                else
-                       bound += obj->base.size;
+                       bound += obj->base.size >> PAGE_SHIFT;
        }
 
        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
        if (freed_pages || unbound || bound)
-               pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-                       freed_pages << PAGE_SHIFT, pinned);
+               pr_info("Purging GPU memory, %lu pages freed, "
+                       "%lu pages still pinned.\n",
+                       freed_pages, unevictable);
        if (unbound || bound)
-               pr_err("%lu and %lu bytes still available in the "
+               pr_err("%lu and %lu pages still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);
 
index ea06da0..b7ce963 100644 (file)
@@ -95,9 +95,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        u32 base;
 
        /* Almost universally we can find the Graphics Base of Stolen Memory
-        * at offset 0x5c in the igfx configuration space. On a few (desktop)
-        * machines this is also mirrored in the bridge device at different
-        * locations, or in the MCHBAR.
+        * at register BSM (0x5c) in the igfx configuration space. On a few
+        * (desktop) machines this is also mirrored in the bridge device at
+        * different locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
@@ -107,9 +107,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
-               /* Read Graphics Base of Stolen Memory directly */
-               pci_read_config_dword(dev->pdev, 0x5c, &base);
-               base &= ~((1<<20) - 1);
+               u32 bsm;
+
+               pci_read_config_dword(dev->pdev, BSM, &bsm);
+
+               base = bsm & BSM_MASK;
        } else if (IS_I865G(dev)) {
                u16 toud = 0;
 
index 0f94b6c..32d9726 100644 (file)
@@ -34,7 +34,7 @@
 
 struct i915_mm_struct {
        struct mm_struct *mm;
-       struct drm_device *dev;
+       struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
+       struct workqueue_struct *wq;
 };
 
 struct i915_mmu_object {
@@ -60,6 +61,37 @@ struct i915_mmu_object {
        bool attached;
 };
 
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+       int i, n;
+
+       if (!obj->active)
+               return;
+
+       n = 0;
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
+               struct drm_i915_gem_request *req;
+
+               req = obj->last_read_req[i];
+               if (req == NULL)
+                       continue;
+
+               requests[n++] = i915_gem_request_reference(req);
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       for (i = 0; i < n; i++)
+               __i915_wait_request(requests[i], false, NULL, NULL);
+
+       mutex_lock(&dev->struct_mutex);
+
+       for (i = 0; i < n; i++)
+               i915_gem_request_unreference(requests[i]);
+}
+
 static void cancel_userptr(struct work_struct *work)
 {
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
                struct i915_vma *vma, *tmp;
                bool was_interruptible;
 
+               wait_rendering(obj);
+
                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;
 
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
-                       int ret = i915_vma_unbind(vma);
-                       WARN_ON(ret && ret != -EIO);
-               }
+               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+                       WARN_ON(i915_vma_unbind(vma));
                WARN_ON(i915_gem_object_put_pages(obj));
 
                dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
-                       schedule_work(&mo->work);
+                       queue_work(mn->wq, &mo->work);
 
                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);
+
+       flush_workqueue(mn->wq);
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
+       mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+       if (mn->wq == NULL) {
+               kfree(mn);
+               return ERR_PTR(-ENOMEM);
+       }
 
         /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
+               destroy_workqueue(mn->wq);
                kfree(mn);
                return ERR_PTR(ret);
        }
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
                return mn;
 
        down_write(&mm->mm->mmap_sem);
-       mutex_lock(&to_i915(mm->dev)->mm_lock);
+       mutex_lock(&mm->i915->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
-       mutex_unlock(&to_i915(mm->dev)->mm_lock);
+       mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);
 
        return mn;
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                return;
 
        mmu_notifier_unregister(&mn->mn, mm);
+       destroy_workqueue(mn->wq);
        kfree(mn);
 }
 
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
                }
 
                kref_init(&mm->kref);
-               mm->dev = obj->base.dev;
+               mm->i915 = to_i915(obj->base.dev);
 
                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
 
        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
-       mutex_unlock(&to_i915(mm->dev)->mm_lock);
+       mutex_unlock(&mm->i915->mm_lock);
 
        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
@@ -498,19 +539,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
-               down_read(&mm->mmap_sem);
-               while (pinned < npages) {
-                       ret = get_user_pages_remote(work->task, mm,
-                                       obj->userptr.ptr + pinned * PAGE_SIZE,
-                                       npages - pinned,
-                                       !obj->userptr.read_only, 0,
-                                       pvec + pinned, NULL);
-                       if (ret < 0)
-                               break;
-
-                       pinned += ret;
+               ret = -EFAULT;
+               if (atomic_inc_not_zero(&mm->mm_users)) {
+                       down_read(&mm->mmap_sem);
+                       while (pinned < npages) {
+                               ret = get_user_pages_remote
+                                       (work->task, mm,
+                                        obj->userptr.ptr + pinned * PAGE_SIZE,
+                                        npages - pinned,
+                                        !obj->userptr.read_only, 0,
+                                        pvec + pinned, NULL);
+                               if (ret < 0)
+                                       break;
+
+                               pinned += ret;
+                       }
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
                }
-               up_read(&mm->mmap_sem);
        }
 
        mutex_lock(&dev->struct_mutex);
index da86bdb..d40c13f 100644 (file)
@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
        struct guc_doorbell_info *doorbell;
-       void *base;
 
-       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-       doorbell = base + client->doorbell_offset;
+       doorbell = client->client_base + client->doorbell_offset;
 
-       doorbell->db_status = 1;
+       doorbell->db_status = GUC_DOORBELL_ENABLED;
        doorbell->cookie = 0;
-
-       kunmap_atomic(base);
 }
 
 static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
        struct guc_process_desc *desc;
        union guc_doorbell_qw db_cmp, db_exc, db_ret;
        union guc_doorbell_qw *db;
-       void *base;
        int attempt = 2, ret = -EAGAIN;
 
-       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-       desc = base + gc->proc_desc_offset;
+       desc = gc->client_base + gc->proc_desc_offset;
 
        /* Update the tail so it is visible to GuC */
        desc->tail = gc->wq_tail;
@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                db_exc.cookie = 1;
 
        /* pointer of current doorbell cacheline */
-       db = base + gc->doorbell_offset;
+       db = gc->client_base + gc->doorbell_offset;
 
        while (attempt--) {
                /* lets ring the doorbell */
@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                        db_exc.cookie = 1;
        }
 
-       /* Finally, update the cached copy of the GuC's WQ head */
-       gc->wq_head = desc->head;
-
-       kunmap_atomic(base);
        return ret;
 }
 
@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct guc_doorbell_info *doorbell;
-       void *base;
        i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
        int value;
 
-       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-       doorbell = base + client->doorbell_offset;
-
-       doorbell->db_status = 0;
+       doorbell = client->client_base + client->doorbell_offset;
 
-       kunmap_atomic(base);
+       doorbell->db_status = GUC_DOORBELL_DISABLED;
 
        I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
 
@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
                               struct i915_guc_client *client)
 {
        struct guc_process_desc *desc;
-       void *base;
 
-       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-       desc = base + client->proc_desc_offset;
+       desc = client->client_base + client->proc_desc_offset;
 
        memset(desc, 0, sizeof(*desc));
 
@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
        desc->wq_size_bytes = client->wq_size;
        desc->wq_status = WQ_STATUS_ACTIVE;
        desc->priority = client->priority;
-
-       kunmap_atomic(base);
 }
 
 /*
@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
+       struct drm_i915_gem_object *client_obj = client->client_obj;
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct intel_engine_cs *engine;
        struct intel_context *ctx = client->owner;
        struct guc_context_desc desc;
        struct sg_table *sg;
        enum intel_engine_id id;
+       u32 gfx_addr;
 
        memset(&desc, 0, sizeof(desc));
 
@@ -410,16 +394,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                lrc->context_desc = (u32)ctx_desc;
 
                /* The state page is after PPHWSP */
-               lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
-                               LRC_STATE_PN * PAGE_SIZE;
+               gfx_addr = i915_gem_obj_ggtt_offset(obj);
+               lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
                obj = ctx->engine[id].ringbuf->obj;
+               gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
-               lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
-               lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
-               lrc->ring_next_free_location = lrc->ring_begin;
+               lrc->ring_begin = gfx_addr;
+               lrc->ring_end = gfx_addr + obj->base.size - 1;
+               lrc->ring_next_free_location = gfx_addr;
                lrc->ring_current_tail_pointer_value = 0;
 
                desc.engines_used |= (1 << engine->guc_id);
@@ -428,22 +413,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        WARN_ON(desc.engines_used == 0);
 
        /*
-        * The CPU address is only needed at certain points, so kmap_atomic on
-        * demand instead of storing it in the ctx descriptor.
-        * XXX: May make debug easier to have it mapped
+        * The doorbell, process descriptor, and workqueue are all parts
+        * of the client object, which the GuC will reference via the GGTT
         */
-       desc.db_trigger_cpu = 0;
-       desc.db_trigger_uk = client->doorbell_offset +
-               i915_gem_obj_ggtt_offset(client->client_obj);
-       desc.db_trigger_phy = client->doorbell_offset +
-               sg_dma_address(client->client_obj->pages->sgl);
-
-       desc.process_desc = client->proc_desc_offset +
-               i915_gem_obj_ggtt_offset(client->client_obj);
-
-       desc.wq_addr = client->wq_offset +
-               i915_gem_obj_ggtt_offset(client->client_obj);
-
+       gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+       desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+                               client->doorbell_offset;
+       desc.db_trigger_cpu = (uintptr_t)client->client_base +
+                               client->doorbell_offset;
+       desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
+       desc.process_desc = gfx_addr + client->proc_desc_offset;
+       desc.wq_addr = gfx_addr + client->wq_offset;
        desc.wq_size = client->wq_size;
 
        /*
@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
        struct guc_process_desc *desc;
-       void *base;
        u32 size = sizeof(struct guc_wq_item);
        int ret = -ETIMEDOUT, timeout_counter = 200;
 
        if (!gc)
                return 0;
 
-       /* Quickly return if wq space is available since last time we cache the
-        * head position. */
-       if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
-               return 0;
-
-       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-       desc = base + gc->proc_desc_offset;
+       desc = gc->client_base + gc->proc_desc_offset;
 
        while (timeout_counter-- > 0) {
-               gc->wq_head = desc->head;
-
-               if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+               if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
                        ret = 0;
                        break;
                }
@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
                        usleep_range(1000, 2000);
        };
 
-       kunmap_atomic(base);
-
        return ret;
 }
 
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
                                  struct drm_i915_gem_request *rq)
 {
+       struct guc_process_desc *desc;
        struct guc_wq_item *wqi;
        void *base;
        u32 tail, wq_len, wq_off, space;
 
-       space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+       desc = gc->client_base + gc->proc_desc_offset;
+       space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
        if (WARN_ON(space < sizeof(struct guc_wq_item)))
                return -ENOSPC; /* shouldn't happen */
 
@@ -661,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
        if (!client)
                return;
 
-       if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
-               /*
-                * First disable the doorbell, then tell the GuC we've
-                * finished with it, finally deallocate it in our bitmap
-                */
-               guc_disable_doorbell(guc, client);
-               host2guc_release_doorbell(guc, client);
-               release_doorbell(guc, client->doorbell_id);
-       }
-
        /*
         * XXX: wait for any outstanding submissions before freeing memory.
         * Be sure to drop any locks
         */
 
+       if (client->client_base) {
+               /*
+                * If we got as far as setting up a doorbell, make sure
+                * we shut it down before unmapping & deallocating the
+                * memory. So first disable the doorbell, then tell the
+                * GuC that we've finished with it, finally deallocate
+                * it in our bitmap
+                */
+               if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+                       guc_disable_doorbell(guc, client);
+                       host2guc_release_doorbell(guc, client);
+                       release_doorbell(guc, client->doorbell_id);
+               }
+
+               kunmap(kmap_to_page(client->client_base));
+       }
+
        gem_release_guc_obj(client->client_obj);
 
        if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -696,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
  * @ctx:       the context that owns the client (we use the default render
  *             context)
  *
- * Return:     An i915_guc_client object if success.
+ * Return:     An i915_guc_client object if success, else NULL.
  */
 static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
                                                uint32_t priority,
@@ -728,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
        if (!obj)
                goto err;
 
+       /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
        client->client_obj = obj;
+       client->client_base = kmap(i915_gem_object_get_page(obj, 0));
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
 
index 679f08c..2f6fd33 100644 (file)
@@ -1264,18 +1264,17 @@ out:
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
+                                              u32 iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!HAS_L3_DPF(dev))
+       if (!HAS_L3_DPF(dev_priv))
                return;
 
        spin_lock(&dev_priv->irq_lock);
-       gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+       gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock(&dev_priv->irq_lock);
 
-       iir &= GT_PARITY_ERROR(dev);
+       iir &= GT_PARITY_ERROR(dev_priv);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;
 
@@ -1285,8 +1284,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
-static void ilk_gt_irq_handler(struct drm_device *dev,
-                              struct drm_i915_private *dev_priv,
+static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
        if (gt_iir &
@@ -1296,8 +1294,7 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
                notify_ring(&dev_priv->engine[VCS]);
 }
 
-static void snb_gt_irq_handler(struct drm_device *dev,
-                              struct drm_i915_private *dev_priv,
+static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
 
@@ -1314,8 +1311,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
-       if (gt_iir & GT_PARITY_ERROR(dev))
-               ivybridge_parity_error_irq_handler(dev, gt_iir);
+       if (gt_iir & GT_PARITY_ERROR(dev_priv))
+               ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
 }
 
 static __always_inline void
@@ -1327,60 +1324,45 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
                tasklet_schedule(&engine->irq_tasklet);
 }
 
-static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
-                                      u32 master_ctl)
+static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+                                  u32 master_ctl,
+                                  u32 gt_iir[4])
 {
        irqreturn_t ret = IRQ_NONE;
 
        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-               u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
-               if (iir) {
-                       I915_WRITE_FW(GEN8_GT_IIR(0), iir);
+               gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
+               if (gt_iir[0]) {
+                       I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
                        ret = IRQ_HANDLED;
-
-                       gen8_cs_irq_handler(&dev_priv->engine[RCS],
-                                           iir, GEN8_RCS_IRQ_SHIFT);
-
-                       gen8_cs_irq_handler(&dev_priv->engine[BCS],
-                                           iir, GEN8_BCS_IRQ_SHIFT);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }
 
        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
-               u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
-               if (iir) {
-                       I915_WRITE_FW(GEN8_GT_IIR(1), iir);
+               gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
+               if (gt_iir[1]) {
+                       I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
                        ret = IRQ_HANDLED;
-
-                       gen8_cs_irq_handler(&dev_priv->engine[VCS],
-                                           iir, GEN8_VCS1_IRQ_SHIFT);
-
-                       gen8_cs_irq_handler(&dev_priv->engine[VCS2],
-                                           iir, GEN8_VCS2_IRQ_SHIFT);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }
 
        if (master_ctl & GEN8_GT_VECS_IRQ) {
-               u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
-               if (iir) {
-                       I915_WRITE_FW(GEN8_GT_IIR(3), iir);
+               gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
+               if (gt_iir[3]) {
+                       I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
                        ret = IRQ_HANDLED;
-
-                       gen8_cs_irq_handler(&dev_priv->engine[VECS],
-                                           iir, GEN8_VECS_IRQ_SHIFT);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }
 
        if (master_ctl & GEN8_GT_PM_IRQ) {
-               u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
-               if (iir & dev_priv->pm_rps_events) {
+               gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
+               if (gt_iir[2] & dev_priv->pm_rps_events) {
                        I915_WRITE_FW(GEN8_GT_IIR(2),
-                                     iir & dev_priv->pm_rps_events);
+                                     gt_iir[2] & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
-                       gen6_rps_irq_handler(dev_priv, iir);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }
@@ -1388,6 +1370,31 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
        return ret;
 }
 
+static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
+                               u32 gt_iir[4])
+{
+       if (gt_iir[0]) {
+               gen8_cs_irq_handler(&dev_priv->engine[RCS],
+                                   gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+               gen8_cs_irq_handler(&dev_priv->engine[BCS],
+                                   gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+       }
+
+       if (gt_iir[1]) {
+               gen8_cs_irq_handler(&dev_priv->engine[VCS],
+                                   gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+               gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+                                   gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+       }
+
+       if (gt_iir[3])
+               gen8_cs_irq_handler(&dev_priv->engine[VECS],
+                                   gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+
+       if (gt_iir[2] & dev_priv->pm_rps_events)
+               gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+}
+
 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
@@ -1644,10 +1651,10 @@ static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
        return true;
 }
 
-static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
+                                       u32 pipe_stats[I915_MAX_PIPES])
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pipe_stats[I915_MAX_PIPES] = { };
        int pipe;
 
        spin_lock(&dev_priv->irq_lock);
@@ -1701,6 +1708,13 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
                        I915_WRITE(reg, pipe_stats[pipe]);
        }
        spin_unlock(&dev_priv->irq_lock);
+}
+
+static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+                                           u32 pipe_stats[I915_MAX_PIPES])
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -1723,21 +1737,20 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
                gmbus_irq_handler(dev);
 }
 
-static void i9xx_hpd_irq_handler(struct drm_device *dev)
+static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-       u32 pin_mask = 0, long_mask = 0;
 
-       if (!hotplug_status)
-               return;
+       if (hotplug_status)
+               I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 
-       I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-       /*
-        * Make sure hotplug status is cleared before we clear IIR, or else we
-        * may miss hotplug events.
-        */
-       POSTING_READ(PORT_HOTPLUG_STAT);
+       return hotplug_status;
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev,
+                                u32 hotplug_status)
+{
+       u32 pin_mask = 0, long_mask = 0;
 
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
@@ -1768,7 +1781,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1777,40 +1789,72 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);
 
-       while (true) {
-               /* Find, clear, then process each source of interrupt */
+       do {
+               u32 iir, gt_iir, pm_iir;
+               u32 pipe_stats[I915_MAX_PIPES] = {};
+               u32 hotplug_status = 0;
+               u32 ier = 0;
 
                gt_iir = I915_READ(GTIIR);
-               if (gt_iir)
-                       I915_WRITE(GTIIR, gt_iir);
-
                pm_iir = I915_READ(GEN6_PMIIR);
-               if (pm_iir)
-                       I915_WRITE(GEN6_PMIIR, pm_iir);
-
                iir = I915_READ(VLV_IIR);
-               if (iir) {
-                       /* Consume port before clearing IIR or we'll miss events */
-                       if (iir & I915_DISPLAY_PORT_INTERRUPT)
-                               i9xx_hpd_irq_handler(dev);
-                       I915_WRITE(VLV_IIR, iir);
-               }
 
                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
-                       goto out;
+                       break;
 
                ret = IRQ_HANDLED;
 
+               /*
+                * Theory on interrupt generation, based on empirical evidence:
+                *
+                * x = ((VLV_IIR & VLV_IER) ||
+                *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
+                *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
+                *
+                * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+                * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
+                * guarantee the CPU interrupt will be raised again even if we
+                * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
+                * bits this time around.
+                */
+               I915_WRITE(VLV_MASTER_IER, 0);
+               ier = I915_READ(VLV_IER);
+               I915_WRITE(VLV_IER, 0);
+
                if (gt_iir)
-                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
+                       I915_WRITE(GTIIR, gt_iir);
                if (pm_iir)
-                       gen6_rps_irq_handler(dev_priv, pm_iir);
+                       I915_WRITE(GEN6_PMIIR, pm_iir);
+
+               if (iir & I915_DISPLAY_PORT_INTERRUPT)
+                       hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
-               valleyview_pipestat_irq_handler(dev, iir);
-       }
+               valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+
+               /*
+                * VLV_IIR is single buffered, and reflects the level
+                * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+                */
+               if (iir)
+                       I915_WRITE(VLV_IIR, iir);
+
+               I915_WRITE(VLV_IER, ier);
+               I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+               POSTING_READ(VLV_MASTER_IER);
+
+               if (gt_iir)
+                       snb_gt_irq_handler(dev_priv, gt_iir);
+               if (pm_iir)
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
+
+               if (hotplug_status)
+                       i9xx_hpd_irq_handler(dev, hotplug_status);
+
+               valleyview_pipestat_irq_handler(dev, pipe_stats);
+       } while (0);
 
-out:
        enable_rpm_wakeref_asserts(dev_priv);
 
        return ret;
@@ -1820,7 +1864,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 master_ctl, iir;
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1830,6 +1873,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        disable_rpm_wakeref_asserts(dev_priv);
 
        do {
+               u32 master_ctl, iir;
+               u32 gt_iir[4] = {};
+               u32 pipe_stats[I915_MAX_PIPES] = {};
+               u32 hotplug_status = 0;
+               u32 ier = 0;
+
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
 
@@ -1838,25 +1887,49 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                ret = IRQ_HANDLED;
 
+               /*
+                * Theory on interrupt generation, based on empirical evidence:
+                *
+                * x = ((VLV_IIR & VLV_IER) ||
+                *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
+                *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
+                *
+                * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+                * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
+                * guarantee the CPU interrupt will be raised again even if we
+                * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
+                * bits this time around.
+                */
                I915_WRITE(GEN8_MASTER_IRQ, 0);
+               ier = I915_READ(VLV_IER);
+               I915_WRITE(VLV_IER, 0);
 
-               /* Find, clear, then process each source of interrupt */
+               gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
 
-               if (iir) {
-                       /* Consume port before clearing IIR or we'll miss events */
-                       if (iir & I915_DISPLAY_PORT_INTERRUPT)
-                               i9xx_hpd_irq_handler(dev);
-                       I915_WRITE(VLV_IIR, iir);
-               }
-
-               gen8_gt_irq_handler(dev_priv, master_ctl);
+               if (iir & I915_DISPLAY_PORT_INTERRUPT)
+                       hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
-               valleyview_pipestat_irq_handler(dev, iir);
+               valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
 
-               I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+               /*
+                * VLV_IIR is single buffered, and reflects the level
+                * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+                */
+               if (iir)
+                       I915_WRITE(VLV_IIR, iir);
+
+               I915_WRITE(VLV_IER, ier);
+               I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
+
+               gen8_gt_irq_handler(dev_priv, gt_iir);
+
+               if (hotplug_status)
+                       i9xx_hpd_irq_handler(dev, hotplug_status);
+
+               valleyview_pipestat_irq_handler(dev, pipe_stats);
        } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -2217,9 +2290,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 6)
-                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
+                       snb_gt_irq_handler(dev_priv, gt_iir);
                else
-                       ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+                       ilk_gt_irq_handler(dev_priv, gt_iir);
        }
 
        de_iir = I915_READ(DEIIR);
@@ -2419,6 +2492,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 master_ctl;
+       u32 gt_iir[4] = {};
        irqreturn_t ret;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -2435,7 +2509,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        disable_rpm_wakeref_asserts(dev_priv);
 
        /* Find, clear, then process each source of interrupt */
-       ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+       ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+       gen8_gt_irq_handler(dev_priv, gt_iir);
        ret |= gen8_de_irq_handler(dev_priv, master_ctl);
 
        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2483,7 +2558,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 static void i915_reset_and_wakeup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_gpu_error *error = &dev_priv->gpu_error;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2501,7 +2575,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
         * the reset in-progress bit is only ever set by code outside of this
         * work we don't need to worry about any other races.
         */
-       if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+       if (i915_reset_in_progress(&dev_priv->gpu_error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
                kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
                                   reset_event);
@@ -2529,25 +2603,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 
                intel_runtime_pm_put(dev_priv);
 
-               if (ret == 0) {
-                       /*
-                        * After all the gem state is reset, increment the reset
-                        * counter and wake up everyone waiting for the reset to
-                        * complete.
-                        *
-                        * Since unlock operations are a one-sided barrier only,
-                        * we need to insert a barrier here to order any seqno
-                        * updates before
-                        * the counter increment.
-                        */
-                       smp_mb__before_atomic();
-                       atomic_inc(&dev_priv->gpu_error.reset_counter);
-
+               if (ret == 0)
                        kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
-               } else {
-                       atomic_or(I915_WEDGED, &error->reset_counter);
-               }
 
                /*
                 * Note: The wake_up also serves as a memory barrier so that
@@ -3285,6 +3343,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev)
                GEN5_IRQ_RESET(GEN6_PM);
 }
 
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+       enum pipe pipe;
+
+       if (IS_CHERRYVIEW(dev_priv))
+               I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+       else
+               I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+
+       i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
+       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+       for_each_pipe(dev_priv, pipe) {
+               I915_WRITE(PIPESTAT(pipe),
+                          PIPE_FIFO_UNDERRUN_STATUS |
+                          PIPESTAT_INT_STATUS_MASK);
+               dev_priv->pipestat_irq_mask[pipe] = 0;
+       }
+
+       GEN5_IRQ_RESET(VLV_);
+       dev_priv->irq_mask = ~0;
+}
+
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+       u32 pipestat_mask;
+       u32 enable_mask;
+       enum pipe pipe;
+
+       pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+                       PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+       for_each_pipe(dev_priv, pipe)
+               i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+
+       enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+       if (IS_CHERRYVIEW(dev_priv))
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+
+       WARN_ON(dev_priv->irq_mask != ~0);
+
+       dev_priv->irq_mask = ~enable_mask;
+
+       GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+}
+
 /* drm_dma.h hooks
 */
 static void ironlake_irq_reset(struct drm_device *dev)
@@ -3302,34 +3409,19 @@ static void ironlake_irq_reset(struct drm_device *dev)
        ibx_irq_reset(dev);
 }
 
-static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
-{
-       enum pipe pipe;
-
-       i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
-       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), 0xffff);
-
-       GEN5_IRQ_RESET(VLV_);
-}
-
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* VLV magic */
-       I915_WRITE(VLV_IMR, 0);
-       I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
-       I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
-       I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+       I915_WRITE(VLV_MASTER_IER, 0);
+       POSTING_READ(VLV_MASTER_IER);
 
        gen5_gt_irq_reset(dev);
 
-       I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-
-       vlv_display_irq_reset(dev_priv);
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_reset(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3402,9 +3494,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
 
        GEN5_IRQ_RESET(GEN8_PCU_);
 
-       I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
-
-       vlv_display_irq_reset(dev_priv);
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_reset(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
@@ -3651,74 +3744,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
-{
-       u32 pipestat_mask;
-       u32 iir_mask;
-       enum pipe pipe;
-
-       pipestat_mask = PIPESTAT_INT_STATUS_MASK |
-                       PIPE_FIFO_UNDERRUN_STATUS;
-
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), pipestat_mask);
-       POSTING_READ(PIPESTAT(PIPE_A));
-
-       pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
-                       PIPE_CRC_DONE_INTERRUPT_STATUS;
-
-       i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-       for_each_pipe(dev_priv, pipe)
-                     i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
-
-       iir_mask = I915_DISPLAY_PORT_INTERRUPT |
-                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
-       if (IS_CHERRYVIEW(dev_priv))
-               iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-       dev_priv->irq_mask &= ~iir_mask;
-
-       I915_WRITE(VLV_IIR, iir_mask);
-       I915_WRITE(VLV_IIR, iir_mask);
-       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-}
-
-static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
-{
-       u32 pipestat_mask;
-       u32 iir_mask;
-       enum pipe pipe;
-
-       iir_mask = I915_DISPLAY_PORT_INTERRUPT |
-                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
-       if (IS_CHERRYVIEW(dev_priv))
-               iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-
-       dev_priv->irq_mask |= iir_mask;
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-       I915_WRITE(VLV_IIR, iir_mask);
-       I915_WRITE(VLV_IIR, iir_mask);
-       POSTING_READ(VLV_IIR);
-
-       pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
-                       PIPE_CRC_DONE_INTERRUPT_STATUS;
-
-       i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-       for_each_pipe(dev_priv, pipe)
-               i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
-
-       pipestat_mask = PIPESTAT_INT_STATUS_MASK |
-                       PIPE_FIFO_UNDERRUN_STATUS;
-
-       for_each_pipe(dev_priv, pipe)
-               I915_WRITE(PIPESTAT(pipe), pipestat_mask);
-       POSTING_READ(PIPESTAT(PIPE_A));
-}
-
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 {
        assert_spin_locked(&dev_priv->irq_lock);
@@ -3728,8 +3753,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 
        dev_priv->display_irqs_enabled = true;
 
-       if (intel_irqs_enabled(dev_priv))
-               valleyview_display_irqs_install(dev_priv);
+       if (intel_irqs_enabled(dev_priv)) {
+               vlv_display_irq_reset(dev_priv);
+               vlv_display_irq_postinstall(dev_priv);
+       }
 }
 
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
@@ -3742,45 +3769,23 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
        dev_priv->display_irqs_enabled = false;
 
        if (intel_irqs_enabled(dev_priv))
-               valleyview_display_irqs_uninstall(dev_priv);
+               vlv_display_irq_reset(dev_priv);
 }
 
-static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
-{
-       dev_priv->irq_mask = ~0;
-
-       i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
-       POSTING_READ(PORT_HOTPLUG_EN);
-
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IIR, 0xffffffff);
-       I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-
-       /* Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked check happy. */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display_irqs_enabled)
-               valleyview_display_irqs_install(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       vlv_display_irq_postinstall(dev_priv);
-
        gen5_gt_irq_postinstall(dev);
 
-       /* ack & enable invalid PTE error interrupts */
-#if 0 /* FIXME: add support to irq handler for checking these bits */
-       I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-       I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
-#endif
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_postinstall(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+       POSTING_READ(VLV_MASTER_IER);
 
        return 0;
 }
@@ -3791,7 +3796,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
        uint32_t gt_interrupts[] = {
                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-                       GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
@@ -3803,6 +3807,9 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
                };
 
+       if (HAS_L3_DPF(dev_priv))
+               gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
        dev_priv->pm_irq_mask = 0xffffffff;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
@@ -3870,7 +3877,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev))
                ibx_irq_postinstall(dev);
 
-       I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+       I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        POSTING_READ(GEN8_MASTER_IRQ);
 
        return 0;
@@ -3880,11 +3887,14 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       vlv_display_irq_postinstall(dev_priv);
-
        gen8_gt_irq_postinstall(dev_priv);
 
-       I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_postinstall(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        POSTING_READ(GEN8_MASTER_IRQ);
 
        return 0;
@@ -3900,20 +3910,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
        gen8_irq_reset(dev);
 }
 
-static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-       /* Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked check happy. */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display_irqs_enabled)
-               valleyview_display_irqs_uninstall(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       vlv_display_irq_reset(dev_priv);
-
-       dev_priv->irq_mask = ~0;
-}
-
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3922,12 +3918,16 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
                return;
 
        I915_WRITE(VLV_MASTER_IER, 0);
+       POSTING_READ(VLV_MASTER_IER);
 
        gen5_gt_irq_reset(dev);
 
        I915_WRITE(HWSTAM, 0xffffffff);
 
-       vlv_display_irq_uninstall(dev_priv);
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_reset(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
@@ -3944,7 +3944,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
 
        GEN5_IRQ_RESET(GEN8_PCU_);
 
-       vlv_display_irq_uninstall(dev_priv);
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled)
+               vlv_display_irq_reset(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -4271,8 +4274,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                /* Consume port.  Then clear IIR or we'll miss events */
                if (I915_HAS_HOTPLUG(dev) &&
-                   iir & I915_DISPLAY_PORT_INTERRUPT)
-                       i9xx_hpd_irq_handler(dev);
+                   iir & I915_DISPLAY_PORT_INTERRUPT) {
+                       u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+                       if (hotplug_status)
+                               i9xx_hpd_irq_handler(dev, hotplug_status);
+               }
 
                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -4501,8 +4507,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                ret = IRQ_HANDLED;
 
                /* Consume port.  Then clear IIR or we'll miss events */
-               if (iir & I915_DISPLAY_PORT_INTERRUPT)
-                       i9xx_hpd_irq_handler(dev);
+               if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+                       u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+                       if (hotplug_status)
+                               i9xx_hpd_irq_handler(dev, hotplug_status);
+               }
 
                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */
index cea5a39..58ac6c7 100644 (file)
@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 /* PCI config space */
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4 * 4096)
+
+#define DEVEN 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+#define BSM 0x5c
+#define   BSM_MASK (0xFFFF << 20)
+
 #define HPLLCC 0xc0 /* 85x only */
 #define   GC_CLOCK_CONTROL_MASK                (0x7 << 0)
 #define   GC_CLOCK_133_200             (0 << 0)
@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GC_CLOCK_166_266             (6 << 0)
 #define   GC_CLOCK_166_250             (7 << 0)
 
+#define I915_GDRST 0xc0 /* PCI config register */
+#define   GRDOM_FULL           (0 << 2)
+#define   GRDOM_RENDER         (1 << 2)
+#define   GRDOM_MEDIA          (3 << 2)
+#define   GRDOM_MASK           (3 << 2)
+#define   GRDOM_RESET_STATUS   (1 << 1)
+#define   GRDOM_RESET_ENABLE   (1 << 0)
+
+#define GCDGMBUS 0xcc
+
 #define GCFGC2 0xda
 #define GCFGC  0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE      (1 << 7)
@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
 #define   I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define GCDGMBUS 0xcc
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
+#define ASLE   0xe4
+#define ASLS   0xfc
+
+#define SWSCI  0xe8
+#define   SWSCI_SCISEL (1 << 15)
+#define   SWSCI_GSSCIE (1 << 0)
+
+#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
-/* Graphics reset regs */
-#define I915_GDRST 0xc0 /* PCI config register */
-#define  GRDOM_FULL    (0<<2)
-#define  GRDOM_RENDER  (1<<2)
-#define  GRDOM_MEDIA   (3<<2)
-#define  GRDOM_MASK    (3<<2)
-#define  GRDOM_RESET_STATUS (1<<1)
-#define  GRDOM_RESET_ENABLE (1<<0)
 
 #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
 #define  ILK_GRDOM_FULL                (0<<1)
@@ -1375,14 +1393,10 @@ enum skl_disp_power_wells {
 
 #define _PORT_REF_DW6_A                        0x162198
 #define _PORT_REF_DW6_BC               0x6C198
-/*
- * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
- * after testing.
- */
-#define   GRC_CODE_SHIFT               23
-#define   GRC_CODE_MASK                        (0x1FF << GRC_CODE_SHIFT)
+#define   GRC_CODE_SHIFT               24
+#define   GRC_CODE_MASK                        (0xFF << GRC_CODE_SHIFT)
 #define   GRC_CODE_FAST_SHIFT          16
-#define   GRC_CODE_FAST_MASK           (0x7F << GRC_CODE_FAST_SHIFT)
+#define   GRC_CODE_FAST_MASK           (0xFF << GRC_CODE_FAST_SHIFT)
 #define   GRC_CODE_SLOW_SHIFT          8
 #define   GRC_CODE_SLOW_MASK           (0xFF << GRC_CODE_SLOW_SHIFT)
 #define   GRC_CODE_NOM_MASK            0xFF
@@ -2934,7 +2948,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)   (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)   roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
 #define INTERVAL_0_833_US(us)  (((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
@@ -2943,6 +2964,15 @@ enum skl_disp_power_wells {
                                INTERVAL_1_33_US(us)) : \
                                INTERVAL_1_28_US(us))
 
+#define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
+#define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
+#define INTERVAL_0_833_TO_US(interval) (((interval) * 5)  / 6)
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+                           (IS_BROXTON(dev_priv) ? \
+                           INTERVAL_0_833_TO_US(interval) : \
+                           INTERVAL_1_33_TO_US(interval)) : \
+                           INTERVAL_1_28_TO_US(interval))
+
 /*
  * Logical Context regs
  */
@@ -6866,6 +6896,8 @@ enum skl_disp_power_wells {
 #define  VLV_SPAREG2H                          _MMIO(0xA194)
 
 #define  GTFIFODBG                             _MMIO(0x120000)
+#define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV   (0x1f << 20)
+#define    GT_FIFO_FREE_ENTRIES_CHV            (0x7f << 13)
 #define    GT_FIFO_SBDROPERR                   (1<<6)
 #define    GT_FIFO_BLOBDROPERR                 (1<<5)
 #define    GT_FIFO_SB_READ_ABORTERR            (1<<4)
@@ -6882,8 +6914,11 @@ enum skl_disp_power_wells {
 
 #define  HSW_IDICR                             _MMIO(0x9008)
 #define    IDIHASHMSK(x)                       (((x) & 0x3f) << 16)
-#define  HSW_EDRAM_PRESENT                     _MMIO(0x120010)
+#define  HSW_EDRAM_CAP                         _MMIO(0x120010)
 #define    EDRAM_ENABLED                       0x1
+#define    EDRAM_NUM_BANKS(cap)                        (((cap) >> 1) & 0xf)
+#define    EDRAM_WAYS_IDX(cap)                 (((cap) >> 5) & 0x7)
+#define    EDRAM_SETS_IDX(cap)                 (((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1                           _MMIO(0x9400)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE             (1 << 16)
@@ -7161,6 +7196,7 @@ enum skl_disp_power_wells {
 
 #define GEN9_HALF_SLICE_CHICKEN7       _MMIO(0xe194)
 #define   GEN9_ENABLE_YV12_BUGFIX      (1<<4)
+#define   GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
 
 /* Audio */
 #define G4X_AUD_VID_DID                        _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
index eb756c4..e72dd9a 100644 (file)
@@ -58,8 +58,6 @@
 #define        SLAVE_ADDR1     0x70
 #define        SLAVE_ADDR2     0x72
 
-static int panel_type;
-
 /* Get BDB block size given a pointer to Block ID. */
 static u32 _get_blocksize(const u8 *block_base)
 {
@@ -205,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        const struct lvds_dvo_timing *panel_dvo_timing;
        const struct lvds_fp_timing *fp_timing;
        struct drm_display_mode *panel_fixed_mode;
+       int panel_type;
        int drrs_mode;
+       int ret;
 
        lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
        if (!lvds_options)
                return;
 
        dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
-       if (lvds_options->panel_type == 0xff)
-               return;
 
-       panel_type = lvds_options->panel_type;
+       ret = intel_opregion_get_panel_type(dev_priv->dev);
+       if (ret >= 0) {
+               WARN_ON(ret > 0xf);
+               panel_type = ret;
+               DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+       } else {
+               if (lvds_options->panel_type > 0xf) {
+                       DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+                                     lvds_options->panel_type);
+                       return;
+               }
+               panel_type = lvds_options->panel_type;
+               DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+       }
+
+       dev_priv->vbt.panel_type = panel_type;
 
        drrs_mode = (lvds_options->dps_panel_type_bits
                                >> (panel_type * 2)) & MODE_MASK;
@@ -251,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
        panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
                                               lvds_lfp_data_ptrs,
-                                              lvds_options->panel_type);
+                                              panel_type);
 
        panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
        if (!panel_fixed_mode)
@@ -266,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
        fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
                                       lvds_lfp_data_ptrs,
-                                      lvds_options->panel_type);
+                                      panel_type);
        if (fp_timing) {
                /* check the resolution, just to be sure */
                if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@@ -284,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 {
        const struct bdb_lfp_backlight_data *backlight_data;
        const struct bdb_lfp_backlight_data_entry *entry;
+       int panel_type = dev_priv->vbt.panel_type;
 
        backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
        if (!backlight_data)
@@ -546,6 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        const struct bdb_edp *edp;
        const struct edp_power_seq *edp_pps;
        const struct edp_link_params *edp_link_params;
+       int panel_type = dev_priv->vbt.panel_type;
 
        edp = find_section(bdb, BDB_EDP);
        if (!edp) {
@@ -657,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 {
        const struct bdb_psr *psr;
        const struct psr_table *psr_table;
+       int panel_type = dev_priv->vbt.panel_type;
 
        psr = find_section(bdb, BDB_PSR);
        if (!psr) {
@@ -703,6 +719,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
        const struct bdb_mipi_config *start;
        const struct mipi_config *config;
        const struct mipi_pps_data *pps;
+       int panel_type = dev_priv->vbt.panel_type;
 
        /* parse MIPI blocks only if LFP type is MIPI */
        if (!intel_bios_is_dsi_present(dev_priv, NULL))
@@ -910,6 +927,7 @@ static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
                    const struct bdb_header *bdb)
 {
+       int panel_type = dev_priv->vbt.panel_type;
        const struct bdb_mipi_sequence *sequence;
        const u8 *seq_data;
        u32 seq_size;
index 3f57cb9..a34c23e 100644 (file)
@@ -50,6 +50,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
 #define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+#define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
 #define CSR_MAX_FW_SIZE                        0x2FFF
 #define CSR_DEFAULT_FW_OFFSET          0xFFFFFFFF
@@ -281,6 +282,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        uint32_t i;
        uint32_t *dmc_payload;
+       uint32_t required_min_version;
 
        if (!fw)
                return NULL;
@@ -296,15 +298,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        csr->version = css_header->version;
 
-       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-           csr->version < SKL_CSR_VERSION_REQUIRED) {
-               DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               required_min_version = SKL_CSR_VERSION_REQUIRED;
+       } else if (IS_BROXTON(dev_priv)) {
+               required_min_version = BXT_CSR_VERSION_REQUIRED;
+       } else {
+               MISSING_CASE(INTEL_REVID(dev_priv));
+               required_min_version = 0;
+       }
+
+       if (csr->version < required_min_version) {
+               DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
                         " please upgrade to v%u.%u or later"
                           " [" FIRMWARE_URL "].\n",
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version),
-                        CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
-                        CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+                        CSR_VERSION_MAJOR(required_min_version),
+                        CSR_VERSION_MINOR(required_min_version));
                return NULL;
        }
 
@@ -456,11 +466,51 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        schedule_work(&dev_priv->csr.work);
 }
 
+/**
+ * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+       if (!HAS_CSR(dev_priv))
+               return;
+
+       flush_work(&dev_priv->csr.work);
+
+       /* Drop the reference held in case DMC isn't loaded. */
+       if (!dev_priv->csr.dmc_payload)
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
+ * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_csr_ucode_suspend().
+ */
+void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+{
+       if (!HAS_CSR(dev_priv))
+               return;
+
+       /*
+        * Reacquire the reference to keep RPM disabled in case DMC isn't
+        * loaded.
+        */
+       if (!dev_priv->csr.dmc_payload)
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
 /**
  * intel_csr_ucode_fini() - unload the CSR firmware.
  * @dev_priv: i915 drm device.
  *
- * Firmmware unloading includes freeing the internal momory and reset the
+ * Firmmware unloading includes freeing the internal memory and reset the
  * firmware loading status.
  */
 void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@@ -468,7 +518,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       flush_work(&dev_priv->csr.work);
+       intel_csr_ucode_suspend(dev_priv);
 
        kfree(dev_priv->csr.dmc_payload);
 }
index 921edf1..e30e178 100644 (file)
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-               ddi_translations_edp = bdw_ddi_translations_edp;
+
+               if (dev_priv->vbt.edp.low_vswing) {
+                       ddi_translations_edp = bdw_ddi_translations_edp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+               } else {
+                       ddi_translations_edp = bdw_ddi_translations_dp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+               }
+
                ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-               n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
                n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
                hdmi_default_entry = 7;
@@ -1722,12 +1730,78 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        }
 }
 
+static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
+                                  enum dpio_phy phy)
+{
+       if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+               return false;
+
+       if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+            (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+               DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+                                phy);
+
+               return false;
+       }
+
+       if (phy == DPIO_PHY1 &&
+           !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
+               DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
+
+               return false;
+       }
+
+       if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+               DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+                                phy);
+
+               return false;
+       }
+
+       return true;
+}
+
+static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+       u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+       return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+                                     enum dpio_phy phy)
+{
+       if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+               DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+                                    enum dpio_phy phy);
+
 static void broxton_phy_init(struct drm_i915_private *dev_priv,
                             enum dpio_phy phy)
 {
        enum port port;
        u32 ports, val;
 
+       if (broxton_phy_is_enabled(dev_priv, phy)) {
+               /* Still read out the GRC value for state verification */
+               if (phy == DPIO_PHY0)
+                       dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+
+               if (broxton_phy_verify_state(dev_priv, phy)) {
+                       DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+                                        "won't reprogram it\n", phy);
+
+                       return;
+               }
+
+               DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+                                "force reprogramming it\n", phy);
+       } else {
+               DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
+       }
+
        val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
        val |= GT_DISPLAY_POWER_ON(phy);
        I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
@@ -1798,6 +1872,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
         * enabled.
         * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
         * power down the second channel on PHY0 as well.
+        *
+        * FIXME: Clarify programming of the following, the register is
+        * read-only with bit 6 fixed at 0 at least in stepping A.
         */
        if (phy == DPIO_PHY1)
                val |= OCL2_LDOFUSE_PWR_DIS;
@@ -1810,12 +1887,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                 * the corresponding calibrated value from PHY1, and disable
                 * the automatic calibration on PHY0.
                 */
-               if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
-                            10))
-                       DRM_ERROR("timeout waiting for PHY1 GRC\n");
+               broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 
-               val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
-               val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+               val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
+                                                             DPIO_PHY1);
                grc_code = val << GRC_CODE_FAST_SHIFT |
                           val << GRC_CODE_SLOW_SHIFT |
                           val;
@@ -1825,17 +1900,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                val |= GRC_DIS | GRC_RDY_OVRD;
                I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
        }
+       /*
+        * During PHY1 init delay waiting for GRC calibration to finish, since
+        * it can happen in parallel with the subsequent PHY0 init.
+        */
 
        val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
        val |= COMMON_RESET_DIS;
        I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
 }
 
-void broxton_ddi_phy_init(struct drm_device *dev)
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
 {
        /* Enable PHY1 first since it provides Rcomp for PHY0 */
-       broxton_phy_init(dev->dev_private, DPIO_PHY1);
-       broxton_phy_init(dev->dev_private, DPIO_PHY0);
+       broxton_phy_init(dev_priv, DPIO_PHY1);
+       broxton_phy_init(dev_priv, DPIO_PHY0);
+
+       /*
+        * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
+        * PHY1 GRC calibration to finish, so wait for it here.
+        */
+       broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 }
 
 static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@@ -1846,17 +1931,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
        val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
        val &= ~COMMON_RESET_DIS;
        I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+       val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+       val &= ~GT_DISPLAY_POWER_ON(phy);
+       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 }
 
-void broxton_ddi_phy_uninit(struct drm_device *dev)
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        broxton_phy_uninit(dev_priv, DPIO_PHY1);
        broxton_phy_uninit(dev_priv, DPIO_PHY0);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                      i915_reg_t reg, u32 mask, u32 expected,
+                      const char *reg_fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+       u32 val;
+
+       val = I915_READ(reg);
+       if ((val & mask) == expected)
+               return true;
+
+       va_start(args, reg_fmt);
+       vaf.fmt = reg_fmt;
+       vaf.va = &args;
 
-       /* FIXME: do this in broxton_phy_uninit per phy */
-       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
+       DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+                        "current %08x, expected %08x (mask %08x)\n",
+                        phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+                        mask);
+
+       va_end(args);
+
+       return false;
+}
+
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+                                    enum dpio_phy phy)
+{
+       enum port port;
+       u32 ports;
+       uint32_t mask;
+       bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...)                                 \
+       __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,      \
+                              ## __VA_ARGS__)
+
+       /* We expect the PHY to be always enabled */
+       if (!broxton_phy_is_enabled(dev_priv, phy))
+               return false;
+
+       ok = true;
+
+       if (phy == DPIO_PHY0)
+               ports = BIT(PORT_B) | BIT(PORT_C);
+       else
+               ports = BIT(PORT_A);
+
+       for_each_port_masked(port, ports) {
+               int lane;
+
+               for (lane = 0; lane < 4; lane++)
+                       ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
+                                   LATENCY_OPTIM,
+                                   lane != 1 ? LATENCY_OPTIM : 0,
+                                   "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
+       }
+
+       /* PLL Rcomp code offset */
+       ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+                   IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+                   "BXT_PORT_CL1CM_DW9(%d)", phy);
+       ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+                   IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+                   "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+       /* Power gating */
+       mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+       ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+                   "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+       if (phy == DPIO_PHY0)
+               ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
+                          DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+                          "BXT_PORT_CL2CM_DW6_BC");
+
+       /*
+        * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
+        * at least on stepping A this bit is read-only and fixed at 0.
+        */
+
+       if (phy == DPIO_PHY0) {
+               u32 grc_code = dev_priv->bxt_phy_grc;
+
+               grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+                          grc_code << GRC_CODE_SLOW_SHIFT |
+                          grc_code;
+               mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+                      GRC_CODE_NOM_MASK;
+               ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
+                           "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
+
+               mask = GRC_DIS | GRC_RDY_OVRD;
+               ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
+                           "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
+       }
+
+       return ok;
+#undef _CHK
+}
+
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+       if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
+           !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
+               i915_report_error(dev_priv, "DDI PHY state mismatch\n");
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2044,12 +2238,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-       /* HDMI has nothing special to destroy, so we can go with this. */
-       intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -2068,7 +2256,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-       .destroy = intel_ddi_destroy,
+       .reset = intel_dp_encoder_reset,
+       .destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -2167,6 +2356,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->post_disable = intel_ddi_post_disable;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
 
        intel_dig_port->port = port;
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
index 551541b..73299f9 100644 (file)
@@ -1530,45 +1530,47 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+       POSTING_READ(DPLL(pipe));
+       udelay(150);
+
+       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+               DRM_ERROR("DPLL %d failed to lock\n", pipe);
+}
+
 static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
-       i915_reg_t reg = DPLL(pipe);
-       u32 dpll = pipe_config->dpll_hw_state.dpll;
 
        assert_pipe_disabled(dev_priv, pipe);
 
        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);
 
-       I915_WRITE(reg, dpll);
-       POSTING_READ(reg);
-       udelay(150);
-
-       if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
-               DRM_ERROR("DPLL %d failed to lock\n", pipe);
+       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+               _vlv_enable_pll(crtc, pipe_config);
 
        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
 }
 
-static void chv_enable_pll(struct intel_crtc *crtc,
-                          const struct intel_crtc_state *pipe_config)
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;
 
-       assert_pipe_disabled(dev_priv, pipe);
-
-       /* PLL is protected by panel, make sure we can write it */
-       assert_panel_unlocked(dev_priv, pipe);
-
        mutex_lock(&dev_priv->sb_lock);
 
        /* Enable back the 10bit clock to display controller */
@@ -1589,6 +1591,21 @@ static void chv_enable_pll(struct intel_crtc *crtc,
        /* Check PLL is locked */
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
+}
+
+static void chv_enable_pll(struct intel_crtc *crtc,
+                          const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       assert_pipe_disabled(dev_priv, pipe);
+
+       /* PLL is protected by panel, make sure we can write it */
+       assert_panel_unlocked(dev_priv, pipe);
+
+       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+               _chv_enable_pll(crtc, pipe_config);
 
        if (pipe != PIPE_A) {
                /*
@@ -3198,12 +3215,12 @@ void intel_finish_reset(struct drm_device *dev)
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned reset_counter;
        bool pending;
 
-       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-           intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+       reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
+       if (intel_crtc->reset_counter != reset_counter)
                return false;
 
        spin_lock_irq(&dev->event_lock);
@@ -3805,9 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
        intel_crtc->unpin_work = NULL;
 
        if (work->event)
-               drm_send_vblank_event(intel_crtc->base.dev,
-                                     intel_crtc->pipe,
-                                     work->event);
+               drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
 
        drm_crtc_vblank_put(&intel_crtc->base);
 
@@ -4088,12 +4103,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
-       /*
-        * Sometimes spurious CPU pipe underruns happen during FDI
-        * training, at least with VGA+HDMI cloning. Suppress them.
-        */
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc);
 
@@ -4128,8 +4137,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
        intel_fdi_normal_train(crtc);
 
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
                const struct drm_display_mode *adjusted_mode =
@@ -4732,6 +4739,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (WARN_ON(intel_crtc->active))
                return;
 
+       /*
+        * Sometimes spurious CPU pipe underruns happen during FDI
+        * training, at least with VGA+HDMI cloning. Suppress them.
+        *
+        * On ILK we get an occasional spurious CPU pipe underruns
+        * between eDP port A enable and vdd enable. Also PCH port
+        * enable seems to result in the occasional CPU pipe underrun.
+        *
+        * Spurious PCH underruns also occur during PCH enabling.
+        */
+       if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
@@ -4753,8 +4772,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc->active = true;
 
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
@@ -4796,6 +4813,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        /* Must wait for vblank to avoid spurious PCH FIFO underruns */
        if (intel_crtc->config->has_pch_encoder)
                intel_wait_for_vblank(dev, pipe);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
@@ -4948,8 +4966,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
-       if (intel_crtc->config->has_pch_encoder)
+       /*
+        * Sometimes spurious CPU pipe underruns happen when the
+        * pipe is already disabled, but FDI RX/TX is still enabled.
+        * Happens at least with VGA+HDMI cloning. Suppress them.
+        */
+       if (intel_crtc->config->has_pch_encoder) {
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+       }
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
@@ -4957,22 +4982,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
 
-       /*
-        * Sometimes spurious CPU pipe underruns happen when the
-        * pipe is already disabled, but FDI RX/TX is still enabled.
-        * Happens at least with VGA+HDMI cloning. Suppress them.
-        */
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
        intel_disable_pipe(intel_crtc);
 
        ironlake_pfit_disable(intel_crtc, false);
 
-       if (intel_crtc->config->has_pch_encoder) {
+       if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-       }
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->post_disable)
@@ -5002,6 +5017,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                ironlake_fdi_pll_disable(intel_crtc);
        }
 
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
@@ -5329,9 +5345,8 @@ static void intel_update_cdclk(struct drm_device *dev)
                intel_update_max_cdclk(dev);
 }
 
-static void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t divider;
        uint32_t ratio;
        uint32_t current_freq;
@@ -5445,33 +5460,46 @@ static void broxton_set_cdclk(struct drm_device *dev, int frequency)
                return;
        }
 
-       intel_update_cdclk(dev);
+       intel_update_cdclk(dev_priv->dev);
 }
 
-void broxton_init_cdclk(struct drm_device *dev)
+static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t val;
+       if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
+               return false;
 
-       /*
-        * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
-        * or else the reset will hang because there is no PCH to respond.
-        * Move the handshake programming to initialization sequence.
-        * Previously was left up to BIOS.
-        */
-       val = I915_READ(HSW_NDE_RSTWRN_OPT);
-       val &= ~RESET_PCH_HANDSHAKE_ENABLE;
-       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+       /* TODO: Check for a valid CDCLK rate */
+
+       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
+               DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+
+               return false;
+       }
+
+       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
+               DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+
+               return false;
+       }
+
+       return true;
+}
 
-       /* Enable PG1 for cdclk */
-       intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
+{
+       return broxton_cdclk_is_enabled(dev_priv);
+}
 
+void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+{
        /* check if cd clock is enabled */
-       if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
-               DRM_DEBUG_KMS("Display already initialized\n");
+       if (broxton_cdclk_is_enabled(dev_priv)) {
+               DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
                return;
        }
 
+       DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+
        /*
         * FIXME:
         * - The initial CDCLK needs to be read from VBT.
@@ -5479,7 +5507,7 @@ void broxton_init_cdclk(struct drm_device *dev)
         * - check if setting the max (or any) cdclk freq is really necessary
         *   here, it belongs to modeset time
         */
-       broxton_set_cdclk(dev, 624000);
+       broxton_set_cdclk(dev_priv, 624000);
 
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);
@@ -5490,10 +5518,8 @@ void broxton_init_cdclk(struct drm_device *dev)
                DRM_ERROR("DBuf power enable timeout!\n");
 }
 
-void broxton_uninit_cdclk(struct drm_device *dev)
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);
 
@@ -5503,9 +5529,7 @@ void broxton_uninit_cdclk(struct drm_device *dev)
                DRM_ERROR("DBuf power disable timeout!\n");
 
        /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
-       broxton_set_cdclk(dev, 19200);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+       broxton_set_cdclk(dev_priv, 19200);
 }
 
 static const struct skl_cdclk_entry {
@@ -6072,14 +6096,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       if (!intel_crtc->config->has_dsi_encoder) {
-               if (IS_CHERRYVIEW(dev)) {
-                       chv_prepare_pll(intel_crtc, intel_crtc->config);
-                       chv_enable_pll(intel_crtc, intel_crtc->config);
-               } else {
-                       vlv_prepare_pll(intel_crtc, intel_crtc->config);
-                       vlv_enable_pll(intel_crtc, intel_crtc->config);
-               }
+       if (IS_CHERRYVIEW(dev)) {
+               chv_prepare_pll(intel_crtc, intel_crtc->config);
+               chv_enable_pll(intel_crtc, intel_crtc->config);
+       } else {
+               vlv_prepare_pll(intel_crtc, intel_crtc->config);
+               vlv_enable_pll(intel_crtc, intel_crtc->config);
        }
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6117,7 +6139,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->state);
-       int pipe = intel_crtc->pipe;
+       enum pipe pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -7173,11 +7195,15 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
                             struct intel_crtc_state *pipe_config)
 {
        pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-               DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (crtc->pipe != PIPE_A)
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
+       /* DPLL not used with DSI, but still need the rest set up */
+       if (!pipe_config->has_dsi_encoder)
+               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+                       DPLL_EXT_BUFFER_ENABLE_VLV;
+
        pipe_config->dpll_hw_state.dpll_md =
                (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 }
@@ -7186,11 +7212,14 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
                             struct intel_crtc_state *pipe_config)
 {
        pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-               DPLL_VCO_ENABLE;
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (crtc->pipe != PIPE_A)
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
+       /* DPLL not used with DSI, but still need the rest set up */
+       if (!pipe_config->has_dsi_encoder)
+               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
        pipe_config->dpll_hw_state.dpll_md =
                (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 }
@@ -7200,11 +7229,20 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
+       enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;
 
+       /* Enable Refclk */
+       I915_WRITE(DPLL(pipe),
+                  pipe_config->dpll_hw_state.dpll &
+                  ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+       /* No need to actually set up the DPLL with DSI */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
        mutex_lock(&dev_priv->sb_lock);
 
        bestn = pipe_config->dpll.n;
@@ -7291,14 +7329,21 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
-       i915_reg_t dpll_reg = DPLL(crtc->pipe);
+       enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        u32 dpio_val;
        int vco;
 
+       /* Enable Refclk and SSC */
+       I915_WRITE(DPLL(pipe),
+                  pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+       /* No need to actually set up the DPLL with DSI */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
        bestn = pipe_config->dpll.n;
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
@@ -7309,12 +7354,6 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
        dpio_val = 0;
        loopfilter = 0;
 
-       /*
-        * Enable Refclk and SSC
-        */
-       I915_WRITE(dpll_reg,
-                  pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
        mutex_lock(&dev_priv->sb_lock);
 
        /* p1 and p2 divider */
@@ -7929,9 +7968,6 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (crtc_state->has_dsi_encoder)
-               return 0;
-
        if (!crtc_state->clock_set &&
            !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
                                refclk, NULL, &crtc_state->dpll)) {
@@ -7953,9 +7989,6 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (crtc_state->has_dsi_encoder)
-               return 0;
-
        if (!crtc_state->clock_set &&
            !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
                                refclk, NULL, &crtc_state->dpll)) {
@@ -8008,8 +8041,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
        u32 mdiv;
        int refclk = 100000;
 
-       /* In case of MIPI DPLL will not even be used */
-       if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+       /* In case of DSI, DPLL will not be used */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;
 
        mutex_lock(&dev_priv->sb_lock);
@@ -8105,6 +8138,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;
 
+       /* In case of DSI, DPLL will not be used */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
@@ -9533,7 +9570,7 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
                to_intel_atomic_state(old_state);
        unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
-       broxton_set_cdclk(dev, req_cdclk);
+       broxton_set_cdclk(to_i915(dev), req_cdclk);
 }
 
 /* compute the max rate for new configuration */
@@ -10903,9 +10940,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned reset_counter;
 
-       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-           crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+       reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+       if (crtc->reset_counter != reset_counter)
                return true;
 
        /*
@@ -11359,7 +11397,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 
        if (mmio_flip->req) {
                WARN_ON(__i915_wait_request(mmio_flip->req,
-                                           mmio_flip->crtc->reset_counter,
                                            false, NULL,
                                            &mmio_flip->i915->rps.mmioflips));
                i915_gem_request_unreference__unlocked(mmio_flip->req);
@@ -11567,8 +11604,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        if (ret)
                goto cleanup;
 
+       intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+       if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+               ret = -EIO;
+               goto cleanup;
+       }
+
        atomic_inc(&intel_crtc->unpin_work_count);
-       intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
@@ -11654,7 +11696,7 @@ cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
        if (!IS_ERR_OR_NULL(request))
-               i915_gem_request_cancel(request);
+               i915_add_request_no_flush(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11704,7 +11746,7 @@ retry:
 
                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
-                       drm_send_vblank_event(dev, pipe, event);
+                       drm_crtc_send_vblank_event(crtc, event);
                        spin_unlock_irq(&dev->event_lock);
                }
        }
@@ -12686,7 +12728,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_INFO(dev)->gen < 4)
-               PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+               PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
        if (!adjust) {
@@ -12721,6 +12763,9 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
 
+       PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+       PIPE_CONF_CHECK_X(dsi_pll.div);
+
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
 
@@ -13386,7 +13431,7 @@ static int intel_atomic_check(struct drm_device *dev,
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state,
-                                      bool async)
+                                      bool nonblock)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_plane_state *plane_state;
@@ -13395,12 +13440,15 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        struct drm_crtc *crtc;
        int i, ret;
 
-       if (async) {
-               DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+       if (nonblock) {
+               DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
                return -EINVAL;
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (state->legacy_cursor_update)
+                       continue;
+
                ret = intel_crtc_wait_for_pending_flips(crtc);
                if (ret)
                        return ret;
@@ -13414,12 +13462,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                return ret;
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
-               u32 reset_counter;
-
-               reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-               mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
+       if (!ret && !nonblock) {
                for_each_plane_in_state(state, plane, plane_state, i) {
                        struct intel_plane_state *intel_plane_state =
                                to_intel_plane_state(plane_state);
@@ -13428,25 +13473,18 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                                continue;
 
                        ret = __i915_wait_request(intel_plane_state->wait_req,
-                                                 reset_counter, true,
-                                                 NULL, NULL);
-
-                       /* Swallow -EIO errors to allow updates during hw lockup. */
-                       if (ret == -EIO)
-                               ret = 0;
-
-                       if (ret)
+                                                 true, NULL, NULL);
+                       if (ret) {
+                               /* Any hang should be swallowed by the wait */
+                               WARN_ON(ret == -EIO);
+                               mutex_lock(&dev->struct_mutex);
+                               drm_atomic_helper_cleanup_planes(dev, state);
+                               mutex_unlock(&dev->struct_mutex);
                                break;
+                       }
                }
-
-               if (!ret)
-                       return 0;
-
-               mutex_lock(&dev->struct_mutex);
-               drm_atomic_helper_cleanup_planes(dev, state);
        }
 
-       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
@@ -13488,7 +13526,7 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
                                        drm_crtc_vblank_count(crtc),
                                msecs_to_jiffies(50));
 
-               WARN_ON(!lret);
+               WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
 
                drm_crtc_vblank_put(crtc);
        }
@@ -13519,21 +13557,21 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
  * intel_atomic_commit - commit validated state object
  * @dev: DRM device
  * @state: the top-level driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a top-level state object that has been validated
  * with drm_atomic_helper_check().
  *
  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
  * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
+ * nonblocking commit.
  *
  * RETURNS
  * Zero for success or -errno.
  */
 static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *state,
-                              bool async)
+                              bool nonblock)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13545,7 +13583,7 @@ static int intel_atomic_commit(struct drm_device *dev,
        unsigned long put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
 
-       ret = intel_atomic_prepare_commit(dev, state, async);
+       ret = intel_atomic_prepare_commit(dev, state, nonblock);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                return ret;
@@ -13790,10 +13828,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                 */
                if (needs_modeset(crtc_state))
                        ret = i915_gem_object_wait_rendering(old_obj, true);
-
-               /* Swallow -EIO errors to allow updates during hw lockup. */
-               if (ret && ret != -EIO)
+               if (ret) {
+                       /* GPU hangs should have been swallowed by the wait */
+                       WARN_ON(ret == -EIO);
                        return ret;
+               }
        }
 
        /* For framebuffer backed by dmabuf, wait for fence */
index da0c3d2..f192f58 100644 (file)
@@ -2215,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
        POSTING_READ(DP_A);
        udelay(500);
 
+       /*
+        * [DevILK] Work around required when enabling DP PLL
+        * while a pipe is enabled going to FDI:
+        * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+        * 2. Program DP PLL enable
+        */
+       if (IS_GEN5(dev_priv))
+               intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+
        intel_dp->DP |= DP_PLL_ENABLE;
 
        I915_WRITE(DP_A, intel_dp->DP);
@@ -2630,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-       enum port port = dp_to_dig_port(intel_dp)->port;
        enum pipe pipe = crtc->pipe;
 
        if (WARN_ON(dp_reg & DP_PORT_EN))
@@ -2641,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);
 
-       /*
-        * We get an occasional spurious underrun between the port
-        * enable and vdd enable, when enabling port A eDP.
-        *
-        * FIXME: Not sure if this applies to (PCH) port D eDP as well
-        */
-       if (port == PORT_A)
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
        intel_dp_enable_port(intel_dp);
 
-       if (port == PORT_A && IS_GEN5(dev_priv)) {
-               /*
-                * Underrun reporting for the other pipe was disabled in
-                * g4x_pre_enable_dp(). The eDP PLL and port have now been
-                * enabled, so it's now safe to re-enable underrun reporting.
-                */
-               intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
-       }
-
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);
 
-       if (port == PORT_A)
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
        pps_unlock(intel_dp);
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -2711,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
 
 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
-       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
 
        intel_dp_prepare(encoder);
 
-       if (port == PORT_A && IS_GEN5(dev_priv)) {
-               /*
-                * We get FIFO underruns on the other pipe when
-                * enabling the CPU eDP PLL, and when enabling CPU
-                * eDP port. We could potentially avoid the PLL
-                * underrun with a vblank wait just prior to enabling
-                * the PLL, but that doesn't appear to help the port
-                * enable case. Just sweep it all under the rug.
-                */
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
-       }
-
        /* Only ilk+ has port A */
        if (port == PORT_A)
                ironlake_edp_pll_on(intel_dp);
@@ -3103,37 +3073,6 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
        chv_phy_powergate_lanes(encoder, false, 0x0);
 }
 
-/*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
-                       void *buffer, size_t size)
-{
-       ssize_t ret;
-       int i;
-
-       /*
-        * Sometime we just get the same incorrect byte repeated
-        * over the entire buffer. Doing just one throw away read
-        * initially seems to "solve" it.
-        */
-       drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
-       for (i = 0; i < 3; i++) {
-               ret = drm_dp_dpcd_read(aux, offset, buffer, size);
-               if (ret == size)
-                       return ret;
-               msleep(1);
-       }
-
-       return ret;
-}
-
 /*
  * Fetch AUX CH registers 0x202 - 0x207 which contain
  * link status information
@@ -3141,10 +3080,8 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-       return intel_dp_dpcd_read_wake(&intel_dp->aux,
-                                      DP_LANE0_1_STATUS,
-                                      link_status,
-                                      DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+       return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+                               DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
 /* These are source-specific values. */
@@ -3779,8 +3716,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t rev;
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
-                                   sizeof(intel_dp->dpcd)) < 0)
+       if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
+                            sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */
 
        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
@@ -3788,8 +3725,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
-                                   &intel_dp->sink_count, 1) < 0)
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+                            &intel_dp->sink_count, 1) < 0)
                return false;
 
        /*
@@ -3806,15 +3743,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
         * downstream port information. So, an early return here saves
         * time from performing other operations which are not required.
         */
-       if (!intel_dp->sink_count)
+       if (!is_edp(intel_dp) && !intel_dp->sink_count)
                return false;
 
        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
        if (is_edp(intel_dp)) {
-               intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
-                                       intel_dp->psr_dpcd,
-                                       sizeof(intel_dp->psr_dpcd));
+               drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+                                intel_dp->psr_dpcd,
+                                sizeof(intel_dp->psr_dpcd));
                if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
                        dev_priv->psr.sink_support = true;
                        DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
@@ -3825,9 +3762,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                        uint8_t frame_sync_cap;
 
                        dev_priv->psr.sink_support = true;
-                       intel_dp_dpcd_read_wake(&intel_dp->aux,
-                                       DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-                                       &frame_sync_cap, 1);
+                       drm_dp_dpcd_read(&intel_dp->aux,
+                                        DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+                                        &frame_sync_cap, 1);
                        dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
                        /* PSR2 needs frame sync as well */
                        dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3843,15 +3780,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        /* Intermediate frequency support */
        if (is_edp(intel_dp) &&
            (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-           (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+           (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
            (rev >= 0x03)) { /* eDp v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;
 
-               intel_dp_dpcd_read_wake(&intel_dp->aux,
-                               DP_SUPPORTED_LINK_RATES,
-                               sink_rates,
-                               sizeof(sink_rates));
+               drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+                               sink_rates, sizeof(sink_rates));
 
                for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
                        int val = le16_to_cpu(sink_rates[i]);
@@ -3874,9 +3809,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
                return true; /* no per-port downstream info */
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
-                                   intel_dp->downstream_ports,
-                                   DP_MAX_DOWNSTREAM_PORTS) < 0)
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+                            intel_dp->downstream_ports,
+                            DP_MAX_DOWNSTREAM_PORTS) < 0)
                return false; /* downstream port status fetch failed */
 
        return true;
@@ -3890,11 +3825,11 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 }
@@ -3913,7 +3848,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
                return false;
 
-       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
                if (buf[0] & DP_MST_CAP) {
                        DRM_DEBUG_KMS("Sink is MST capable\n");
                        intel_dp->is_mst = true;
@@ -4050,7 +3985,7 @@ stop:
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
-       return intel_dp_dpcd_read_wake(&intel_dp->aux,
+       return drm_dp_dpcd_read(&intel_dp->aux,
                                       DP_DEVICE_SERVICE_IRQ_VECTOR,
                                       sink_irq_vector, 1) == 1;
 }
@@ -4060,7 +3995,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
        int ret;
 
-       ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+       ret = drm_dp_dpcd_read(&intel_dp->aux,
                                             DP_SINK_COUNT_ESI,
                                             sink_irq_vector, 14);
        if (ret != 14)
@@ -4339,6 +4274,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
        if (!intel_dp_get_dpcd(intel_dp))
                return connector_status_disconnected;
 
+       if (is_edp(intel_dp))
+               return connector_status_connected;
+
        /* if there's no downstream port, we're done */
        if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
                return connector_status_connected;
@@ -4608,6 +4546,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
                intel_dp->compliance_test_type = 0;
                intel_dp->compliance_test_data = 0;
 
+               if (intel_dp->is_mst) {
+                       DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+                                     intel_dp->is_mst,
+                                     intel_dp->mst_mgr.mst_state);
+                       intel_dp->is_mst = false;
+                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                       intel_dp->is_mst);
+               }
+
                goto out;
        }
 
@@ -4665,20 +4612,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
        }
 
 out:
-       if (status != connector_status_connected) {
+       if ((status != connector_status_connected) &&
+           (intel_dp->is_mst == false))
                intel_dp_unset_edid(intel_dp);
-               /*
-                * If we were in MST mode, and device is not there,
-                * get out of MST mode
-                */
-               if (intel_dp->is_mst) {
-                       DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-                                     intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-                       intel_dp->is_mst = false;
-                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
-                                                       intel_dp->is_mst);
-               }
-       }
 
        intel_display_power_put(to_i915(dev), power_domain);
        return;
@@ -4851,6 +4787,11 @@ intel_dp_set_property(struct drm_connector *connector,
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }
+               if (HAS_GMCH_DISPLAY(dev_priv) &&
+                   val == DRM_MODE_SCALE_CENTER) {
+                       DRM_DEBUG_KMS("centering not supported\n");
+                       return -EINVAL;
+               }
 
                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
@@ -4914,7 +4855,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4956,7 +4897,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
        edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp;
 
index 94b4e83..b6bf7fd 100644 (file)
@@ -109,7 +109,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
        if (ret) {
@@ -134,10 +134,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
        /* and this can also fail */
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
 
        intel_dp->active_mst_links--;
-       intel_mst->port = NULL;
+
+       intel_mst->connector = NULL;
        if (intel_dp->active_mst_links == 0) {
                intel_dig_port->base.post_disable(&intel_dig_port->base);
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
@@ -177,7 +178,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        found->encoder = encoder;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-       intel_mst->port = found->port;
+
+       intel_mst->connector = found;
 
        if (intel_dp->active_mst_links == 0) {
                intel_prepare_ddi_buffer(&intel_dig_port->base);
@@ -195,7 +197,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        }
 
        ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-                                      intel_mst->port,
+                                      intel_mst->connector->port,
                                       intel_crtc->config->pbn, &slots);
        if (ret == false) {
                DRM_ERROR("failed to allocate vcpi\n");
@@ -244,7 +246,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        *pipe = intel_mst->pipe;
-       if (intel_mst->port)
+       if (intel_mst->connector)
                return true;
        return false;
 }
@@ -308,10 +310,11 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
        struct edid *edid;
        int ret;
 
-       edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
-       if (!edid)
-               return 0;
+       if (!intel_dp) {
+               return intel_connector_update_modes(connector, NULL);
+       }
 
+       edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
        ret = intel_connector_update_modes(connector, edid);
        kfree(edid);
 
@@ -324,6 +327,8 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
 
+       if (!intel_dp)
+               return connector_status_disconnected;
        return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
 }
 
@@ -389,6 +394,8 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
        struct intel_dp *intel_dp = intel_connector->mst_port;
        struct intel_crtc *crtc = to_intel_crtc(state->crtc);
 
+       if (!intel_dp)
+               return NULL;
        return &intel_dp->mst_encoders[crtc->pipe]->base.base;
 }
 
@@ -396,6 +403,8 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
+       if (!intel_dp)
+               return NULL;
        return &intel_dp->mst_encoders[0]->base.base;
 }
 
@@ -506,23 +515,11 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
        /* need to nuke the connector */
        drm_modeset_lock_all(dev);
-       if (connector->state->crtc) {
-               struct drm_mode_set set;
-               int ret;
-
-               memset(&set, 0, sizeof(set));
-               set.crtc = connector->state->crtc,
-
-               ret = drm_atomic_helper_set_config(&set);
-
-               WARN(ret, "Disabling mst crtc failed with %i\n", ret);
-       }
-
        intel_connector_remove_from_fbdev(intel_connector);
-       drm_connector_cleanup(connector);
+       intel_connector->mst_port = NULL;
        drm_modeset_unlock_all(dev);
 
-       kfree(intel_connector);
+       drm_connector_unreference(&intel_connector->base);
        DRM_DEBUG_KMS("\n");
 }
 
index 0bde6a4..639bf02 100644 (file)
@@ -1295,17 +1295,9 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
        uint32_t temp;
        enum port port = (enum port)pll->id;    /* 1:1 port->PLL mapping */
 
-       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       /*
-        * Definition of each bit polarity has been changed
-        * after A1 stepping
-        */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-               temp &= ~PORT_PLL_REF_SEL;
-       else
-               temp |= PORT_PLL_REF_SEL;
-
        /* Non-SSC reference */
+       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+       temp |= PORT_PLL_REF_SEL;
        I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 
        /* Disable 10 bit clock */
@@ -1652,10 +1644,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
                        DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
                if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
                        DRM_ERROR("LCPLL1 is disabled\n");
-       } else if (IS_BROXTON(dev)) {
-               broxton_init_cdclk(dev);
-               broxton_ddi_phy_init(dev);
-       } else {
+       } else if (!IS_BROXTON(dev_priv)) {
                /*
                 * The LCPLL register should be turned on by the BIOS. For now
                 * let's just check its state and print errors in case
index e0fcfa1..315c971 100644 (file)
@@ -497,6 +497,11 @@ struct intel_crtc_state {
        /* Actual register state of the dpll, for shared dpll cross-checking. */
        struct intel_dpll_hw_state dpll_hw_state;
 
+       /* DSI PLL registers */
+       struct {
+               u32 ctrl, div;
+       } dsi_pll;
+
        int pipe_bpp;
        struct intel_link_m_n dp_m_n;
 
@@ -878,7 +883,7 @@ struct intel_dp_mst_encoder {
        struct intel_encoder base;
        enum pipe pipe;
        struct intel_digital_port *primary;
-       void *port; /* store this opaque as its illegal to dereference it */
+       struct intel_connector *connector;
 };
 
 static inline enum dpio_channel
@@ -1224,12 +1229,16 @@ void intel_prepare_reset(struct drm_device *dev);
 void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_device *dev);
-void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_ddi_phy_init(struct drm_device *dev);
-void broxton_ddi_phy_uninit(struct drm_device *dev);
+void broxton_init_cdclk(struct drm_i915_private *dev_priv);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
@@ -1268,6 +1277,8 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
 void intel_csr_ucode_init(struct drm_i915_private *);
 void intel_csr_load_program(struct drm_i915_private *);
 void intel_csr_ucode_fini(struct drm_i915_private *);
+void intel_csr_ucode_suspend(struct drm_i915_private *);
+void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
@@ -1278,6 +1289,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
@@ -1462,8 +1475,8 @@ int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_fini(struct drm_i915_private *);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain);
index a1e0547..2b22bb9 100644 (file)
@@ -290,16 +290,26 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;
-       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int ret;
 
        DRM_DEBUG_KMS("\n");
 
        pipe_config->has_dsi_encoder = true;
 
-       if (fixed_mode)
+       if (fixed_mode) {
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+               if (HAS_GMCH_DISPLAY(dev_priv))
+                       intel_gmch_panel_fitting(crtc, pipe_config,
+                                                intel_connector->panel.fitting_mode);
+               else
+                       intel_pch_panel_fitting(crtc, pipe_config,
+                                               intel_connector->panel.fitting_mode);
+       }
+
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
@@ -311,6 +321,12 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                        pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
        }
 
+       ret = intel_compute_dsi_pll(encoder, pipe_config);
+       if (ret)
+               return false;
+
+       pipe_config->clock_set = true;
+
        return true;
 }
 
@@ -498,14 +514,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port;
        u32 tmp;
 
        DRM_DEBUG_KMS("\n");
 
-       intel_enable_dsi_pll(encoder);
+       /*
+        * The BIOS may leave the PLL in a wonky state where it doesn't
+        * lock. It needs to be fully powered down to fix it.
+        */
+       intel_disable_dsi_pll(encoder);
+       intel_enable_dsi_pll(encoder, crtc->config);
+
        intel_dsi_prepare(encoder);
 
        /* Panel Enable over CRC PMIC */
@@ -515,19 +536,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
        msleep(intel_dsi->panel_on_delay);
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               /*
-                * Disable DPOunit clock gating, can stall pipe
-                * and we need DPLL REFA always enabled
-                */
-               tmp = I915_READ(DPLL(pipe));
-               tmp |= DPLL_REF_CLK_ENABLE_VLV;
-               I915_WRITE(DPLL(pipe), tmp);
-
-               /* update the hw state for DPLL */
-               intel_crtc->config->dpll_hw_state.dpll =
-                               DPLL_INTEGRATED_REF_CLK_VLV |
-                                       DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-
+               /* Disable DPOunit clock gating, can stall pipe */
                tmp = I915_READ(DSPCLK_GATE_D);
                tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
                I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -679,11 +688,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
        drm_panel_unprepare(intel_dsi->panel);
 
        msleep(intel_dsi->panel_off_delay);
-       msleep(intel_dsi->panel_pwr_cycle_delay);
 
        /* Panel Disable over CRC PMIC */
        if (intel_dsi->gpio_panel)
                gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+
+       /*
+        * FIXME As we do with eDP, just make a note of the time here
+        * and perform the wait before the next panel power on.
+        */
+       msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -716,11 +730,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                        BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
                bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
-               /* Due to some hardware limitations on BYT, MIPI Port C DPI
-                * Enable bit does not get set. To check whether DSI Port C
-                * was enabled in BIOS, check the Pipe B enable bit
+               /*
+                * Due to some hardware limitations on VLV/CHV, the DPI enable
+                * bit in port C control register does not get set. As a
+                * workaround, check pipe B conf instead.
                 */
-               if (IS_VALLEYVIEW(dev) && port == PORT_C)
+               if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
                        enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 
                /* Try command mode if video mode not enabled */
@@ -826,13 +841,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
        if (IS_BROXTON(dev))
                bxt_dsi_get_pipe_config(encoder, pipe_config);
 
-       /*
-        * DPLL_MD is not used in case of DSI, reading will get some default value
-        * set dpll_md = 0
-        */
-       pipe_config->dpll_hw_state.dpll_md = 0;
-
-       pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
+       pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+                                 pipe_config);
        if (!pclk)
                return;
 
@@ -845,7 +855,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
-       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
        DRM_DEBUG_KMS("\n");
@@ -1183,6 +1193,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
        return 1;
 }
 
+static int intel_dsi_set_property(struct drm_connector *connector,
+                                 struct drm_property *property,
+                                 uint64_t val)
+{
+       struct drm_device *dev = connector->dev;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_crtc *crtc;
+       int ret;
+
+       ret = drm_object_property_set_value(&connector->base, property, val);
+       if (ret)
+               return ret;
+
+       if (property == dev->mode_config.scaling_mode_property) {
+               if (val == DRM_MODE_SCALE_NONE) {
+                       DRM_DEBUG_KMS("no scaling not supported\n");
+                       return -EINVAL;
+               }
+               if (HAS_GMCH_DISPLAY(dev) &&
+                   val == DRM_MODE_SCALE_CENTER) {
+                       DRM_DEBUG_KMS("centering not supported\n");
+                       return -EINVAL;
+               }
+
+               if (intel_connector->panel.fitting_mode == val)
+                       return 0;
+
+               intel_connector->panel.fitting_mode = val;
+       }
+
+       crtc = intel_attached_encoder(connector)->base.crtc;
+       if (crtc && crtc->state->enable) {
+               /*
+                * If the CRTC is enabled, the display will be changed
+                * according to the new panel fitting mode.
+                */
+               intel_crtc_restore_mode(crtc);
+       }
+
+       return 0;
+}
+
 static void intel_dsi_connector_destroy(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1225,11 +1277,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .detect = intel_dsi_detect,
        .destroy = intel_dsi_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = intel_dsi_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
+static void intel_dsi_add_properties(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+
+       if (connector->panel.fixed_mode) {
+               drm_mode_create_scaling_mode_property(dev);
+               drm_object_attach_property(&connector->base.base,
+                                          dev->mode_config.scaling_mode_property,
+                                          DRM_MODE_SCALE_ASPECT);
+               connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+       }
+}
+
 void intel_dsi_init(struct drm_device *dev)
 {
        struct intel_dsi *intel_dsi;
@@ -1353,8 +1419,6 @@ void intel_dsi_init(struct drm_device *dev)
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
 
-       drm_connector_register(connector);
-
        drm_panel_attach(intel_dsi->panel, connector);
 
        mutex_lock(&dev->mode_config.mutex);
@@ -1373,6 +1437,11 @@ void intel_dsi_init(struct drm_device *dev)
        }
 
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+
+       intel_dsi_add_properties(intel_connector);
+
+       drm_connector_register(connector);
+
        intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
index dabde19..61a6957 100644 (file)
@@ -127,11 +127,15 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 }
 
 bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
-extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
-                                                       enum port port);
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+                         struct intel_crtc_state *config);
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+                         const struct intel_crtc_state *config);
+void intel_disable_dsi_pll(struct intel_encoder *encoder);
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                      struct intel_crtc_state *config);
+void intel_dsi_reset_clocks(struct intel_encoder *encoder,
+                           enum port port);
 
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
index 4e53fcf..1765e6e 100644 (file)
 #include "i915_drv.h"
 #include "intel_dsi.h"
 
-struct dsi_mnp {
-       u32 dsi_pll_ctrl;
-       u32 dsi_pll_div;
-};
-
-static const u32 lfsr_converts[] = {
+static const u16 lfsr_converts[] = {
        426, 469, 234, 373, 442, 221, 110, 311, 411,            /* 62 - 70 */
        461, 486, 243, 377, 188, 350, 175, 343, 427, 213,       /* 71 - 80 */
        106, 53, 282, 397, 454, 227, 113, 56, 284, 142,         /* 81 - 90 */
@@ -57,7 +52,8 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
 }
 
 static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
-                       struct dsi_mnp *dsi_mnp, int target_dsi_clk)
+                       struct intel_crtc_state *config,
+                       int target_dsi_clk)
 {
        unsigned int calc_m = 0, calc_p = 0;
        unsigned int m_min, m_max, p_min = 2, p_max = 6;
@@ -103,8 +99,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
        /* register has log2(N1), this works fine for powers of two */
        n = ffs(n) - 1;
        m_seed = lfsr_converts[calc_m - 62];
-       dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
-       dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
+       config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+       config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
                m_seed << DSI_PLL_M1_DIV_SHIFT;
 
        return 0;
@@ -114,54 +110,55 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
  */
-static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
+                              struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        int ret;
-       struct dsi_mnp dsi_mnp;
        u32 dsi_clk;
 
        dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
                                    intel_dsi->lane_count);
 
-       ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
+       ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
        if (ret) {
                DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
-               return;
+               return ret;
        }
 
        if (intel_dsi->ports & (1 << PORT_A))
-               dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+               config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
 
        if (intel_dsi->ports & (1 << PORT_C))
-               dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+               config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+       config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
 
        DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
-                     dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+                     config->dsi_pll.div, config->dsi_pll.ctrl);
 
-       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
-       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
-       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+       return 0;
 }
 
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       u32 tmp;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        DRM_DEBUG_KMS("\n");
 
        mutex_lock(&dev_priv->sb_lock);
 
-       vlv_configure_dsi_pll(encoder);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+                     config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
 
        /* wait at least 0.5 us after ungating before enabling VCO */
        usleep_range(1, 10);
 
-       tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-       tmp |= DSI_PLL_VCO_EN;
-       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
 
        if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
                                                DSI_PLL_LOCK, 20)) {
@@ -177,7 +174,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
 
 static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 tmp;
 
        DRM_DEBUG_KMS("\n");
@@ -224,7 +221,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 
 static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val;
 
        DRM_DEBUG_KMS("\n");
@@ -251,14 +248,15 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
             bpp, pipe_bpp);
 }
 
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                           struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        u32 dsi_clock, pclk;
        u32 pll_ctl, pll_div;
        u32 m = 0, p = 0, n;
-       int refclk = 25000;
+       int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
        int i;
 
        DRM_DEBUG_KMS("\n");
@@ -268,6 +266,9 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
        pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
        mutex_unlock(&dev_priv->sb_lock);
 
+       config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
+       config->dsi_pll.div = pll_div;
+
        /* mask out other bits and extract the P1 divisor */
        pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
        pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
@@ -313,7 +314,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
        return pclk;
 }
 
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                           struct intel_crtc_state *config)
 {
        u32 pclk;
        u32 dsi_clk;
@@ -327,15 +329,9 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
                return 0;
        }
 
-       dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
-                               BXT_DSI_PLL_RATIO_MASK;
+       config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
 
-       /* Invalid DSI ratio ? */
-       if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
-                       dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
-               DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
-               return 0;
-       }
+       dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
 
        dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
 
@@ -348,12 +344,13 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
        return pclk;
 }
 
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                      struct intel_crtc_state *config)
 {
        if (IS_BROXTON(encoder->base.dev))
-               return bxt_dsi_get_pclk(encoder, pipe_bpp);
+               return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
        else
-               return vlv_dsi_get_pclk(encoder, pipe_bpp);
+               return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
 }
 
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -370,7 +367,8 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 }
 
 /* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+                                  const struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
@@ -390,8 +388,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
        tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 
        /* Get the current DSI rate(actual) */
-       pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
-                               BXT_DSI_PLL_RATIO_MASK;
+       pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
        dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
 
        /*
@@ -427,16 +424,15 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
        I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 }
 
-static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
+                              struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        u8 dsi_ratio;
        u32 dsi_clk;
-       u32 val;
 
        dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
-                       intel_dsi->lane_count);
+                                   intel_dsi->lane_count);
 
        /*
         * From clock diagram, to get PLL ratio divider, divide double of DSI
@@ -445,9 +441,9 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
         */
        dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
        if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
-                       dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+           dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
                DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
-               return false;
+               return -ECHRNG;
        }
 
        /*
@@ -455,27 +451,19 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
         * Spec says both have to be programmed, even if one is not getting
         * used. Configure MIPI_CLOCK_CTL dividers in modeset
         */
-       val = I915_READ(BXT_DSI_PLL_CTL);
-       val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
-       val &= ~BXT_DSI_FREQ_SEL_MASK;
-       val &= ~BXT_DSI_PLL_RATIO_MASK;
-       val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+       config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
 
        /* As per recommendation from hardware team,
         * Prog PVD ratio =1 if dsi ratio <= 50
         */
-       if (dsi_ratio <= 50) {
-               val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
-               val |= BXT_DSI_PLL_PVD_RATIO_1;
-       }
+       if (dsi_ratio <= 50)
+               config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
 
-       I915_WRITE(BXT_DSI_PLL_CTL, val);
-       POSTING_READ(BXT_DSI_PLL_CTL);
-
-       return true;
+       return 0;
 }
 
-static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -484,23 +472,13 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       val = I915_READ(BXT_DSI_PLL_ENABLE);
-
-       if (val & BXT_DSI_PLL_DO_ENABLE) {
-               WARN(1, "DSI PLL already enabled. Disabling it.\n");
-               val &= ~BXT_DSI_PLL_DO_ENABLE;
-               I915_WRITE(BXT_DSI_PLL_ENABLE, val);
-       }
-
        /* Configure PLL vales */
-       if (!bxt_configure_dsi_pll(encoder)) {
-               DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
-               return;
-       }
+       I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+       POSTING_READ(BXT_DSI_PLL_CTL);
 
        /* Program TX, RX, Dphy clocks */
        for_each_dsi_port(port, intel_dsi->ports)
-               bxt_dsi_program_clocks(encoder->base.dev, port);
+               bxt_dsi_program_clocks(encoder->base.dev, port, config);
 
        /* Enable DSI PLL */
        val = I915_READ(BXT_DSI_PLL_ENABLE);
@@ -526,14 +504,28 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
        return false;
 }
 
-void intel_enable_dsi_pll(struct intel_encoder *encoder)
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+                         struct intel_crtc_state *config)
+{
+       struct drm_device *dev = encoder->base.dev;
+
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+               return vlv_compute_dsi_pll(encoder, config);
+       else if (IS_BROXTON(dev))
+               return bxt_compute_dsi_pll(encoder, config);
+
+       return -ENODEV;
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+                         const struct intel_crtc_state *config)
 {
        struct drm_device *dev = encoder->base.dev;
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               vlv_enable_dsi_pll(encoder);
+               vlv_enable_dsi_pll(encoder, config);
        else if (IS_BROXTON(dev))
-               bxt_enable_dsi_pll(encoder);
+               bxt_enable_dsi_pll(encoder, config);
 }
 
 void intel_disable_dsi_pll(struct intel_encoder *encoder)
index 73002e9..9d79c4c 100644 (file)
 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
 
+struct drm_i915_gem_request;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ *
+ * Finally, we also keep a few statistics here, including the number of
+ * submissions to each engine, and a record of the last submission failure
+ * (if any).
+ */
 struct i915_guc_client {
        struct drm_i915_gem_object *client_obj;
+       void *client_base;              /* first page (only) of above   */
        struct intel_context *owner;
        struct intel_guc *guc;
        uint32_t priority;
@@ -43,13 +69,14 @@ struct i915_guc_client {
        uint32_t wq_offset;
        uint32_t wq_size;
        uint32_t wq_tail;
-       uint32_t wq_head;
+       uint32_t unused;                /* Was 'wq_head'                */
 
        /* GuC submission statistics & status */
        uint64_t submissions[GUC_MAX_ENGINES_NUM];
        uint32_t q_fail;
        uint32_t b_fail;
        int retcode;
+       int spare;                      /* pad to 32 DWords             */
 };
 
 enum intel_guc_fw_status {
index b199ede..2cdab73 100644 (file)
@@ -1412,8 +1412,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                                hdmi_to_dig_port(intel_hdmi));
        }
 
-       if (!live_status)
-               DRM_DEBUG_KMS("Live status not up!");
+       if (!live_status) {
+               DRM_DEBUG_KMS("HDMI live status down\n");
+               /*
+                * Live status register is not reliable on all intel platforms.
+                * So consider live_status only for certain platforms, for
+                * others, read EDID to determine presence of sink.
+                */
+               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+                       live_status = true;
+       }
 
        intel_hdmi_unset_edid(connector);
 
index 6dbe73e..81de230 100644 (file)
@@ -571,15 +571,14 @@ clear_err:
        goto out;
 
 timeout:
-       DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
-                bus->adapter.name, bus->reg0 & 0xff);
+       DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+                     bus->adapter.name, bus->reg0 & 0xff);
        I915_WRITE(GMBUS0, 0);
 
        /*
         * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
         * instead. Use EAGAIN to have i2c core retry.
         */
-       bus->force_bit = 1;
        ret = -EAGAIN;
 
 out:
@@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
        intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
        mutex_lock(&dev_priv->gmbus_mutex);
 
-       if (bus->force_bit)
+       if (bus->force_bit) {
                ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
-       else
+               if (ret < 0)
+                       bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
+       } else {
                ret = do_gmbus_xfer(adapter, msgs, num);
+               if (ret == -EAGAIN)
+                       bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
+       }
 
        mutex_unlock(&dev_priv->gmbus_mutex);
        intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
 void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
        struct intel_gmbus *bus = to_intel_gmbus(adapter);
+       struct drm_i915_private *dev_priv = bus->dev_priv;
+
+       mutex_lock(&dev_priv->gmbus_mutex);
 
        bus->force_bit += force_bit ? 1 : -1;
        DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
                      force_bit ? "en" : "dis", adapter->name,
                      bus->force_bit);
+
+       mutex_unlock(&dev_priv->gmbus_mutex);
 }
 
 void intel_teardown_gmbus(struct drm_device *dev)
index 0d6dc5e..6179b59 100644 (file)
@@ -229,9 +229,6 @@ enum {
 
 static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
-                                          struct drm_i915_gem_object *default_ctx_obj);
-
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -418,6 +415,7 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                                      struct drm_i915_gem_request *rq1)
 {
        struct drm_i915_private *dev_priv = rq0->i915;
+       unsigned int fw_domains = rq0->engine->fw_domains;
 
        execlists_update_context(rq0);
 
@@ -425,11 +423,11 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                execlists_update_context(rq1);
 
        spin_lock_irq(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
 
        execlists_elsp_write(rq0, rq1);
 
-       intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
@@ -552,7 +550,7 @@ static void intel_lrc_irq_handler(unsigned long data)
        unsigned int csb_read = 0, i;
        unsigned int submit_contexts = 0;
 
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
        status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
 
@@ -577,7 +575,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
                                    engine->next_context_status_buffer << 8));
 
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 
        spin_lock(&engine->execlist_lock);
 
@@ -892,17 +890,8 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
  */
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct drm_i915_private *dev_priv;
        int ret;
 
-       WARN_ON(req == NULL);
-       dev_priv = req->i915;
-
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                  dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
        ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;
@@ -1016,7 +1005,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, params->request);
-       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -1057,7 +1045,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
                return;
 
        ret = intel_engine_idle(engine);
-       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+       if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          engine->name, ret);
 
@@ -1093,8 +1081,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
-       struct page *lrc_state_page;
-       uint32_t *lrc_reg_state;
+       void *vaddr;
+       u32 *lrc_reg_state;
        int ret;
 
        WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
@@ -1104,19 +1092,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
        if (ret)
                return ret;
 
-       lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       if (WARN_ON(!lrc_state_page)) {
-               ret = -ENODEV;
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
                goto unpin_ctx_obj;
        }
 
+       lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
        ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
        if (ret)
-               goto unpin_ctx_obj;
+               goto unpin_map;
 
        ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
        intel_lr_context_descriptor_update(ctx, engine);
-       lrc_reg_state = kmap(lrc_state_page);
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
        ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
@@ -1127,6 +1116,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
        return ret;
 
+unpin_map:
+       i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
        i915_gem_object_ggtt_unpin(ctx_obj);
 
@@ -1159,7 +1150,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 
        WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
        if (--ctx->engine[engine->id].pin_count == 0) {
-               kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+               i915_gem_object_unpin_map(ctx_obj);
                intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
                i915_gem_object_ggtt_unpin(ctx_obj);
                ctx->engine[engine->id].lrc_vma = NULL;
@@ -1579,14 +1570,22 @@ out:
        return ret;
 }
 
+static void lrc_init_hws(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+                  (u32)engine->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int next_context_status_buffer_hw;
 
-       lrc_setup_hardware_status_page(engine,
-                                      dev_priv->kernel_context->engine[engine->id].state);
+       lrc_init_hws(engine);
 
        I915_WRITE_IMR(engine,
                       ~(engine->irq_enable_mask | engine->irq_keep_mask));
@@ -1625,7 +1624,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
        intel_engine_init_hangcheck(engine);
 
-       return 0;
+       return intel_mocs_init_engine(engine);
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1945,15 +1944,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using qword write, seqno should be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
@@ -1961,7 +1963,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're thrashing one dword of HWS. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
 
@@ -2048,7 +2053,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
        if (engine->status_page.obj) {
-               kunmap(sg_page(engine->status_page.obj->pages->sgl));
+               i915_gem_object_unpin_map(engine->status_page.obj);
                engine->status_page.obj = NULL;
        }
 
@@ -2086,10 +2091,30 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+             struct drm_i915_gem_object *dctx_obj)
+{
+       void *hws;
+
+       /* The HWSP is part of the default context object in LRC mode. */
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+                                      LRC_PPHWSP_PN * PAGE_SIZE;
+       hws = i915_gem_object_pin_map(dctx_obj);
+       if (IS_ERR(hws))
+               return PTR_ERR(hws);
+       engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+       engine->status_page.obj = dctx_obj;
+
+       return 0;
+}
+
 static int
 logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 {
-       struct intel_context *dctx = to_i915(dev)->kernel_context;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_context *dctx = dev_priv->kernel_context;
+       enum forcewake_domains fw_domains;
        int ret;
 
        /* Intentionally left blank. */
@@ -2111,6 +2136,20 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 
        logical_ring_init_platform_invariants(engine);
 
+       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+                                                   RING_ELSP(engine),
+                                                   FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_PTR(engine),
+                                                    FW_REG_READ | FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
+                                                    FW_REG_READ);
+
+       engine->fw_domains = fw_domains;
+
        ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
@@ -2128,6 +2167,13 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
                goto error;
        }
 
+       /* And setup the hardware status page. */
+       ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+       if (ret) {
+               DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+               goto error;
+       }
+
        return 0;
 
 error:
@@ -2378,15 +2424,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+populate_lr_context(struct intel_context *ctx,
+                   struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
                    struct intel_ringbuffer *ringbuf)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-       struct page *page;
-       uint32_t *reg_state;
+       void *vaddr;
+       u32 *reg_state;
        int ret;
 
        if (!ppgtt)
@@ -2398,18 +2445,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                return ret;
        }
 
-       ret = i915_gem_object_get_pages(ctx_obj);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Could not get object pages\n");
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
+               DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-
-       i915_gem_object_pin_pages(ctx_obj);
+       ctx_obj->dirty = true;
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
-       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       reg_state = kmap_atomic(page);
+       reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
         * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2514,8 +2560,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                               make_rpcs(dev));
        }
 
-       kunmap_atomic(reg_state);
-       i915_gem_object_unpin_pages(ctx_obj);
+       i915_gem_object_unpin_map(ctx_obj);
 
        return 0;
 }
@@ -2542,6 +2587,7 @@ void intel_lr_context_free(struct intel_context *ctx)
                if (ctx == ctx->i915->kernel_context) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
+                       i915_gem_object_unpin_map(ctx_obj);
                }
 
                WARN_ON(ctx->engine[i].pin_count);
@@ -2588,24 +2634,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
        return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
-                                          struct drm_i915_gem_object *default_ctx_obj)
-{
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-       struct page *page;
-
-       /* The HWSP is part of the default context object in LRC mode. */
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-                       + LRC_PPHWSP_PN * PAGE_SIZE;
-       page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
-       engine->status_page.page_addr = kmap(page);
-       engine->status_page.obj = default_ctx_obj;
-
-       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-                       (u32)engine->status_page.gfx_addr);
-       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
 /**
  * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
@@ -2669,13 +2697,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                }
 
                ret = engine->init_context(req);
+               i915_add_request_no_flush(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n",
                                ret);
-                       i915_gem_request_cancel(req);
                        goto error_ringbuf;
                }
-               i915_add_request_no_flush(req);
        }
        return 0;
 
@@ -2688,10 +2715,9 @@ error_deref_obj:
        return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
-                       struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+                           struct intel_context *ctx)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
@@ -2699,23 +2725,23 @@ void intel_lr_context_reset(struct drm_device *dev,
                                ctx->engine[engine->id].state;
                struct intel_ringbuffer *ringbuf =
                                ctx->engine[engine->id].ringbuf;
+               void *vaddr;
                uint32_t *reg_state;
-               struct page *page;
 
                if (!ctx_obj)
                        continue;
 
-               if (i915_gem_object_get_pages(ctx_obj)) {
-                       WARN(1, "Failed get_pages for context obj\n");
+               vaddr = i915_gem_object_pin_map(ctx_obj);
+               if (WARN_ON(IS_ERR(vaddr)))
                        continue;
-               }
-               page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-               reg_state = kmap_atomic(page);
+
+               reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               kunmap_atomic(reg_state);
+               i915_gem_object_unpin_map(ctx_obj);
 
                ringbuf->head = 0;
                ringbuf->tail = 0;
index 0b0853e..461f1ef 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _INTEL_LRC_H_
 #define _INTEL_LRC_H_
 
+#include "intel_ringbuffer.h"
+
 #define GEN8_LR_CONTEXT_ALIGN 4096
 
 /* Execlists regs */
@@ -34,6 +36,7 @@
 #define          CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       (1 << 3)
 #define          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   (1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
+#define RING_CONTEXT_STATUS_BUF_BASE(ring)     _MMIO((ring)->mmio_base + 0x370)
 #define RING_CONTEXT_STATUS_BUF_LO(ring, i)    _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
 #define RING_CONTEXT_STATUS_BUF_HI(ring, i)    _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(ring)          _MMIO((ring)->mmio_base + 0x3a0)
@@ -103,8 +106,11 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                    struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
                            struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
-                       struct intel_context *ctx);
+
+struct drm_i915_private;
+
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+                           struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *engine);
 
index 7c7ac0a..23b8545 100644 (file)
@@ -128,9 +128,9 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 
 /**
  * get_mocs_settings()
- * @dev:        DRM device.
+ * @dev_priv:  i915 device.
  * @table:      Output table that will be made to point at appropriate
- *              MOCS values for the device.
+ *           MOCS values for the device.
  *
  * This function will return the values of the MOCS table that needs to
  * be programmed for the platform. It will return the values that need
@@ -138,21 +138,21 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
  *
  * Return: true if there are applicable MOCS settings for the device.
  */
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
                              struct drm_i915_mocs_table *table)
 {
        bool result = false;
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                table->size  = ARRAY_SIZE(skylake_mocs_table);
                table->table = skylake_mocs_table;
                result = true;
-       } else if (IS_BROXTON(dev)) {
+       } else if (IS_BROXTON(dev_priv)) {
                table->size  = ARRAY_SIZE(broxton_mocs_table);
                table->table = broxton_mocs_table;
                result = true;
        } else {
-               WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+               WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
                          "Platform that should have a MOCS table does not.\n");
        }
 
@@ -178,11 +178,50 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
        }
 }
 
+/**
+ * intel_mocs_init_engine() - emit the mocs control table
+ * @engine:    The engine for whom to emit the registers.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_mocs_table table;
+       unsigned int index;
+
+       if (!get_mocs_settings(dev_priv, &table))
+               return 0;
+
+       if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
+               return -ENODEV;
+
+       for (index = 0; index < table.size; index++)
+               I915_WRITE(mocs_register(engine->id, index),
+                          table.table[index].control_value);
+
+       /*
+        * Ok, now set the unused entries to uncached. These entries
+        * are officially undefined and no contract for the contents
+        * and settings is given for these entries.
+        *
+        * Entry 0 in the table is uncached - so we are just writing
+        * that value to all the used entries.
+        */
+       for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
+               I915_WRITE(mocs_register(engine->id, index),
+                          table.table[0].control_value);
+
+       return 0;
+}
+
 /**
  * emit_mocs_control_table() - emit the mocs control table
  * @req:       Request to set up the MOCS table for.
  * @table:     The values to program into the control regs.
- * @ring:      The engine for whom to emit the registers.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
  * given table starting at the given address.
@@ -190,10 +229,10 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  * Return: 0 on success, otherwise the error status.
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
-                                  const struct drm_i915_mocs_table *table,
-                                  enum intel_engine_id ring)
+                                  const struct drm_i915_mocs_table *table)
 {
        struct intel_ringbuffer *ringbuf = req->ringbuf;
+       enum intel_engine_id engine = req->engine->id;
        unsigned int index;
        int ret;
 
@@ -210,7 +249,8 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                                MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
        for (index = 0; index < table->size; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
+               intel_logical_ring_emit_reg(ringbuf,
+                                           mocs_register(engine, index));
                intel_logical_ring_emit(ringbuf,
                                        table->table[index].control_value);
        }
@@ -224,8 +264,10 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
         * that value to all the used entries.
         */
        for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-               intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+               intel_logical_ring_emit_reg(ringbuf,
+                                           mocs_register(engine, index));
+               intel_logical_ring_emit(ringbuf,
+                                       table->table[0].control_value);
        }
 
        intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -234,6 +276,14 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
        return 0;
 }
 
+static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
+                              u16 low,
+                              u16 high)
+{
+       return table->table[low].l3cc_value |
+              table->table[high].l3cc_value << 16;
+}
+
 /**
  * emit_mocs_l3cc_table() - emit the mocs control table
  * @req:       Request to set up the MOCS table for.
@@ -249,11 +299,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
                                const struct drm_i915_mocs_table *table)
 {
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       unsigned int count;
        unsigned int i;
-       u32 value;
-       u32 filler = (table->table[0].l3cc_value & 0xffff) |
-                       ((table->table[0].l3cc_value & 0xffff) << 16);
        int ret;
 
        if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
@@ -268,20 +314,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
        intel_logical_ring_emit(ringbuf,
                        MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
-       for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
-               value = (table->table[count].l3cc_value & 0xffff) |
-                       ((table->table[count + 1].l3cc_value & 0xffff) << 16);
-
+       for (i = 0; i < table->size/2; i++) {
                intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
+               intel_logical_ring_emit(ringbuf,
+                                       l3cc_combine(table, 2*i, 2*i+1));
        }
 
        if (table->size & 0x01) {
                /* Odd table size - 1 left over */
-               value = (table->table[count].l3cc_value & 0xffff) |
-                       ((table->table[0].l3cc_value & 0xffff) << 16);
-       } else
-               value = filler;
+               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+               intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+               i++;
+       }
 
        /*
         * Now set the rest of the table to uncached - use entry 0 as
@@ -290,9 +334,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
         */
        for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
                intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
-
-               value = filler;
+               intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
        }
 
        intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -301,6 +343,47 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
        return 0;
 }
 
+/**
+ * intel_mocs_init_l3cc_table() - program the mocs control table
+ * @dev:      The device to be programmed.
+ *
+ * This function simply programs the mocs registers for the given table
+ * starting at the given address. This register set is programmed in pairs.
+ *
+ * These registers may get programmed more than once, it is simpler to
+ * re-program 32 registers than maintain the state of when they were programmed.
+ * We are always reprogramming with the same values and this only on context
+ * start.
+ *
+ * Return: Nothing.
+ */
+void intel_mocs_init_l3cc_table(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_mocs_table table;
+       unsigned int i;
+
+       if (!get_mocs_settings(dev_priv, &table))
+               return;
+
+       for (i = 0; i < table.size/2; i++)
+               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+
+       /* Odd table size - 1 left over */
+       if (table.size & 0x01) {
+               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+               i++;
+       }
+
+       /*
+        * Now set the rest of the table to uncached - use entry 0 as
+        * this will be uncached. Leave the last pair as initialised as
+        * they are reserved by the hardware.
+        */
+       for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
+               I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+}
+
 /**
  * intel_rcs_context_init_mocs() - program the MOCS register.
  * @req:       Request to set up the MOCS tables for.
@@ -322,17 +405,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
        struct drm_i915_mocs_table t;
        int ret;
 
-       if (get_mocs_settings(req->engine->dev, &t)) {
-               struct drm_i915_private *dev_priv = req->i915;
-               struct intel_engine_cs *engine;
-               enum intel_engine_id id;
-
-               /* Program the control registers */
-               for_each_engine_id(engine, dev_priv, id) {
-                       ret = emit_mocs_control_table(req, &t, id);
-                       if (ret)
-                               return ret;
-               }
+       if (get_mocs_settings(req->i915, &t)) {
+               /* Program the RCS control registers */
+               ret = emit_mocs_control_table(req, &t);
+               if (ret)
+                       return ret;
 
                /* Now program the l3cc registers */
                ret = emit_mocs_l3cc_table(req, &t);
index 76e45b1..4640299 100644 (file)
@@ -53,5 +53,7 @@
 #include "i915_drv.h"
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+void intel_mocs_init_l3cc_table(struct drm_device *dev);
+int intel_mocs_init_engine(struct intel_engine_cs *ring);
 
 #endif
index c15718b..99e2603 100644 (file)
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-#define PCI_ASLE               0xe4
-#define PCI_ASLS               0xfc
-#define PCI_SWSCI              0xe8
-#define PCI_SWSCI_SCISEL       (1 << 15)
-#define PCI_SWSCI_GSSCIE       (1 << 0)
-
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
 #define   ACPI_CLID 0x01ac /* current lid state indicator */
@@ -246,13 +240,12 @@ struct opregion_asle_ext {
 
 #define MAX_DSLP       1500
 
-#ifdef CONFIG_ACPI
 static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct opregion_swsci *swsci = dev_priv->opregion.swsci;
        u32 main_function, sub_function, scic;
-       u16 pci_swsci;
+       u16 swsci_val;
        u32 dslp;
 
        if (!swsci)
@@ -300,16 +293,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
        swsci->scic = scic;
 
        /* Ensure SCI event is selected and event trigger is cleared. */
-       pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
-       if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
-               pci_swsci |= PCI_SWSCI_SCISEL;
-               pci_swsci &= ~PCI_SWSCI_GSSCIE;
-               pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+       pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+       if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+               swsci_val |= SWSCI_SCISEL;
+               swsci_val &= ~SWSCI_GSSCIE;
+               pci_write_config_word(dev->pdev, SWSCI, swsci_val);
        }
 
        /* Use event trigger to tell bios to check the mail. */
-       pci_swsci |= PCI_SWSCI_GSSCIE;
-       pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+       swsci_val |= SWSCI_GSSCIE;
+       pci_write_config_word(dev->pdev, SWSCI, swsci_val);
 
        /* Poll for the result. */
 #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -905,9 +898,6 @@ static void swsci_setup(struct drm_device *dev)
                         opregion->swsci_gbda_sub_functions,
                         opregion->swsci_sbcb_sub_functions);
 }
-#else /* CONFIG_ACPI */
-static inline void swsci_setup(struct drm_device *dev) {}
-#endif  /* CONFIG_ACPI */
 
 static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
@@ -943,16 +933,14 @@ int intel_opregion_setup(struct drm_device *dev)
        BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
        BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
-       pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+       pci_read_config_dword(dev->pdev, ASLS, &asls);
        DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
        if (asls == 0) {
                DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
                return -ENOTSUPP;
        }
 
-#ifdef CONFIG_ACPI
        INIT_WORK(&opregion->asle_work, asle_work);
-#endif
 
        base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
        if (!base)
@@ -1024,3 +1012,31 @@ err_out:
        memunmap(base);
        return err;
 }
+
+int
+intel_opregion_get_panel_type(struct drm_device *dev)
+{
+       u32 panel_details;
+       int ret;
+
+       ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
+                             ret);
+               return ret;
+       }
+
+       ret = (panel_details >> 8) & 0xff;
+       if (ret > 0x10) {
+               DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+               return -EINVAL;
+       }
+
+       /* fall back to VBT panel type? */
+       if (ret == 0x0) {
+               DRM_DEBUG_KMS("No panel type in OpRegion\n");
+               return -ENODEV;
+       }
+
+       return ret - 1;
+}
index 6694e92..bcc3b6a 100644 (file)
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
        ret = intel_ring_begin(req, 4);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -290,7 +290,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
        ret = intel_ring_begin(req, 2);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -356,7 +356,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
        ret = intel_ring_begin(req, 6);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -431,7 +431,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
                ret = intel_ring_begin(req, 2);
                if (ret) {
-                       i915_gem_request_cancel(req);
+                       i915_add_request_no_flush(req);
                        return ret;
                }
 
index 8c8996f..a078876 100644 (file)
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
        if (panel->backlight.combination_mode) {
                u8 lbpc;
 
-               pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
+               pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
                val *= lbpc;
        }
 
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 
                lbpc = level * 0xfe / panel->backlight.max + 1;
                level /= lbpc;
-               pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
+               pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
        }
 
        if (IS_GEN4(dev_priv)) {
index 43b24a1..695a464 100644 (file)
@@ -2483,7 +2483,7 @@ static void ilk_wm_merge(struct drm_device *dev,
        /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
        if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
            config->num_pipes_active > 1)
-               return;
+               last_enabled_level = 0;
 
        /* ILK: FBC WM must be disabled always */
        merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
@@ -4587,7 +4587,7 @@ void intel_set_rps(struct drm_device *dev, u8 val)
                gen6_set_rps(dev, val);
 }
 
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -4595,12 +4595,20 @@ static void gen9_disable_rps(struct drm_device *dev)
        I915_WRITE(GEN9_PG_ENABLE, 0);
 }
 
+static void gen9_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
 static void gen6_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        I915_WRITE(GEN6_RC_CONTROL, 0);
        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+       I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
 static void cherryview_disable_rps(struct drm_device *dev)
@@ -4804,6 +4812,16 @@ static void gen9_enable_rps(struct drm_device *dev)
 
        /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+               /*
+                * BIOS could leave the Hw Turbo enabled, so need to explicitly
+                * clear out the Control register just to avoid inconsistency
+                * with debugfs interface, which will show Turbo as enabled
+                * only and that is not expected by the User after adding the
+                * WaGsvDisableTurbo. Apart from this there is no problem even
+                * if the Turbo is left enabled in the Control register, as the
+                * Up/Down interrupts would remain masked.
+                */
+               gen9_disable_rps(dev);
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                return;
        }
@@ -4997,7 +5015,8 @@ static void gen6_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC_STATE, 0);
 
        /* Clear the DBG now so we don't confuse earlier errors */
-       if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
                DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
@@ -5528,7 +5547,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       gtfifodbg = I915_READ(GTFIFODBG);
+       gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
+                                            GT_FIFO_FREE_ENTRIES_CHV);
        if (gtfifodbg) {
                DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
                                 gtfifodbg);
@@ -5627,7 +5647,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        valleyview_check_pctx(dev_priv);
 
-       if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
                DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
                                 gtfifodbg);
                I915_WRITE(GTFIFODBG, gtfifodbg);
@@ -6265,9 +6286,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
                intel_suspend_gt_powersave(dev);
 
                mutex_lock(&dev_priv->rps.hw_lock);
-               if (INTEL_INFO(dev)->gen >= 9)
+               if (INTEL_INFO(dev)->gen >= 9) {
+                       gen9_disable_rc6(dev);
                        gen9_disable_rps(dev);
-               else if (IS_CHERRYVIEW(dev))
+               } else if (IS_CHERRYVIEW(dev))
                        cherryview_disable_rps(dev);
                else if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
@@ -6882,23 +6904,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        gen6_check_mch_setup(dev);
 }
 
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
-       /*
-        * Disable trickle feed and enable pnd deadline calculation
-        */
-       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-       I915_WRITE(CBR1_VLV, 0);
-}
-
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       vlv_init_display_clock_gating(dev_priv);
-
        /* WaDisableEarlyCull:vlv */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -6981,8 +6990,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       vlv_init_display_clock_gating(dev_priv);
-
        /* WaVSRefCountFullforceMissDisable:chv */
        /* WaDSRefCountFullforceMissDisable:chv */
        I915_WRITE(GEN7_FF_THREAD_MODE,
index 41b604e..245386e 100644 (file)
@@ -959,9 +959,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
-               WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-                                 GEN9_ENABLE_YV12_BUGFIX);
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+       WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+                         GEN9_ENABLE_YV12_BUGFIX |
+                         GEN9_ENABLE_GPGPU_PREEMPTION);
 
        /* Wa4x4STCOptimizationDisable:skl,bxt */
        /* WaDisablePartialResolveInVc:skl,bxt */
@@ -980,7 +981,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
            IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1097,7 +1098,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+       /* This is tied to WaForceContextSaveRestoreNonCoherent */
+       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
                /*
                 *Use Force Non-Coherent whenever executing a 3D context. This
                 * is a workaround for a possible hang in the unlikely event
@@ -2086,6 +2088,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
                i915_gem_object_unpin_map(ringbuf->obj);
        else
                iounmap(ringbuf->virtual_start);
+       ringbuf->virtual_start = NULL;
        ringbuf->vma = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
@@ -2096,10 +2099,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = ringbuf->obj;
+       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+       unsigned flags = PIN_OFFSET_BIAS | 4096;
+       void *addr;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
                if (ret)
                        return ret;
 
@@ -2107,13 +2113,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                if (ret)
                        goto err_unpin;
 
-               ringbuf->virtual_start = i915_gem_object_pin_map(obj);
-               if (ringbuf->virtual_start == NULL) {
-                       ret = -ENOMEM;
+               addr = i915_gem_object_pin_map(obj);
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
                        goto err_unpin;
                }
        } else {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+                                           flags | PIN_MAPPABLE);
                if (ret)
                        return ret;
 
@@ -2124,14 +2131,15 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base +
-                                                   i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (ringbuf->virtual_start == NULL) {
+               addr = ioremap_wc(ggtt->mappable_base +
+                                 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+               if (addr == NULL) {
                        ret = -ENOMEM;
                        goto err_unpin;
                }
        }
 
+       ringbuf->virtual_start = addr;
        ringbuf->vma = i915_gem_obj_to_ggtt(obj);
        return 0;
 
@@ -2363,8 +2371,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
        /* Make sure we do not trigger any retires */
        return __i915_wait_request(req,
-                                  atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
-                                  to_i915(engine->dev)->mm.interruptible,
+                                  req->i915->mm.interruptible,
                                   NULL, NULL);
 }
 
@@ -2486,19 +2493,9 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
 int intel_ring_begin(struct drm_i915_gem_request *req,
                     int num_dwords)
 {
-       struct intel_engine_cs *engine;
-       struct drm_i915_private *dev_priv;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       WARN_ON(req == NULL);
-       engine = req->engine;
-       dev_priv = req->i915;
-
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                  dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
        ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;
@@ -3189,7 +3186,7 @@ intel_stop_engine(struct intel_engine_cs *engine)
                return;
 
        ret = intel_engine_idle(engine);
-       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+       if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          engine->name, ret);
 
index 78dc468..2ade194 100644 (file)
@@ -270,6 +270,7 @@ struct  intel_engine_cs {
        spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
+       unsigned int fw_domains;
        unsigned int next_context_status_buffer;
        unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
index 80e8bd4..7fb1da4 100644 (file)
@@ -397,11 +397,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_MODESET) |                     \
        BIT(POWER_DOMAIN_AUX_A) |                       \
        BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (          \
-       (POWER_DOMAIN_MASK & ~(                         \
-       SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |            \
-       BIT(POWER_DOMAIN_INIT))
 
 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
        BIT(POWER_DOMAIN_TRANSCODER_A) |                \
@@ -419,39 +414,21 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_VGA) |                         \
        BIT(POWER_DOMAIN_GMBUS) |                       \
        BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (                \
-       BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT(POWER_DOMAIN_PIPE_A) |                      \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
-       BIT(POWER_DOMAIN_TRANSCODER_DSI_A) |            \
-       BIT(POWER_DOMAIN_TRANSCODER_DSI_C) |            \
-       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |         \
-       BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
-       BIT(POWER_DOMAIN_PORT_DSI) |                    \
-       BIT(POWER_DOMAIN_AUX_A) |                       \
-       BIT(POWER_DOMAIN_PLLS) |                        \
-       BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (             \
        BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
        BIT(POWER_DOMAIN_MODESET) |                     \
        BIT(POWER_DOMAIN_AUX_A) |                       \
        BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (          \
-       (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |  \
-       BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |       \
-       BIT(POWER_DOMAIN_INIT))
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
-       WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
-               "DC9 already programmed to be enabled.\n");
-       WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-               "DC5 still not disabled to enable DC9.\n");
-       WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
-       WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+                 "DC9 already programmed to be enabled.\n");
+       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+                 "DC5 still not disabled to enable DC9.\n");
+       WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+       WARN_ONCE(intel_irqs_enabled(dev_priv),
+                 "Interrupts not disabled yet.\n");
 
         /*
          * TODO: check for the following to verify the conditions to enter DC9
@@ -464,9 +441,10 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 
 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 {
-       WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
-       WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-               "DC5 still not disabled.\n");
+       WARN_ONCE(intel_irqs_enabled(dev_priv),
+                 "Interrupts not disabled yet.\n");
+       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+                 "DC5 still not disabled.\n");
 
         /*
          * TODO: check for the following to verify DC9 state was indeed
@@ -514,10 +492,9 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
                              state, rewrites);
 }
 
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 {
-       uint32_t val;
-       uint32_t mask;
+       u32 mask;
 
        mask = DC_STATE_EN_UPTO_DC5;
        if (IS_BROXTON(dev_priv))
@@ -525,10 +502,30 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
        else
                mask |= DC_STATE_EN_UPTO_DC6;
 
+       return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+       DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+                     dev_priv->csr.dc_state, val);
+       dev_priv->csr.dc_state = val;
+}
+
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+       uint32_t val;
+       uint32_t mask;
+
        if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
                state &= dev_priv->csr.allowed_dc_mask;
 
        val = I915_READ(DC_STATE_EN);
+       mask = gen9_dc_mask(dev_priv);
        DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
                      val & mask, state);
 
@@ -573,13 +570,9 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
                                        SKL_DISP_PW_2);
 
-       WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
-                 "Platform doesn't support DC5.\n");
-       WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
        WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
@@ -589,7 +582,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
        assert_csr_loaded(dev_priv);
 }
 
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 {
        assert_can_enable_dc5(dev_priv);
 
@@ -600,11 +593,6 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 
 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
-                 "Platform doesn't support DC6.\n");
-       WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
        WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                  "Backlight is not disabled.\n");
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
@@ -630,6 +618,45 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
+static void
+gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       enum skl_disp_power_wells power_well_id = power_well->data;
+       u32 val;
+       u32 mask;
+
+       mask = SKL_POWER_WELL_REQ(power_well_id);
+
+       val = I915_READ(HSW_PWR_WELL_KVMR);
+       if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
+                     power_well->name))
+               I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
+
+       val = I915_READ(HSW_PWR_WELL_BIOS);
+       val |= I915_READ(HSW_PWR_WELL_DEBUG);
+
+       if (!(val & mask))
+               return;
+
+       /*
+        * DMC is known to force on the request bits for power well 1 on SKL
+        * and BXT and the misc IO power well on SKL but we don't expect any
+        * other request bits to be set, so WARN for those.
+        */
+       if (power_well_id == SKL_DISP_PW_1 ||
+           ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+            power_well_id == SKL_DISP_PW_MISC_IO))
+               DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
+                                "by DMC\n", power_well->name);
+       else
+               WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
+                         power_well->name);
+
+       I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
+       I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
+}
+
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
                        struct i915_power_well *power_well, bool enable)
 {
@@ -684,10 +711,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
                if (!is_enabled) {
                        DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
-                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                               state_mask), 1))
-                               DRM_ERROR("%s enable timeout\n",
-                                       power_well->name);
                        check_fuse_status = true;
                }
        } else {
@@ -696,8 +719,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
                        POSTING_READ(HSW_PWR_WELL_DRIVER);
                        DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
                }
+
+               if (IS_GEN9(dev_priv))
+                       gen9_sanitize_power_well_requests(dev_priv, power_well);
        }
 
+       if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
+                    1))
+               DRM_ERROR("%s %s timeout\n",
+                         power_well->name, enable ? "enable" : "disable");
+
        if (check_fuse_status) {
                if (power_well->data == SKL_DISP_PW_1) {
                        if (wait_for((I915_READ(SKL_FUSE_STATUS) &
@@ -779,11 +810,19 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
 {
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       if (IS_BROXTON(dev_priv)) {
+               broxton_cdclk_verify_state(dev_priv);
+               broxton_ddi_phy_verify_state(dev_priv);
+       }
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
+       if (!dev_priv->csr.dmc_payload)
+               return;
+
        if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
                skl_enable_dc6(dev_priv);
        else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
@@ -900,6 +939,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
        return enabled;
 }
 
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+       /*
+        * Disable trickle feed and enable pnd deadline calculation
+        */
+       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+       I915_WRITE(CBR1_VLV, 0);
+}
+
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
        enum pipe pipe;
@@ -922,6 +972,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
                I915_WRITE(DPLL(pipe), val);
        }
 
+       vlv_init_display_clock_gating(dev_priv);
+
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
@@ -1560,34 +1612,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        intel_runtime_pm_put(dev_priv);
 }
 
-#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
-       BIT(POWER_DOMAIN_PIPE_A) |                      \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
-       BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
+#define HSW_DISPLAY_POWER_DOMAINS (                    \
+       BIT(POWER_DOMAIN_PIPE_B) |                      \
+       BIT(POWER_DOMAIN_PIPE_C) |                      \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |         \
+       BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
+       BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
+       BIT(POWER_DOMAIN_TRANSCODER_A) |                \
+       BIT(POWER_DOMAIN_TRANSCODER_B) |                \
+       BIT(POWER_DOMAIN_TRANSCODER_C) |                \
        BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
        BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
        BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
-       BIT(POWER_DOMAIN_PORT_CRT) |                    \
-       BIT(POWER_DOMAIN_PLLS) |                        \
-       BIT(POWER_DOMAIN_AUX_A) |                       \
-       BIT(POWER_DOMAIN_AUX_B) |                       \
-       BIT(POWER_DOMAIN_AUX_C) |                       \
-       BIT(POWER_DOMAIN_AUX_D) |                       \
-       BIT(POWER_DOMAIN_GMBUS) |                       \
-       BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */        \
+       BIT(POWER_DOMAIN_VGA) |                         \
+       BIT(POWER_DOMAIN_AUDIO) |                       \
        BIT(POWER_DOMAIN_INIT))
 
-#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
-       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
-       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
+#define BDW_DISPLAY_POWER_DOMAINS (                    \
+       BIT(POWER_DOMAIN_PIPE_B) |                      \
+       BIT(POWER_DOMAIN_PIPE_C) |                      \
+       BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |         \
+       BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |         \
+       BIT(POWER_DOMAIN_TRANSCODER_A) |                \
+       BIT(POWER_DOMAIN_TRANSCODER_B) |                \
+       BIT(POWER_DOMAIN_TRANSCODER_C) |                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
+       BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
+       BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
+       BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */        \
+       BIT(POWER_DOMAIN_VGA) |                         \
+       BIT(POWER_DOMAIN_AUDIO) |                       \
        BIT(POWER_DOMAIN_INIT))
 
-#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
+#define VLV_DISPLAY_POWER_DOMAINS (            \
+       BIT(POWER_DOMAIN_PIPE_A) |              \
+       BIT(POWER_DOMAIN_PIPE_B) |              \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+       BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+       BIT(POWER_DOMAIN_TRANSCODER_A) |        \
+       BIT(POWER_DOMAIN_TRANSCODER_B) |        \
+       BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
+       BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
+       BIT(POWER_DOMAIN_PORT_DSI) |            \
+       BIT(POWER_DOMAIN_PORT_CRT) |            \
+       BIT(POWER_DOMAIN_VGA) |                 \
+       BIT(POWER_DOMAIN_AUDIO) |               \
+       BIT(POWER_DOMAIN_AUX_B) |               \
+       BIT(POWER_DOMAIN_AUX_C) |               \
+       BIT(POWER_DOMAIN_GMBUS) |               \
+       BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
        BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
@@ -1617,6 +1691,28 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_AUX_C) |               \
        BIT(POWER_DOMAIN_INIT))
 
+#define CHV_DISPLAY_POWER_DOMAINS (            \
+       BIT(POWER_DOMAIN_PIPE_A) |              \
+       BIT(POWER_DOMAIN_PIPE_B) |              \
+       BIT(POWER_DOMAIN_PIPE_C) |              \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+       BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+       BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+       BIT(POWER_DOMAIN_TRANSCODER_A) |        \
+       BIT(POWER_DOMAIN_TRANSCODER_B) |        \
+       BIT(POWER_DOMAIN_TRANSCODER_C) |        \
+       BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
+       BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
+       BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |    \
+       BIT(POWER_DOMAIN_PORT_DSI) |            \
+       BIT(POWER_DOMAIN_VGA) |                 \
+       BIT(POWER_DOMAIN_AUDIO) |               \
+       BIT(POWER_DOMAIN_AUX_B) |               \
+       BIT(POWER_DOMAIN_AUX_C) |               \
+       BIT(POWER_DOMAIN_AUX_D) |               \
+       BIT(POWER_DOMAIN_GMBUS) |               \
+       BIT(POWER_DOMAIN_INIT))
+
 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
        BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
        BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
@@ -1684,7 +1780,7 @@ static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
@@ -1698,7 +1794,7 @@ static struct i915_power_well bdw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
@@ -1733,7 +1829,7 @@ static struct i915_power_well vlv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .data = PUNIT_POWER_WELL_ALWAYS_ON,
        },
@@ -1791,7 +1887,7 @@ static struct i915_power_well chv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
@@ -1801,7 +1897,7 @@ static struct i915_power_well chv_power_wells[] = {
                 * power wells don't actually exist. Pipe A power well is
                 * required for any pipe to work.
                 */
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .domains = CHV_DISPLAY_POWER_DOMAINS,
                .data = PIPE_A,
                .ops = &chv_pipe_power_well_ops,
        },
@@ -1835,7 +1931,7 @@ static struct i915_power_well skl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .data = SKL_DISP_PW_ALWAYS_ON,
        },
@@ -1891,44 +1987,16 @@ static struct i915_power_well skl_power_wells[] = {
        },
 };
 
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *well;
-
-       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-               return;
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-       intel_power_well_enable(dev_priv, well);
-}
-
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *well;
-
-       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-               return;
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-       intel_power_well_disable(dev_priv, well);
-}
-
 static struct i915_power_well bxt_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
-               .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+               .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "power well 1",
-               .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+               .domains = 0,
                .ops = &skl_power_well_ops,
                .data = SKL_DISP_PW_1,
        },
@@ -1953,11 +2021,6 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
        if (disable_power_well >= 0)
                return !!disable_power_well;
 
-       if (IS_BROXTON(dev_priv)) {
-               DRM_DEBUG_KMS("Disabling display power well support\n");
-               return 0;
-       }
-
        return 1;
 }
 
@@ -2109,9 +2172,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 }
 
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
-                                 bool resume)
+                                  bool resume)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
        uint32_t val;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2122,7 +2186,13 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
        /* enable PG1 and Misc I/O */
        mutex_lock(&power_domains->lock);
-       skl_pw1_misc_io_init(dev_priv);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+       intel_power_well_enable(dev_priv, well);
+
        mutex_unlock(&power_domains->lock);
 
        if (!resume)
@@ -2137,6 +2207,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -2144,8 +2215,73 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 
        /* The spec doesn't call for removing the reset handshake flag */
        /* disable PG1 and Misc I/O */
+
        mutex_lock(&power_domains->lock);
-       skl_pw1_misc_io_fini(dev_priv);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+       intel_power_well_disable(dev_priv, well);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+                          bool resume)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+       uint32_t val;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /*
+        * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+        * or else the reset will hang because there is no PCH to respond.
+        * Move the handshake programming to initialization sequence.
+        * Previously was left up to BIOS.
+        */
+       val = I915_READ(HSW_NDE_RSTWRN_OPT);
+       val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+       /* Enable PG1 */
+       mutex_lock(&power_domains->lock);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+
+       broxton_init_cdclk(dev_priv);
+       broxton_ddi_phy_init(dev_priv);
+
+       broxton_cdclk_verify_state(dev_priv);
+       broxton_ddi_phy_verify_state(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       broxton_ddi_phy_uninit(dev_priv);
+       broxton_uninit_cdclk(dev_priv);
+
+       /* The spec doesn't call for removing the reset handshake flag */
+
+       /* Disable PG1 */
+       mutex_lock(&power_domains->lock);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+
        mutex_unlock(&power_domains->lock);
 }
 
@@ -2280,6 +2416,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 
        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                skl_display_core_init(dev_priv, resume);
+       } else if (IS_BROXTON(dev)) {
+               bxt_display_core_init(dev_priv, resume);
        } else if (IS_CHERRYVIEW(dev)) {
                mutex_lock(&power_domains->lock);
                chv_phy_control_init(dev_priv);
@@ -2317,6 +2455,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                skl_display_core_uninit(dev_priv);
+       else if (IS_BROXTON(dev_priv))
+               bxt_display_core_uninit(dev_priv);
 }
 
 /**
index ac2ac07..4f1dfe6 100644 (file)
@@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 static inline void
 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 {
-       mod_timer_pinned(&d->timer, jiffies + 1);
+       d->wake_count++;
+       hrtimer_start_range_ns(&d->timer,
+                              ktime_set(0, NSEC_PER_MSEC),
+                              NSEC_PER_MSEC,
+                              HRTIMER_MODE_REL);
 }
 
 static inline void
@@ -107,22 +111,22 @@ static void
 fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *d;
-       enum forcewake_domain_id id;
 
-       for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+       for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
-               fw_domain_wait_ack(d);
        }
+
+       for_each_fw_domain_masked(d, fw_domains, dev_priv)
+               fw_domain_wait_ack(d);
 }
 
 static void
 fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *d;
-       enum forcewake_domain_id id;
 
-       for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+       for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
@@ -132,10 +136,9 @@ static void
 fw_domains_posting_read(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore_forcewake_domain *d;
-       enum forcewake_domain_id id;
 
        /* No need to do for all, just do for first found */
-       for_each_fw_domain(d, dev_priv, id) {
+       for_each_fw_domain(d, dev_priv) {
                fw_domain_posting_read(d);
                break;
        }
@@ -145,12 +148,11 @@ static void
 fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *d;
-       enum forcewake_domain_id id;
 
        if (dev_priv->uncore.fw_domains == 0)
                return;
 
-       for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+       for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_reset(d);
 
        fw_domains_posting_read(dev_priv);
@@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static void intel_uncore_fw_release_timer(unsigned long arg)
+static enum hrtimer_restart
+intel_uncore_fw_release_timer(struct hrtimer *timer)
 {
-       struct intel_uncore_forcewake_domain *domain = (void *)arg;
+       struct intel_uncore_forcewake_domain *domain =
+              container_of(timer, struct intel_uncore_forcewake_domain, timer);
        unsigned long irqflags;
 
        assert_rpm_device_not_suspended(domain->i915);
@@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg)
                                                          1 << domain->id);
 
        spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+
+       return HRTIMER_NORESTART;
 }
 
 void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
@@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
-       enum forcewake_domain_id id;
        enum forcewake_domains fw = 0, active_domains;
 
        /* Hold uncore.lock across reset to prevent any register access
@@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        while (1) {
                active_domains = 0;
 
-               for_each_fw_domain(domain, dev_priv, id) {
-                       if (del_timer_sync(&domain->timer) == 0)
+               for_each_fw_domain(domain, dev_priv) {
+                       if (hrtimer_cancel(&domain->timer) == 0)
                                continue;
 
-                       intel_uncore_fw_release_timer((unsigned long)domain);
+                       intel_uncore_fw_release_timer(&domain->timer);
                }
 
                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-               for_each_fw_domain(domain, dev_priv, id) {
-                       if (timer_pending(&domain->timer))
-                               active_domains |= (1 << id);
+               for_each_fw_domain(domain, dev_priv) {
+                       if (hrtimer_active(&domain->timer))
+                               active_domains |= domain->mask;
                }
 
                if (active_domains == 0)
@@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 
        WARN_ON(active_domains);
 
-       for_each_fw_domain(domain, dev_priv, id)
+       for_each_fw_domain(domain, dev_priv)
                if (domain->wake_count)
-                       fw |= 1 << id;
+                       fw |= domain->mask;
 
        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -310,21 +315,49 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static void intel_uncore_ellc_detect(struct drm_device *dev)
+static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+       const unsigned int sets[4] = { 1, 1, 2, 2 };
+       const u32 cap = dev_priv->edram_cap;
+
+       return EDRAM_NUM_BANKS(cap) *
+               ways[EDRAM_WAYS_IDX(cap)] *
+               sets[EDRAM_SETS_IDX(cap)] *
+               1024 * 1024;
+}
+
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
+{
+       if (!HAS_EDRAM(dev_priv))
+               return 0;
+
+       /* The needed capability bits for size calculation
+        * are not there with pre gen9 so return 128MB always.
+        */
+       if (INTEL_GEN(dev_priv) < 9)
+               return 128 * 1024 * 1024;
+
+       return gen9_edram_size(dev_priv);
+}
+
+static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
+{
+       if (IS_HASWELL(dev_priv) ||
+           IS_BROADWELL(dev_priv) ||
+           INTEL_GEN(dev_priv) >= 9) {
+               dev_priv->edram_cap = __raw_i915_read32(dev_priv,
+                                                       HSW_EDRAM_CAP);
 
-       if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
-            INTEL_INFO(dev)->gen >= 9) &&
-           (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
-               /* The docs do not explain exactly how the calculation can be
-                * made. It is somewhat guessable, but for now, it's always
-                * 128MB.
-                * NB: We can't write IDICR yet because we do not have gt funcs
+               /* NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
-               dev_priv->ellc_size = 128;
-               DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+       } else {
+               dev_priv->edram_cap = 0;
        }
+
+       if (HAS_EDRAM(dev_priv))
+               DRM_INFO("Found %lluMB of eDRAM\n",
+                        intel_uncore_edram_size(dev_priv) / (1024 * 1024));
 }
 
 static bool
@@ -410,16 +443,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *domain;
-       enum forcewake_domain_id id;
 
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
 
        fw_domains &= dev_priv->uncore.fw_domains;
 
-       for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+       for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (domain->wake_count++)
-                       fw_domains &= ~(1 << id);
+                       fw_domains &= ~domain->mask;
        }
 
        if (fw_domains)
@@ -477,21 +509,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *domain;
-       enum forcewake_domain_id id;
 
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
 
        fw_domains &= dev_priv->uncore.fw_domains;
 
-       for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+       for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;
 
                if (--domain->wake_count)
                        continue;
 
-               domain->wake_count++;
                fw_domain_arm_timer(domain);
        }
 }
@@ -539,18 +569,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore_forcewake_domain *domain;
-       enum forcewake_domain_id id;
 
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
 
-       for_each_fw_domain(domain, dev_priv, id)
+       for_each_fw_domain(domain, dev_priv)
                WARN_ON(domain->wake_count);
 }
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 
+#define __gen6_reg_read_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd; \
+       if (NEEDS_FORCE_WAKE(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else \
+               __fwd = 0; \
+       __fwd; \
+})
+
 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
 #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
@@ -564,6 +603,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
         REG_RANGE((reg), 0x22000, 0x24000) || \
         REG_RANGE((reg), 0x30000, 0x40000))
 
+#define __vlv_reg_read_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd = 0; \
+       if (!NEEDS_FORCE_WAKE(offset)) \
+               __fwd = 0; \
+       else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_MEDIA; \
+       __fwd; \
+})
+
+static const i915_reg_t gen8_shadowed_regs[] = {
+       GEN6_RPNSWREQ,
+       GEN6_RC_VIDEO_FREQ,
+       RING_TAIL(RENDER_RING_BASE),
+       RING_TAIL(GEN6_BSD_RING_BASE),
+       RING_TAIL(VEBOX_RING_BASE),
+       RING_TAIL(BLT_RING_BASE),
+       /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(u32 offset)
+{
+       int i;
+       for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+               if (offset == gen8_shadowed_regs[i].reg)
+                       return true;
+
+       return false;
+}
+
+#define __gen8_reg_write_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd; \
+       if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else \
+               __fwd = 0; \
+       __fwd; \
+})
+
 #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
@@ -586,6 +667,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
         REG_RANGE((reg), 0x9000, 0xB000) || \
         REG_RANGE((reg), 0xF000, 0x10000))
 
+#define __chv_reg_read_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd = 0; \
+       if (!NEEDS_FORCE_WAKE(offset)) \
+               __fwd = 0; \
+       else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_MEDIA; \
+       else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       __fwd; \
+})
+
+#define __chv_reg_write_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd = 0; \
+       if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
+               __fwd = 0; \
+       else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_MEDIA; \
+       else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       __fwd; \
+})
+
 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0xB00,  0x2000)
 
@@ -618,6 +727,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
 
+#define SKL_NEEDS_FORCE_WAKE(reg) \
+       ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_reg_read_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd; \
+       if (!SKL_NEEDS_FORCE_WAKE(offset)) \
+               __fwd = 0; \
+       else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_MEDIA; \
+       else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       else \
+               __fwd = FORCEWAKE_BLITTER; \
+       __fwd; \
+})
+
+static const i915_reg_t gen9_shadowed_regs[] = {
+       RING_TAIL(RENDER_RING_BASE),
+       RING_TAIL(GEN6_BSD_RING_BASE),
+       RING_TAIL(VEBOX_RING_BASE),
+       RING_TAIL(BLT_RING_BASE),
+       GEN6_RPNSWREQ,
+       GEN6_RC_VIDEO_FREQ,
+       /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(u32 offset)
+{
+       int i;
+       for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+               if (offset == gen9_shadowed_regs[i].reg)
+                       return true;
+
+       return false;
+}
+
+#define __gen9_reg_write_fw_domains(offset) \
+({ \
+       enum forcewake_domains __fwd; \
+       if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
+               __fwd = 0; \
+       else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER; \
+       else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_MEDIA; \
+       else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+               __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       else \
+               __fwd = FORCEWAKE_BLITTER; \
+       __fwd; \
+})
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -633,15 +797,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                      const bool read,
                      const bool before)
 {
-       /* XXX. We limit the auto arming traces for mmio
-        * debugs on these platforms. There are just too many
-        * revealed by these and CI/Bat suffers from the noise.
-        * Please fix and then re-enable the automatic traces.
-        */
-       if (i915.mmio_debug < 2 &&
-           (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
-               return;
-
        if (WARN(check_for_unclaimed_mmio(dev_priv),
                 "Unclaimed register detected %s %s register 0x%x\n",
                 before ? "before" : "after",
@@ -720,19 +875,17 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *domain;
-       enum forcewake_domain_id id;
 
        if (WARN_ON(!fw_domains))
                return;
 
        /* Ideally GCC would be constant-fold and eliminate this loop */
-       for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+       for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (domain->wake_count) {
-                       fw_domains &= ~(1 << id);
+                       fw_domains &= ~domain->mask;
                        continue;
                }
 
-               domain->wake_count++;
                fw_domain_arm_timer(domain);
        }
 
@@ -743,9 +896,11 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
 #define __gen6_read(x) \
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+       enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
-       if (NEEDS_FORCE_WAKE(offset)) \
-               __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
+       fw_engine = __gen6_reg_read_fw_domains(offset); \
+       if (fw_engine) \
+               __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
@@ -753,14 +908,9 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 #define __vlv_read(x) \
 static u##x \
 vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-       enum forcewake_domains fw_engine = 0; \
+       enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
-       if (!NEEDS_FORCE_WAKE(offset)) \
-               fw_engine = 0; \
-       else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER; \
-       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_MEDIA; \
+       fw_engine = __vlv_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
@@ -770,40 +920,21 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 #define __chv_read(x) \
 static u##x \
 chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-       enum forcewake_domains fw_engine = 0; \
+       enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
-       if (!NEEDS_FORCE_WAKE(offset)) \
-               fw_engine = 0; \
-       else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER; \
-       else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_MEDIA; \
-       else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       fw_engine = __chv_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
 
-#define SKL_NEEDS_FORCE_WAKE(reg) \
-       ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
 #define __gen9_read(x) \
 static u##x \
 gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
-       if (!SKL_NEEDS_FORCE_WAKE(offset)) \
-               fw_engine = 0; \
-       else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER; \
-       else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_MEDIA; \
-       else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-       else \
-               fw_engine = FORCEWAKE_BLITTER; \
+       fw_engine = __gen9_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
@@ -942,34 +1073,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
        GEN6_WRITE_FOOTER; \
 }
 
-static const i915_reg_t gen8_shadowed_regs[] = {
-       FORCEWAKE_MT,
-       GEN6_RPNSWREQ,
-       GEN6_RC_VIDEO_FREQ,
-       RING_TAIL(RENDER_RING_BASE),
-       RING_TAIL(GEN6_BSD_RING_BASE),
-       RING_TAIL(VEBOX_RING_BASE),
-       RING_TAIL(BLT_RING_BASE),
-       /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
-                            i915_reg_t reg)
-{
-       int i;
-       for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
-               if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
-                       return true;
-
-       return false;
-}
-
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+       enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
-       if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
-               __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
+       fw_engine = __gen8_reg_write_fw_domains(offset); \
+       if (fw_engine) \
+               __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
 }
@@ -977,64 +1088,22 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 #define __chv_write(x) \
 static void \
 chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
-       enum forcewake_domains fw_engine = 0; \
+       enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
-       if (!NEEDS_FORCE_WAKE(offset) || \
-           is_gen8_shadowed(dev_priv, reg)) \
-               fw_engine = 0; \
-       else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER; \
-       else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_MEDIA; \
-       else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+       fw_engine = __chv_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
 }
 
-static const i915_reg_t gen9_shadowed_regs[] = {
-       RING_TAIL(RENDER_RING_BASE),
-       RING_TAIL(GEN6_BSD_RING_BASE),
-       RING_TAIL(VEBOX_RING_BASE),
-       RING_TAIL(BLT_RING_BASE),
-       FORCEWAKE_BLITTER_GEN9,
-       FORCEWAKE_RENDER_GEN9,
-       FORCEWAKE_MEDIA_GEN9,
-       GEN6_RPNSWREQ,
-       GEN6_RC_VIDEO_FREQ,
-       /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
-                            i915_reg_t reg)
-{
-       int i;
-       for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
-               if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
-                       return true;
-
-       return false;
-}
-
 #define __gen9_write(x) \
 static void \
 gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
-       if (!SKL_NEEDS_FORCE_WAKE(offset) || \
-           is_gen9_shadowed(dev_priv, reg)) \
-               fw_engine = 0; \
-       else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER; \
-       else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_MEDIA; \
-       else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-               fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-       else \
-               fw_engine = FORCEWAKE_BLITTER; \
+       fw_engine = __gen9_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
@@ -1150,7 +1219,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
        d->i915 = dev_priv;
        d->id = domain_id;
 
-       setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+       BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
+       BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+
+       d->mask = 1 << domain_id;
+
+       hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       d->timer.function = intel_uncore_fw_release_timer;
 
        dev_priv->uncore.fw_domains |= (1 << domain_id);
 
@@ -1189,7 +1265,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
-               dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+               if (IS_HASWELL(dev))
+                       dev_priv->uncore.funcs.force_wake_put =
+                               fw_domains_put_with_fifo;
+               else
+                       dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
@@ -1253,7 +1333,7 @@ void intel_uncore_init(struct drm_device *dev)
 
        i915_check_vgpu(dev);
 
-       intel_uncore_ellc_detect(dev);
+       intel_uncore_edram_detect(dev_priv);
        intel_uncore_fw_domains_init(dev);
        __intel_uncore_early_sanitize(dev, false);
 
@@ -1715,3 +1795,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 
        return false;
 }
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+                               i915_reg_t reg)
+{
+       enum forcewake_domains fw_domains;
+
+       if (intel_vgpu_active(dev_priv->dev))
+               return 0;
+
+       switch (INTEL_INFO(dev_priv)->gen) {
+       case 9:
+               fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+               break;
+       case 8:
+               if (IS_CHERRYVIEW(dev_priv))
+                       fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+               else
+                       fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+               break;
+       case 7:
+       case 6:
+               if (IS_VALLEYVIEW(dev_priv))
+                       fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+               else
+                       fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+               break;
+       default:
+               MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+       case 5: /* forcewake was introduced with gen6 */
+       case 4:
+       case 3:
+       case 2:
+               return 0;
+       }
+
+       WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+       return fw_domains;
+}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+                                i915_reg_t reg)
+{
+       enum forcewake_domains fw_domains;
+
+       if (intel_vgpu_active(dev_priv->dev))
+               return 0;
+
+       switch (INTEL_INFO(dev_priv)->gen) {
+       case 9:
+               fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+               break;
+       case 8:
+               if (IS_CHERRYVIEW(dev_priv))
+                       fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+               else
+                       fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+               break;
+       case 7:
+       case 6:
+               fw_domains = FORCEWAKE_RENDER;
+               break;
+       default:
+               MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+       case 5:
+       case 4:
+       case 3:
+       case 2:
+               return 0;
+       }
+
+       WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+       return fw_domains;
+}
+
+/**
+ * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
+ *                                 a register
+ * @dev_priv: pointer to struct drm_i915_private
+ * @reg: register in question
+ * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
+ *
+ * Returns a set of forcewake domains that must be taken with, for example,
+ * intel_uncore_forcewake_get for the specified register to be accessible in the
+ * specified mode (read, write or read/write) with raw mmio accessors.
+ *
+ * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
+ * callers to do FIFO management on their own or risk losing writes.
+ */
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg, unsigned int op)
+{
+       enum forcewake_domains fw_domains = 0;
+
+       WARN_ON(!op);
+
+       if (op & FW_REG_READ)
+               fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+
+       if (op & FW_REG_WRITE)
+               fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+
+       return fw_domains;
+}
index e26dcde..2453fb1 100644 (file)
@@ -411,7 +411,7 @@ static struct drm_driver imx_drm_driver = {
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
        .set_busid              = drm_platform_set_busid,
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
index e233acf..3a48889 100644 (file)
@@ -121,7 +121,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
                if (!file || (event->base.file_priv == file)) {
                        mdp4_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp4_crtc->name, event);
-                       drm_send_vblank_event(dev, mdp4_crtc->id, event);
+                       drm_crtc_send_vblank_event(crtc, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
index 9673b95..ce779d9 100644 (file)
@@ -149,7 +149,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp5_crtc->name, event);
-                       drm_send_vblank_event(dev, mdp5_crtc->id, event);
+                       drm_crtc_send_vblank_event(crtc, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
index 7eb253b..5c61309 100644 (file)
@@ -190,17 +190,16 @@ int msm_atomic_check(struct drm_device *dev,
  * drm_atomic_helper_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * object. This can still fail when e.g. the framebuffer reservation fails.
  *
  * RETURNS
  * Zero for success or -errno.
  */
 int msm_atomic_commit(struct drm_device *dev,
-               struct drm_atomic_state *state, bool async)
+               struct drm_atomic_state *state, bool nonblock)
 {
        int nplanes = dev->mode_config.num_total_plane;
        int ncrtcs = dev->mode_config.num_crtc;
@@ -276,7 +275,7 @@ int msm_atomic_commit(struct drm_device *dev,
         * current layout.
         */
 
-       if (async) {
+       if (nonblock) {
                msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
                return 0;
        }
index 870dbe5..0b8b0e6 100644 (file)
@@ -174,7 +174,7 @@ void __msm_fence_worker(struct work_struct *work);
 int msm_atomic_check(struct drm_device *dev,
                     struct drm_atomic_state *state);
 int msm_atomic_commit(struct drm_device *dev,
-               struct drm_atomic_state *state, bool async);
+               struct drm_atomic_state *state, bool nonblock);
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
index 80398a6..fe79498 100644 (file)
@@ -138,7 +138,7 @@ static bool omap_atomic_is_pending(struct omap_drm_private *priv,
 }
 
 static int omap_atomic_commit(struct drm_device *dev,
-                             struct drm_atomic_state *state, bool async)
+                             struct drm_atomic_state *state, bool nonblock)
 {
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_atomic_state_commit *commit;
@@ -177,7 +177,7 @@ static int omap_atomic_commit(struct drm_device *dev,
        /* Swap the state, this is the point of no return. */
        drm_atomic_helper_swap_state(dev, state);
 
-       if (async)
+       if (nonblock)
                schedule_work(&commit->work);
        else
                omap_atomic_complete(commit);
index 43e5f50..812e5d3 100644 (file)
@@ -460,7 +460,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
        .page_flip = qxl_crtc_page_flip,
 };
 
-static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
 
@@ -522,12 +522,13 @@ int
 qxl_framebuffer_init(struct drm_device *dev,
                     struct qxl_framebuffer *qfb,
                     const struct drm_mode_fb_cmd2 *mode_cmd,
-                    struct drm_gem_object *obj)
+                    struct drm_gem_object *obj,
+                    const struct drm_framebuffer_funcs *funcs)
 {
        int ret;
 
        qfb->obj = obj;
-       ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+       ret = drm_framebuffer_init(dev, &qfb->base, funcs);
        if (ret) {
                qfb->obj = NULL;
                return ret;
@@ -994,7 +995,7 @@ qxl_user_framebuffer_create(struct drm_device *dev,
        if (qxl_fb == NULL)
                return NULL;
 
-       ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+       ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
        if (ret) {
                kfree(qxl_fb);
                drm_gem_object_unreference_unlocked(obj);
index 6e6b9b1..c9c5426 100644 (file)
@@ -322,8 +322,6 @@ struct qxl_device {
        struct workqueue_struct *gc_queue;
        struct work_struct gc_work;
 
-       struct work_struct fb_work;
-
        struct drm_property *hotplug_mode_update_property;
        int monitors_config_width;
        int monitors_config_height;
@@ -387,11 +385,13 @@ int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
 
 /* qxl_display.c */
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
 int
 qxl_framebuffer_init(struct drm_device *dev,
                     struct qxl_framebuffer *rfb,
                     const struct drm_mode_fb_cmd2 *mode_cmd,
-                    struct drm_gem_object *obj);
+                    struct drm_gem_object *obj,
+                    const struct drm_framebuffer_funcs *funcs);
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
 void qxl_send_monitors_config(struct qxl_device *qdev);
 int qxl_create_monitors_object(struct qxl_device *qdev);
@@ -551,7 +551,6 @@ int qxl_irq_init(struct qxl_device *qdev);
 irqreturn_t qxl_irq_handler(int irq, void *arg);
 
 /* qxl_fb.c */
-int qxl_fb_init(struct qxl_device *qdev);
 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
 
 int qxl_debugfs_add_files(struct qxl_device *qdev,
index bb7ce07..739a08c 100644 (file)
@@ -46,15 +46,6 @@ struct qxl_fbdev {
        struct list_head delayed_ops;
        void *shadow;
        int size;
-
-       /* dirty memory logging */
-       struct {
-               spinlock_t lock;
-               unsigned x1;
-               unsigned y1;
-               unsigned x2;
-               unsigned y2;
-       } dirty;
 };
 
 static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
@@ -82,169 +73,18 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
        }
 }
 
-static void qxl_fb_dirty_flush(struct fb_info *info)
-{
-       struct qxl_fbdev *qfbdev = info->par;
-       struct qxl_device *qdev = qfbdev->qdev;
-       struct qxl_fb_image qxl_fb_image;
-       struct fb_image *image = &qxl_fb_image.fb_image;
-       unsigned long flags;
-       u32 x1, x2, y1, y2;
-
-       /* TODO: hard coding 32 bpp */
-       int stride = qfbdev->qfb.base.pitches[0];
-
-       spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
-       x1 = qfbdev->dirty.x1;
-       x2 = qfbdev->dirty.x2;
-       y1 = qfbdev->dirty.y1;
-       y2 = qfbdev->dirty.y2;
-       qfbdev->dirty.x1 = 0;
-       qfbdev->dirty.x2 = 0;
-       qfbdev->dirty.y1 = 0;
-       qfbdev->dirty.y2 = 0;
-
-       spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
-       /*
-        * we are using a shadow draw buffer, at qdev->surface0_shadow
-        */
-       qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
-       image->dx = x1;
-       image->dy = y1;
-       image->width = x2 - x1 + 1;
-       image->height = y2 - y1 + 1;
-       image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
-                                        warnings */
-       image->bg_color = 0;
-       image->depth = 32;           /* TODO: take from somewhere? */
-       image->cmap.start = 0;
-       image->cmap.len = 0;
-       image->cmap.red = NULL;
-       image->cmap.green = NULL;
-       image->cmap.blue = NULL;
-       image->cmap.transp = NULL;
-       image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
-
-       qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
-       qxl_draw_opaque_fb(&qxl_fb_image, stride);
-}
-
-static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
-                            int x, int y, int width, int height)
-{
-       struct qxl_device *qdev = qfbdev->qdev;
-       unsigned long flags;
-       int x2, y2;
-
-       x2 = x + width - 1;
-       y2 = y + height - 1;
-
-       spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
-       if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
-           (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
-               if (qfbdev->dirty.y1 < y)
-                       y = qfbdev->dirty.y1;
-               if (qfbdev->dirty.y2 > y2)
-                       y2 = qfbdev->dirty.y2;
-               if (qfbdev->dirty.x1 < x)
-                       x = qfbdev->dirty.x1;
-               if (qfbdev->dirty.x2 > x2)
-                       x2 = qfbdev->dirty.x2;
-       }
-
-       qfbdev->dirty.x1 = x;
-       qfbdev->dirty.x2 = x2;
-       qfbdev->dirty.y1 = y;
-       qfbdev->dirty.y2 = y2;
-
-       spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
-       schedule_work(&qdev->fb_work);
-}
-
-static void qxl_deferred_io(struct fb_info *info,
-                           struct list_head *pagelist)
-{
-       struct qxl_fbdev *qfbdev = info->par;
-       unsigned long start, end, min, max;
-       struct page *page;
-       int y1, y2;
-
-       min = ULONG_MAX;
-       max = 0;
-       list_for_each_entry(page, pagelist, lru) {
-               start = page->index << PAGE_SHIFT;
-               end = start + PAGE_SIZE - 1;
-               min = min(min, start);
-               max = max(max, end);
-       }
-
-       if (min < max) {
-               y1 = min / info->fix.line_length;
-               y2 = (max / info->fix.line_length) + 1;
-               qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
-       }
-};
-
 static struct fb_deferred_io qxl_defio = {
        .delay          = QXL_DIRTY_DELAY,
-       .deferred_io    = qxl_deferred_io,
+       .deferred_io    = drm_fb_helper_deferred_io,
 };
 
-static void qxl_fb_fillrect(struct fb_info *info,
-                           const struct fb_fillrect *rect)
-{
-       struct qxl_fbdev *qfbdev = info->par;
-
-       drm_fb_helper_sys_fillrect(info, rect);
-       qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
-                        rect->height);
-}
-
-static void qxl_fb_copyarea(struct fb_info *info,
-                           const struct fb_copyarea *area)
-{
-       struct qxl_fbdev *qfbdev = info->par;
-
-       drm_fb_helper_sys_copyarea(info, area);
-       qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
-                        area->height);
-}
-
-static void qxl_fb_imageblit(struct fb_info *info,
-                            const struct fb_image *image)
-{
-       struct qxl_fbdev *qfbdev = info->par;
-
-       drm_fb_helper_sys_imageblit(info, image);
-       qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
-                        image->height);
-}
-
-static void qxl_fb_work(struct work_struct *work)
-{
-       struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
-       struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
-
-       qxl_fb_dirty_flush(qfbdev->helper.fbdev);
-}
-
-int qxl_fb_init(struct qxl_device *qdev)
-{
-       INIT_WORK(&qdev->fb_work, qxl_fb_work);
-       return 0;
-}
-
 static struct fb_ops qxlfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
-       .fb_fillrect = qxl_fb_fillrect,
-       .fb_copyarea = qxl_fb_copyarea,
-       .fb_imageblit = qxl_fb_imageblit,
+       .fb_fillrect = drm_fb_helper_sys_fillrect,
+       .fb_copyarea = drm_fb_helper_sys_copyarea,
+       .fb_imageblit = drm_fb_helper_sys_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
@@ -338,6 +178,57 @@ out_unref:
        return ret;
 }
 
+/*
+ * FIXME
+ * It should not be necessary to have a special dirty() callback for fbdev.
+ */
+static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
+                                  struct drm_file *file_priv,
+                                  unsigned flags, unsigned color,
+                                  struct drm_clip_rect *clips,
+                                  unsigned num_clips)
+{
+       struct qxl_device *qdev = fb->dev->dev_private;
+       struct fb_info *info = qdev->fbdev_info;
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_fb_image qxl_fb_image;
+       struct fb_image *image = &qxl_fb_image.fb_image;
+
+       /* TODO: hard coding 32 bpp */
+       int stride = qfbdev->qfb.base.pitches[0];
+
+       /*
+        * we are using a shadow draw buffer, at qdev->surface0_shadow
+        */
+       qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+                  clips->y1, clips->y2);
+       image->dx = clips->x1;
+       image->dy = clips->y1;
+       image->width = clips->x2 - clips->x1;
+       image->height = clips->y2 - clips->y1;
+       image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+                                        warnings */
+       image->bg_color = 0;
+       image->depth = 32;           /* TODO: take from somewhere? */
+       image->cmap.start = 0;
+       image->cmap.len = 0;
+       image->cmap.red = NULL;
+       image->cmap.green = NULL;
+       image->cmap.blue = NULL;
+       image->cmap.transp = NULL;
+       image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1);
+
+       qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+       qxl_draw_opaque_fb(&qxl_fb_image, stride);
+
+       return 0;
+}
+
+static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
+       .destroy = qxl_user_framebuffer_destroy,
+       .dirty = qxlfb_framebuffer_dirty,
+};
+
 static int qxlfb_create(struct qxl_fbdev *qfbdev,
                        struct drm_fb_helper_surface_size *sizes)
 {
@@ -383,7 +274,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 
        info->par = qfbdev;
 
-       qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+       qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+                            &qxlfb_fb_funcs);
 
        fb = &qfbdev->qfb.base;
 
@@ -504,7 +396,6 @@ int qxl_fbdev_init(struct qxl_device *qdev)
        qfbdev->qdev = qdev;
        qdev->mode_info.qfbdev = qfbdev;
        spin_lock_init(&qfbdev->delayed_ops_lock);
-       spin_lock_init(&qfbdev->dirty.lock);
        INIT_LIST_HEAD(&qfbdev->delayed_ops);
 
        drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
index b2977a1..2319800 100644 (file)
@@ -261,10 +261,6 @@ static int qxl_device_init(struct qxl_device *qdev,
        qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
        INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
-       r = qxl_fb_init(qdev);
-       if (r)
-               return r;
-
        return 0;
 }
 
index 7f176ec..628eb87 100644 (file)
@@ -377,7 +377,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        /* wakeup userspace */
        if (work->event)
-               drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+               drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
 
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
index c0083f0..b55aa74 100644 (file)
@@ -536,7 +536,7 @@ static struct drm_driver kms_driver = {
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
        .irq_handler = radeon_driver_irq_handler_kms,
        .ioctls = radeon_ioctls_kms,
-       .gem_free_object = radeon_gem_object_free,
+       .gem_free_object_unlocked = radeon_gem_object_free,
        .gem_open_object = radeon_gem_object_open,
        .gem_close_object = radeon_gem_object_close,
        .dumb_create = radeon_mode_dumb_create,
index d9f06cc..0d8bdda 100644 (file)
@@ -314,7 +314,7 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
                return;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       drm_send_vblank_event(dev, rcrtc->index, event);
+       drm_crtc_send_vblank_event(&rcrtc->crtc, event);
        wake_up(&rcrtc->flip_wait);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
index 24725bf..e70a4f3 100644 (file)
@@ -283,7 +283,8 @@ static void rcar_du_atomic_work(struct work_struct *work)
 }
 
 static int rcar_du_atomic_commit(struct drm_device *dev,
-                                struct drm_atomic_state *state, bool async)
+                                struct drm_atomic_state *state,
+                                bool nonblock)
 {
        struct rcar_du_device *rcdu = dev->dev_private;
        struct rcar_du_commit *commit;
@@ -328,7 +329,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
        /* Swap the state, this is the point of no return. */
        drm_atomic_helper_swap_state(dev, state);
 
-       if (async)
+       if (nonblock)
                schedule_work(&commit->work);
        else
                rcar_du_atomic_complete(commit);
index a1d94d8..7f6a55c 100644 (file)
@@ -114,27 +114,6 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
        int ret;
        u32 val;
 
-       /*
-        * FIXME(Yakir): driver should configure the CRTC output video
-        * mode with the display information which indicated the monitor
-        * support colorimetry.
-        *
-        * But don't know why the CRTC driver seems could only output the
-        * RGBaaa rightly. For example, if connect the "innolux,n116bge"
-        * eDP screen, EDID would indicated that screen only accepted the
-        * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
-        * screen would show a blue picture (RGB888 show a green picture).
-        * But if I configure CTRC to RGBaaa, and eDP driver still keep
-        * RGB666 input video mode, then screen would works prefect.
-        */
-       ret = rockchip_drm_crtc_mode_config(encoder->crtc,
-                                           DRM_MODE_CONNECTOR_eDP,
-                                           ROCKCHIP_OUT_MODE_AAAA);
-       if (ret < 0) {
-               dev_err(dp->dev, "Could not set crtc mode config (%d)\n", ret);
-               return;
-       }
-
        ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0)
                return;
@@ -158,11 +137,38 @@ static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
        /* do nothing */
 }
 
+static int
+rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
+                                     struct drm_crtc_state *crtc_state,
+                                     struct drm_connector_state *conn_state)
+{
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+       /*
+        * FIXME(Yakir): the driver should configure the CRTC output video
+        * mode using the display information that indicates the monitor's
+        * supported colorimetry.
+        *
+        * It is unclear why the CRTC driver only outputs RGBaaa correctly.
+        * For example, when the "innolux,n116bge" eDP screen is connected,
+        * its EDID indicates that the screen only accepts 6bpc mode. But if
+        * the CRTC is configured for RGB666 output, the eDP screen shows a
+        * blue picture (RGB888 shows a green picture). However, if the CRTC
+        * is configured for RGBaaa while the eDP driver keeps the RGB666
+        * input video mode, the screen works perfectly.
+        */
+       s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+       s->output_type = DRM_MODE_CONNECTOR_eDP;
+
+       return 0;
+}
+
 static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
        .mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
        .mode_set = rockchip_dp_drm_encoder_mode_set,
        .enable = rockchip_dp_drm_encoder_enable,
        .disable = rockchip_dp_drm_encoder_nop,
+       .atomic_check = rockchip_dp_drm_encoder_atomic_check,
 };
 
 static void rockchip_dp_drm_encoder_destroy(struct drm_encoder *encoder)
index 7975158..dedc65b 100644 (file)
@@ -879,7 +879,6 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
 {
        struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
        int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
-       u32 interface_pix_fmt;
        u32 val;
 
        if (clk_prepare_enable(dsi->pclk)) {
@@ -895,31 +894,41 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
 
        clk_disable_unprepare(dsi->pclk);
 
+       if (mux)
+               val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
+       else
+               val = DSI0_SEL_VOP_LIT << 16;
+
+       regmap_write(dsi->grf_regmap, GRF_SOC_CON6, val);
+       dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+}
+
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+                                struct drm_crtc_state *crtc_state,
+                                struct drm_connector_state *conn_state)
+{
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+       struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
+
        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB888:
-               interface_pix_fmt = ROCKCHIP_OUT_MODE_P888;
+               s->output_mode = ROCKCHIP_OUT_MODE_P888;
                break;
        case MIPI_DSI_FMT_RGB666:
-               interface_pix_fmt = ROCKCHIP_OUT_MODE_P666;
+               s->output_mode = ROCKCHIP_OUT_MODE_P666;
                break;
        case MIPI_DSI_FMT_RGB565:
-               interface_pix_fmt = ROCKCHIP_OUT_MODE_P565;
+               s->output_mode = ROCKCHIP_OUT_MODE_P565;
                break;
        default:
                WARN_ON(1);
-               return;
+               return -EINVAL;
        }
 
-       rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_DSI,
-                                     interface_pix_fmt);
+       s->output_type = DRM_MODE_CONNECTOR_DSI;
 
-       if (mux)
-               val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
-       else
-               val = DSI0_SEL_VOP_LIT << 16;
-
-       regmap_write(dsi->grf_regmap, GRF_SOC_CON6, val);
-       dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+       return 0;
 }
 
 static struct drm_encoder_helper_funcs
@@ -927,6 +936,7 @@ dw_mipi_dsi_encoder_helper_funcs = {
        .commit = dw_mipi_dsi_encoder_commit,
        .mode_set = dw_mipi_dsi_encoder_mode_set,
        .disable = dw_mipi_dsi_encoder_disable,
+       .atomic_check = dw_mipi_dsi_encoder_atomic_check,
 };
 
 static struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
index d5cfef7..801110f 100644 (file)
@@ -201,9 +201,6 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
        u32 val;
        int mux;
 
-       rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
-                                     ROCKCHIP_OUT_MODE_AAAA);
-
        mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
        if (mux)
                val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
@@ -215,11 +212,25 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
                (mux) ? "LIT" : "BIG");
 }
 
+static int
+dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
+                                     struct drm_crtc_state *crtc_state,
+                                     struct drm_connector_state *conn_state)
+{
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+       s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+       s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+       return 0;
+}
+
 static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
        .mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
        .mode_set   = dw_hdmi_rockchip_encoder_mode_set,
        .enable     = dw_hdmi_rockchip_encoder_enable,
        .disable    = dw_hdmi_rockchip_encoder_disable,
+       .atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
 };
 
 static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
index 10d62ff..f8b4feb 100644 (file)
@@ -500,9 +500,6 @@ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
        struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
 
-       rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
-                                     ROCKCHIP_OUT_MODE_P888);
-
        inno_hdmi_set_pwr_mode(hdmi, NORMAL);
 }
 
@@ -520,11 +517,25 @@ static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
        return true;
 }
 
+static int
+inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+                              struct drm_crtc_state *crtc_state,
+                              struct drm_connector_state *conn_state)
+{
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+       s->output_mode = ROCKCHIP_OUT_MODE_P888;
+       s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+       return 0;
+}
+
 static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
        .enable     = inno_hdmi_encoder_enable,
        .disable    = inno_hdmi_encoder_disable,
        .mode_fixup = inno_hdmi_encoder_mode_fixup,
        .mode_set   = inno_hdmi_encoder_mode_set,
+       .atomic_check = inno_hdmi_encoder_atomic_check,
 };
 
 static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
@@ -855,8 +866,9 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
 
        hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
        if (IS_ERR(hdmi->ddc)) {
+               ret = PTR_ERR(hdmi->ddc);
                hdmi->ddc = NULL;
-               return PTR_ERR(hdmi->ddc);
+               return ret;
        }
 
        /*
index f556a8f..399adf3 100644 (file)
@@ -36,6 +36,8 @@
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
+static bool is_support_iommu = true;
+
 /*
  * Attach a (component) device to the shared drm dma mapping from master drm
  * device.  This is used by the VOPs to map GEM buffers to a common DMA
@@ -47,6 +49,9 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
        struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
        int ret;
 
+       if (!is_support_iommu)
+               return 0;
+
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
@@ -59,6 +64,9 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                    struct device *dev)
 {
+       if (!is_support_iommu)
+               return;
+
        arm_iommu_detach_device(dev);
 }
 
@@ -127,7 +135,7 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
 static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
 {
        struct rockchip_drm_private *private;
-       struct dma_iommu_mapping *mapping;
+       struct dma_iommu_mapping *mapping = NULL;
        struct device *dev = drm_dev->dev;
        struct drm_connector *connector;
        int ret;
@@ -152,23 +160,26 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
                goto err_config_cleanup;
        }
 
-       /* TODO(djkurtz): fetch the mapping start/size from somewhere */
-       mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
-                                          SZ_2G);
-       if (IS_ERR(mapping)) {
-               ret = PTR_ERR(mapping);
-               goto err_config_cleanup;
-       }
+       if (is_support_iommu) {
+               /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+               mapping = arm_iommu_create_mapping(&platform_bus_type,
+                                                  0x00000000,
+                                                  SZ_2G);
+               if (IS_ERR(mapping)) {
+                       ret = PTR_ERR(mapping);
+                       goto err_config_cleanup;
+               }
 
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-       if (ret)
-               goto err_release_mapping;
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (ret)
+                       goto err_release_mapping;
 
-       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+               dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
 
-       ret = arm_iommu_attach_device(dev, mapping);
-       if (ret)
-               goto err_release_mapping;
+               ret = arm_iommu_attach_device(dev, mapping);
+               if (ret)
+                       goto err_release_mapping;
+       }
 
        /* Try to bind all sub drivers. */
        ret = component_bind_all(dev, drm_dev);
@@ -218,6 +229,8 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
        if (ret)
                goto err_vblank_cleanup;
 
+       if (is_support_iommu)
+               arm_iommu_release_mapping(mapping);
        return 0;
 err_vblank_cleanup:
        drm_vblank_cleanup(drm_dev);
@@ -226,9 +239,11 @@ err_kms_helper_poll_fini:
 err_unbind:
        component_unbind_all(dev, drm_dev);
 err_detach_device:
-       arm_iommu_detach_device(dev);
+       if (is_support_iommu)
+               arm_iommu_detach_device(dev);
 err_release_mapping:
-       arm_iommu_release_mapping(dev->archdata.mapping);
+       if (is_support_iommu)
+               arm_iommu_release_mapping(mapping);
 err_config_cleanup:
        drm_mode_config_cleanup(drm_dev);
        drm_dev->dev_private = NULL;
@@ -243,8 +258,8 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
        drm_vblank_cleanup(drm_dev);
        drm_kms_helper_poll_fini(drm_dev);
        component_unbind_all(dev, drm_dev);
-       arm_iommu_detach_device(dev);
-       arm_iommu_release_mapping(dev->archdata.mapping);
+       if (is_support_iommu)
+               arm_iommu_detach_device(dev);
        drm_mode_config_cleanup(drm_dev);
        drm_dev->dev_private = NULL;
 
@@ -488,6 +503,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
         * works as expected.
         */
        for (i = 0;; i++) {
+               struct device_node *iommu;
+
                port = of_parse_phandle(np, "ports", i);
                if (!port)
                        break;
@@ -497,6 +514,17 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
                        continue;
                }
 
+               iommu = of_parse_phandle(port->parent, "iommus", 0);
+               if (!iommu || !of_device_is_available(iommu->parent)) {
+                       dev_dbg(dev, "no iommu attached for %s, using non-iommu buffers\n",
+                               port->parent->full_name);
+                       /*
+                        * If any crtc does not support iommu, force all
+                        * crtcs to use non-iommu buffers.
+                        */
+                       is_support_iommu = false;
+               }
+
                component_match_add(dev, &match, compare_of, port->parent);
                of_node_put(port);
        }
index 00d17d7..56f43a3 100644 (file)
@@ -50,6 +50,14 @@ struct rockchip_atomic_commit {
        struct mutex lock;
 };
 
+struct rockchip_crtc_state {
+       struct drm_crtc_state base;
+       int output_type;
+       int output_mode;
+};
+#define to_rockchip_crtc_state(s) \
+               container_of(s, struct rockchip_crtc_state, base)
+
 /*
  * Rockchip drm private structure.
  *
@@ -68,8 +76,6 @@ void rockchip_drm_atomic_work(struct work_struct *work);
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
                                 const struct rockchip_crtc_funcs *crtc_funcs);
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
-                                 int out_mode);
 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
                                   struct device *dev);
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
index 3b8f652..8c10163 100644 (file)
@@ -276,7 +276,7 @@ void rockchip_drm_atomic_work(struct work_struct *work)
 
 int rockchip_drm_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *state,
-                              bool async)
+                              bool nonblock)
 {
        struct rockchip_drm_private *private = dev->dev_private;
        struct rockchip_atomic_commit *commit = &private->commit;
@@ -286,7 +286,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
        if (ret)
                return ret;
 
-       /* serialize outstanding asynchronous commits */
+       /* serialize outstanding nonblocking commits */
        mutex_lock(&commit->lock);
        flush_work(&commit->work);
 
@@ -295,7 +295,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
        commit->dev = dev;
        commit->state = state;
 
-       if (async)
+       if (nonblock)
                schedule_work(&commit->work);
        else
                rockchip_atomic_commit_complete(commit);
index a619f12..bf55cda 100644 (file)
@@ -310,7 +310,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
        uint16_t vsu_mode;
        uint16_t lb_mode;
        uint32_t val;
-       int vskiplines;
+       int vskiplines = 0;
 
        if (dst_w > 3840) {
                DRM_ERROR("Maximum destination width (3840) exceeded\n");
@@ -560,6 +560,22 @@ static void vop_plane_destroy(struct drm_plane *plane)
        drm_plane_cleanup(plane);
 }
 
+static int vop_plane_prepare_fb(struct drm_plane *plane,
+                               const struct drm_plane_state *new_state)
+{
+       if (plane->state->fb)
+               drm_framebuffer_reference(plane->state->fb);
+
+       return 0;
+}
+
+static void vop_plane_cleanup_fb(struct drm_plane *plane,
+                                const struct drm_plane_state *old_state)
+{
+       if (old_state->fb)
+               drm_framebuffer_unreference(old_state->fb);
+}
+
 static int vop_plane_atomic_check(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
@@ -756,6 +772,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
+       .prepare_fb = vop_plane_prepare_fb,
+       .cleanup_fb = vop_plane_cleanup_fb,
        .atomic_check = vop_plane_atomic_check,
        .atomic_update = vop_plane_atomic_update,
        .atomic_disable = vop_plane_atomic_disable,
@@ -818,38 +836,6 @@ static const struct drm_plane_funcs vop_plane_funcs = {
        .atomic_destroy_state = vop_atomic_plane_destroy_state,
 };
 
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
-                                 int connector_type,
-                                 int out_mode)
-{
-       struct vop *vop = to_vop(crtc);
-
-       if (WARN_ON(!vop->is_enabled))
-               return -EINVAL;
-
-       switch (connector_type) {
-       case DRM_MODE_CONNECTOR_LVDS:
-               VOP_CTRL_SET(vop, rgb_en, 1);
-               break;
-       case DRM_MODE_CONNECTOR_eDP:
-               VOP_CTRL_SET(vop, edp_en, 1);
-               break;
-       case DRM_MODE_CONNECTOR_HDMIA:
-               VOP_CTRL_SET(vop, hdmi_en, 1);
-               break;
-       case DRM_MODE_CONNECTOR_DSI:
-               VOP_CTRL_SET(vop, mipi_en, 1);
-               break;
-       default:
-               DRM_ERROR("unsupport connector_type[%d]\n", connector_type);
-               return -EINVAL;
-       };
-       VOP_CTRL_SET(vop, out_mode, out_mode);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);
-
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -931,6 +917,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
        u16 hdisplay = adjusted_mode->hdisplay;
@@ -985,6 +972,23 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
        val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
        val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
        VOP_CTRL_SET(vop, pin_pol, val);
+       switch (s->output_type) {
+       case DRM_MODE_CONNECTOR_LVDS:
+               VOP_CTRL_SET(vop, rgb_en, 1);
+               break;
+       case DRM_MODE_CONNECTOR_eDP:
+               VOP_CTRL_SET(vop, edp_en, 1);
+               break;
+       case DRM_MODE_CONNECTOR_HDMIA:
+               VOP_CTRL_SET(vop, hdmi_en, 1);
+               break;
+       case DRM_MODE_CONNECTOR_DSI:
+               VOP_CTRL_SET(vop, mipi_en, 1);
+               break;
+       default:
+               DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+       }
+       VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
        val = hact_st << 16;
@@ -1044,13 +1048,34 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
        drm_crtc_cleanup(crtc);
 }
 
+static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+       struct rockchip_crtc_state *rockchip_state;
+
+       rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
+       if (!rockchip_state)
+               return NULL;
+
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
+       return &rockchip_state->base;
+}
+
+static void vop_crtc_destroy_state(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *state)
+{
+       struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
+
+       __drm_atomic_helper_crtc_destroy_state(crtc, &s->base);
+       kfree(s);
+}
+
 static const struct drm_crtc_funcs vop_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .destroy = vop_crtc_destroy,
        .reset = drm_atomic_helper_crtc_reset,
-       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+       .atomic_duplicate_state = vop_crtc_duplicate_state,
+       .atomic_destroy_state = vop_crtc_destroy_state,
 };
 
 static bool vop_win_pending_is_complete(struct vop_win *vop_win)
index 88643ab..1e154fc 100644 (file)
@@ -440,7 +440,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
        event = scrtc->event;
        scrtc->event = NULL;
        if (event) {
-               drm_send_vblank_event(dev, 0, event);
+               drm_crtc_send_vblank_event(&scrtc->crtc, event);
                drm_vblank_put(dev, 0);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
index 6bd6aba..872495e 100644 (file)
@@ -202,7 +202,7 @@ static void sti_atomic_work(struct work_struct *work)
 }
 
 static int sti_atomic_commit(struct drm_device *drm,
-                            struct drm_atomic_state *state, bool async)
+                            struct drm_atomic_state *state, bool nonblock)
 {
        struct sti_private *private = drm->dev_private;
        int err;
@@ -211,7 +211,7 @@ static int sti_atomic_commit(struct drm_device *drm,
        if (err)
                return err;
 
-       /* serialize outstanding asynchronous commits */
+       /* serialize outstanding nonblocking commits */
        mutex_lock(&private->commit.lock);
        flush_work(&private->commit.work);
 
@@ -223,7 +223,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 
        drm_atomic_helper_swap_state(drm, state);
 
-       if (async)
+       if (nonblock)
                sti_atomic_schedule(private, state);
        else
                sti_atomic_complete(private, state);
index 2be88eb..71a52f4 100644 (file)
@@ -74,7 +74,7 @@ static void tegra_atomic_work(struct work_struct *work)
 }
 
 static int tegra_atomic_commit(struct drm_device *drm,
-                              struct drm_atomic_state *state, bool async)
+                              struct drm_atomic_state *state, bool nonblock)
 {
        struct tegra_drm *tegra = drm->dev_private;
        int err;
@@ -83,7 +83,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
        if (err)
                return err;
 
-       /* serialize outstanding asynchronous commits */
+       /* serialize outstanding nonblocking commits */
        mutex_lock(&tegra->commit.lock);
        flush_work(&tegra->commit.work);
 
@@ -95,7 +95,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
 
        drm_atomic_helper_swap_state(drm, state);
 
-       if (async)
+       if (nonblock)
                tegra_atomic_schedule(tegra, state);
        else
                tegra_atomic_complete(tegra, state);
index 051e5e1..79027b1 100644 (file)
@@ -707,7 +707,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
                        event = tilcdc_crtc->event;
                        tilcdc_crtc->event = NULL;
                        if (event)
-                               drm_send_vblank_event(dev, 0, event);
+                               drm_crtc_send_vblank_event(crtc, event);
 
                        spin_unlock_irqrestore(&dev->event_lock, flags);
                }
index 4a064ef..0b03d34 100644 (file)
@@ -81,8 +81,6 @@ struct udl_framebuffer {
        struct drm_framebuffer base;
        struct udl_gem_object *obj;
        bool active_16; /* active on the 16-bit channel */
-       int x1, y1, x2, y2; /* dirty rect */
-       spinlock_t dirty_lock;
 };
 
 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
index fd1eb9d..4a9b432 100644 (file)
@@ -77,68 +77,6 @@ static uint16_t rgb16(uint32_t col)
 }
 #endif
 
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- *   Touching ANY framebuffer memory that triggers a page fault
- *   in fb_defio will cause a deadlock, when it also tries to
- *   grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
-                                 struct list_head *pagelist)
-{
-       struct page *cur;
-       struct fb_deferred_io *fbdefio = info->fbdefio;
-       struct udl_fbdev *ufbdev = info->par;
-       struct drm_device *dev = ufbdev->ufb.base.dev;
-       struct udl_device *udl = dev->dev_private;
-       struct urb *urb;
-       char *cmd;
-       cycles_t start_cycles, end_cycles;
-       int bytes_sent = 0;
-       int bytes_identical = 0;
-       int bytes_rendered = 0;
-
-       if (!fb_defio)
-               return;
-
-       start_cycles = get_cycles();
-
-       urb = udl_get_urb(dev);
-       if (!urb)
-               return;
-
-       cmd = urb->transfer_buffer;
-
-       /* walk the written page list and render each to device */
-       list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
-               if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
-                                    &urb, (char *) info->fix.smem_start,
-                                    &cmd, cur->index << PAGE_SHIFT,
-                                    cur->index << PAGE_SHIFT,
-                                    PAGE_SIZE, &bytes_identical, &bytes_sent))
-                       goto error;
-               bytes_rendered += PAGE_SIZE;
-       }
-
-       if (cmd > (char *) urb->transfer_buffer) {
-               /* Send partial buffer remaining before exiting */
-               int len = cmd - (char *) urb->transfer_buffer;
-               udl_submit_urb(dev, urb, len);
-               bytes_sent += len;
-       } else
-               udl_urb_completion(urb);
-
-error:
-       atomic_add(bytes_sent, &udl->bytes_sent);
-       atomic_add(bytes_identical, &udl->bytes_identical);
-       atomic_add(bytes_rendered, &udl->bytes_rendered);
-       end_cycles = get_cycles();
-       atomic_add(((unsigned int) ((end_cycles - start_cycles)
-                   >> 10)), /* Kcycles */
-                  &udl->cpu_kcycles_used);
-}
-
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                      int width, int height)
 {
@@ -152,9 +90,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
        struct urb *urb;
        int aligned_x;
        int bpp = (fb->base.bits_per_pixel / 8);
-       int x2, y2;
-       bool store_for_later = false;
-       unsigned long flags;
 
        if (!fb->active_16)
                return 0;
@@ -180,38 +115,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
            (y + height > fb->base.height))
                return -EINVAL;
 
-       /* if we are in atomic just store the info
-          can't test inside spin lock */
-       if (in_atomic())
-               store_for_later = true;
-
-       x2 = x + width - 1;
-       y2 = y + height - 1;
-
-       spin_lock_irqsave(&fb->dirty_lock, flags);
-
-       if (fb->y1 < y)
-               y = fb->y1;
-       if (fb->y2 > y2)
-               y2 = fb->y2;
-       if (fb->x1 < x)
-               x = fb->x1;
-       if (fb->x2 > x2)
-               x2 = fb->x2;
-
-       if (store_for_later) {
-               fb->x1 = x;
-               fb->x2 = x2;
-               fb->y1 = y;
-               fb->y2 = y2;
-               spin_unlock_irqrestore(&fb->dirty_lock, flags);
-               return 0;
-       }
-
-       fb->x1 = fb->y1 = INT_MAX;
-       fb->x2 = fb->y2 = 0;
-
-       spin_unlock_irqrestore(&fb->dirty_lock, flags);
        start_cycles = get_cycles();
 
        urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                return 0;
        cmd = urb->transfer_buffer;
 
-       for (i = y; i <= y2 ; i++) {
+       for (i = y; i < y + height ; i++) {
                const int line_offset = fb->base.pitches[0] * i;
                const int byte_offset = line_offset + (x * bpp);
                const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
                if (udl_render_hline(dev, bpp, &urb,
                                     (char *) fb->obj->vmapping,
                                     &cmd, byte_offset, dev_byte_offset,
-                                    (x2 - x + 1) * bpp,
+                                    width * bpp,
                                     &bytes_identical, &bytes_sent))
                        goto error;
        }
@@ -283,36 +186,6 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        return 0;
 }
 
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-       struct udl_fbdev *ufbdev = info->par;
-
-       drm_fb_helper_sys_fillrect(info, rect);
-
-       udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
-                         rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
-       struct udl_fbdev *ufbdev = info->par;
-
-       drm_fb_helper_sys_copyarea(info, region);
-
-       udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
-                         region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-       struct udl_fbdev *ufbdev = info->par;
-
-       drm_fb_helper_sys_imageblit(info, image);
-
-       udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
-                         image->height);
-}
-
 /*
  * It's common for several clients to have framebuffer open simultaneously.
  * e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-                       fbdefio->deferred_io = udlfb_dpy_deferred_io;
+                       fbdefio->deferred_io = drm_fb_helper_deferred_io;
                }
 
                info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@ static struct fb_ops udlfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_fillrect = udl_fb_fillrect,
-       .fb_copyarea = udl_fb_copyarea,
-       .fb_imageblit = udl_fb_imageblit,
+       .fb_fillrect = drm_fb_helper_sys_fillrect,
+       .fb_copyarea = drm_fb_helper_sys_copyarea,
+       .fb_imageblit = drm_fb_helper_sys_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@ udl_framebuffer_init(struct drm_device *dev,
 {
        int ret;
 
-       spin_lock_init(&ufb->dirty_lock);
        ufb->obj = obj;
        drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
        ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
index 5848104..e53df59 100644 (file)
@@ -5,6 +5,7 @@ config DRM_VC4
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
+       select DRM_PANEL
        help
          Choose this option if you have a system that has a Broadcom
          VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
index 4c6a99f..fb77db7 100644 (file)
@@ -7,6 +7,7 @@ vc4-y := \
        vc4_bo.o \
        vc4_crtc.o \
        vc4_drv.o \
+       vc4_dpi.o \
        vc4_kms.o \
        vc4_gem.o \
        vc4_hdmi.o \
index 355ee4b..231356f 100644 (file)
@@ -49,6 +49,10 @@ struct vc4_crtc {
        /* Which HVS channel we're using for our CRTC. */
        int channel;
 
+       u8 lut_r[256];
+       u8 lut_g[256];
+       u8 lut_b[256];
+
        struct drm_pending_vblank_event *event;
 };
 
@@ -147,6 +151,46 @@ static void vc4_crtc_destroy(struct drm_crtc *crtc)
        drm_crtc_cleanup(crtc);
 }
 
+static void
+vc4_crtc_lut_load(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+       u32 i;
+
+       /* The LUT memory is laid out with each HVS channel in order,
+        * each of which takes 256 writes for R, 256 for G, then 256
+        * for B.
+        */
+       HVS_WRITE(SCALER_GAMADDR,
+                 SCALER_GAMADDR_AUTOINC |
+                 (vc4_crtc->channel * 3 * crtc->gamma_size));
+
+       for (i = 0; i < crtc->gamma_size; i++)
+               HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
+       for (i = 0; i < crtc->gamma_size; i++)
+               HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
+       for (i = 0; i < crtc->gamma_size; i++)
+               HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+}
+
+static void
+vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                  uint32_t start, uint32_t size)
+{
+       struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+       u32 i;
+
+       for (i = start; i < start + size; i++) {
+               vc4_crtc->lut_r[i] = r[i] >> 8;
+               vc4_crtc->lut_g[i] = g[i] >> 8;
+               vc4_crtc->lut_b[i] = b[i] >> 8;
+       }
+
+       vc4_crtc_lut_load(crtc);
+}
+
 static u32 vc4_get_fifo_full_level(u32 format)
 {
        static const u32 fifo_len_bytes = 64;
@@ -260,8 +304,14 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
        HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
                  SCALER_DISPBKGND_AUTOHS |
+                 SCALER_DISPBKGND_GAMMA |
                  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
 
+       /* Reload the LUT, since the SRAMs would have been disabled if
+        * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
+        */
+       vc4_crtc_lut_load(crtc);
+
        if (debug_dump_regs) {
                DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
                vc4_crtc_dump_regs(vc4_crtc);
@@ -613,6 +663,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = vc4_crtc_duplicate_state,
        .atomic_destroy_state = vc4_crtc_destroy_state,
+       .gamma_set = vc4_crtc_gamma_set,
 };
 
 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -711,6 +762,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
        primary_plane->crtc = crtc;
        vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
        vc4_crtc->channel = vc4_crtc->data->hvs_channel;
+       drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
 
        /* Set up some arbitrary number of planes.  We're not limited
         * by a set number of physical registers, just the space in
@@ -751,6 +803,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 
        vc4_set_crtc_possible_masks(drm, crtc);
 
+       for (i = 0; i < crtc->gamma_size; i++) {
+               vc4_crtc->lut_r[i] = i;
+               vc4_crtc->lut_g[i] = i;
+               vc4_crtc->lut_b[i] = i;
+       }
+
        platform_set_drvdata(pdev, vc4_crtc);
 
        return 0;
index d76ad10..245115d 100644 (file)
@@ -17,6 +17,7 @@
 
 static const struct drm_info_list vc4_debugfs_list[] = {
        {"bo_stats", vc4_bo_stats_debugfs, 0},
+       {"dpi_regs", vc4_dpi_debugfs_regs, 0},
        {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
        {"hvs_regs", vc4_hvs_debugfs_regs, 0},
        {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
new file mode 100644 (file)
index 0000000..9817dbf
--- /dev/null
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 DPI module
+ *
+ * The VC4 DPI hardware supports MIPI DPI type 4 and Nokia ViSSI
+ * signals, which are routed out to GPIO0-27 with the ALT2 function.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drm_panel.h"
+#include "linux/clk.h"
+#include "linux/component.h"
+#include "linux/of_graph.h"
+#include "linux/of_platform.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DPI_C                  0x00
+# define DPI_OUTPUT_ENABLE_MODE                BIT(16)
+
+/* The order field takes the incoming 24 bit RGB from the pixel valve
+ * and shuffles the 3 channels.
+ */
+# define DPI_ORDER_MASK                        VC4_MASK(15, 14)
+# define DPI_ORDER_SHIFT               14
+# define DPI_ORDER_RGB                 0
+# define DPI_ORDER_BGR                 1
+# define DPI_ORDER_GRB                 2
+# define DPI_ORDER_BRG                 3
+
+/* The format field takes the ORDER-shuffled pixel valve data and
+ * formats it onto the output lines.
+ */
+# define DPI_FORMAT_MASK               VC4_MASK(13, 11)
+# define DPI_FORMAT_SHIFT              11
+/* This define is named in the hardware, but actually just outputs 0. */
+# define DPI_FORMAT_9BIT_666_RGB       0
+/* Outputs 00000000rrrrrggggggbbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_1    1
+/* Outputs 000rrrrr00gggggg000bbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_2    2
+/* Outputs 00rrrrr000gggggg00bbbbb0 */
+# define DPI_FORMAT_16BIT_565_RGB_3    3
+/* Outputs 000000rrrrrrggggggbbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_1    4
+/* Outputs 00rrrrrr00gggggg00bbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_2    5
+/* Outputs rrrrrrrrggggggggbbbbbbbb */
+# define DPI_FORMAT_24BIT_888_RGB      6
+
+/* Reverses the polarity of the corresponding signal */
+# define DPI_PIXEL_CLK_INVERT          BIT(10)
+# define DPI_HSYNC_INVERT              BIT(9)
+# define DPI_VSYNC_INVERT              BIT(8)
+# define DPI_OUTPUT_ENABLE_INVERT      BIT(7)
+
+/* Outputs the signal on the falling clock edge instead of rising. */
+# define DPI_HSYNC_NEGATE              BIT(6)
+# define DPI_VSYNC_NEGATE              BIT(5)
+# define DPI_OUTPUT_ENABLE_NEGATE      BIT(4)
+
+/* Disables the signal */
+# define DPI_HSYNC_DISABLE             BIT(3)
+# define DPI_VSYNC_DISABLE             BIT(2)
+# define DPI_OUTPUT_ENABLE_DISABLE     BIT(1)
+
+/* Power gate to the device, full reset at 0 -> 1 transition */
+# define DPI_ENABLE                    BIT(0)
+
+/* All other registers besides DPI_C return the ID */
+#define DPI_ID                 0x04
+# define DPI_ID_VALUE          0x00647069
+
+/* General DPI hardware state. */
+struct vc4_dpi {
+       struct platform_device *pdev;
+
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       struct drm_panel *panel;
+
+       void __iomem *regs;
+
+       struct clk *pixel_clock;
+       struct clk *core_clock;
+};
+
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
+/* VC4 DPI encoder KMS struct */
+struct vc4_dpi_encoder {
+       struct vc4_encoder base;
+       struct vc4_dpi *dpi;
+};
+
+static inline struct vc4_dpi_encoder *
+to_vc4_dpi_encoder(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct vc4_dpi_encoder, base.base);
+}
+
+/* VC4 DPI connector KMS struct */
+struct vc4_dpi_connector {
+       struct drm_connector base;
+       struct vc4_dpi *dpi;
+
+       /* Since the connector is attached to just the one encoder,
+        * this is the reference to it so we can do the best_encoder()
+        * hook.
+        */
+       struct drm_encoder *encoder;
+};
+
+static inline struct vc4_dpi_connector *
+to_vc4_dpi_connector(struct drm_connector *connector)
+{
+       return container_of(connector, struct vc4_dpi_connector, base);
+}
+
+#define DPI_REG(reg) { reg, #reg }
+static const struct {
+       u32 reg;
+       const char *name;
+} dpi_regs[] = {
+       DPI_REG(DPI_C),
+       DPI_REG(DPI_ID),
+};
+
+static void vc4_dpi_dump_regs(struct vc4_dpi *dpi)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+               DRM_INFO("0x%04x (%s): 0x%08x\n",
+                        dpi_regs[i].reg, dpi_regs[i].name,
+                        DPI_READ(dpi_regs[i].reg));
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_dpi *dpi = vc4->dpi;
+       int i;
+
+       if (!dpi)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+               seq_printf(m, "%s (0x%04x): 0x%08x\n",
+                          dpi_regs[i].name, dpi_regs[i].reg,
+                          DPI_READ(dpi_regs[i].reg));
+       }
+
+       return 0;
+}
+#endif
+
+static enum drm_connector_status
+vc4_dpi_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct vc4_dpi_connector *vc4_connector =
+               to_vc4_dpi_connector(connector);
+       struct vc4_dpi *dpi = vc4_connector->dpi;
+
+       if (dpi->panel)
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
+}
+
+static void vc4_dpi_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
+{
+       struct vc4_dpi_connector *vc4_connector =
+               to_vc4_dpi_connector(connector);
+       struct vc4_dpi *dpi = vc4_connector->dpi;
+
+       if (dpi->panel)
+               return drm_panel_get_modes(dpi->panel);
+
+       return 0;
+}
+
+static struct drm_encoder *
+vc4_dpi_connector_best_encoder(struct drm_connector *connector)
+{
+       struct vc4_dpi_connector *dpi_connector =
+               to_vc4_dpi_connector(connector);
+       return dpi_connector->encoder;
+}
+
+static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .detect = vc4_dpi_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = vc4_dpi_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
+       .get_modes = vc4_dpi_connector_get_modes,
+       .best_encoder = vc4_dpi_connector_best_encoder,
+};
+
+static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
+                                                   struct vc4_dpi *dpi)
+{
+       struct drm_connector *connector = NULL;
+       struct vc4_dpi_connector *dpi_connector;
+       int ret = 0;
+
+       dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
+                                    GFP_KERNEL);
+       if (!dpi_connector) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       connector = &dpi_connector->base;
+
+       dpi_connector->encoder = dpi->encoder;
+       dpi_connector->dpi = dpi;
+
+       drm_connector_init(dev, connector, &vc4_dpi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DPI);
+       drm_connector_helper_add(connector, &vc4_dpi_connector_helper_funcs);
+
+       connector->polled = 0;
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       drm_mode_connector_attach_encoder(connector, dpi->encoder);
+
+       return connector;
+
+ fail:
+       if (connector)
+               vc4_dpi_connector_destroy(connector);
+
+       return ERR_PTR(ret);
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
+{
+       struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+       struct vc4_dpi *dpi = vc4_encoder->dpi;
+
+       drm_panel_disable(dpi->panel);
+
+       clk_disable_unprepare(dpi->pixel_clock);
+
+       drm_panel_unprepare(dpi->panel);
+}
+
+static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+{
+       struct drm_display_mode *mode = &encoder->crtc->mode;
+       struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+       struct vc4_dpi *dpi = vc4_encoder->dpi;
+       u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
+       int ret;
+
+       ret = drm_panel_prepare(dpi->panel);
+       if (ret) {
+               DRM_ERROR("Panel failed to prepare\n");
+               return;
+       }
+
+       if (dpi->connector->display_info.num_bus_formats) {
+               u32 bus_format = dpi->connector->display_info.bus_formats[0];
+
+               switch (bus_format) {
+               case MEDIA_BUS_FMT_RGB888_1X24:
+                       dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+                                              DPI_FORMAT);
+                       break;
+               case MEDIA_BUS_FMT_BGR888_1X24:
+                       dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+                                              DPI_FORMAT);
+                       dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+                       break;
+               case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+                       dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
+                                              DPI_FORMAT);
+                       break;
+               case MEDIA_BUS_FMT_RGB666_1X18:
+                       dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
+                                              DPI_FORMAT);
+                       break;
+               case MEDIA_BUS_FMT_RGB565_1X16:
+                       dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+                                              DPI_FORMAT);
+                       break;
+               default:
+                       DRM_ERROR("Unknown media bus format %d\n", bus_format);
+                       break;
+               }
+       }
+
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               dpi_c |= DPI_HSYNC_INVERT;
+       else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+               dpi_c |= DPI_HSYNC_DISABLE;
+
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               dpi_c |= DPI_VSYNC_INVERT;
+       else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+               dpi_c |= DPI_VSYNC_DISABLE;
+
+       DPI_WRITE(DPI_C, dpi_c);
+
+       ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
+       if (ret)
+               DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+       ret = clk_prepare_enable(dpi->pixel_clock);
+       if (ret)
+               DRM_ERROR("Failed to enable pixel clock: %d\n", ret);
+
+       ret = drm_panel_enable(dpi->panel);
+       if (ret) {
+               DRM_ERROR("Panel failed to enable\n");
+               drm_panel_unprepare(dpi->panel);
+               return;
+       }
+}
+
+static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
+       .disable = vc4_dpi_encoder_disable,
+       .enable = vc4_dpi_encoder_enable,
+};
+
+static const struct of_device_id vc4_dpi_dt_match[] = {
+       { .compatible = "brcm,bcm2835-dpi", .data = NULL },
+       {}
+};
+
+/* Walks the OF graph to find the panel node and then asks DRM to look
+ * up the panel.
+ */
+static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
+{
+       struct device_node *endpoint, *panel_node;
+       struct device_node *np = dev->of_node;
+       struct drm_panel *panel;
+
+       endpoint = of_graph_get_next_endpoint(np, NULL);
+       if (!endpoint) {
+               dev_err(dev, "no endpoint to fetch DPI panel\n");
+               return NULL;
+       }
+
+       /* don't proceed if we have an endpoint but no panel_node tied to it */
+       panel_node = of_graph_get_remote_port_parent(endpoint);
+       of_node_put(endpoint);
+       if (!panel_node) {
+               dev_err(dev, "no valid panel node\n");
+               return NULL;
+       }
+
+       panel = of_drm_find_panel(panel_node);
+       of_node_put(panel_node);
+
+       return panel;
+}
+
+static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       struct vc4_dpi *dpi;
+       struct vc4_dpi_encoder *vc4_dpi_encoder;
+       int ret;
+
+       dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+       if (!dpi)
+               return -ENOMEM;
+
+       vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
+                                      GFP_KERNEL);
+       if (!vc4_dpi_encoder)
+               return -ENOMEM;
+       vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
+       vc4_dpi_encoder->dpi = dpi;
+       dpi->encoder = &vc4_dpi_encoder->base.base;
+
+       dpi->pdev = pdev;
+       dpi->regs = vc4_ioremap_regs(pdev, 0);
+       if (IS_ERR(dpi->regs))
+               return PTR_ERR(dpi->regs);
+
+       vc4_dpi_dump_regs(dpi);
+
+       if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
+               dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
+                       DPI_READ(DPI_ID), DPI_ID_VALUE);
+               return -ENODEV;
+       }
+
+       dpi->core_clock = devm_clk_get(dev, "core");
+       if (IS_ERR(dpi->core_clock)) {
+               ret = PTR_ERR(dpi->core_clock);
+               if (ret != -EPROBE_DEFER)
+                       DRM_ERROR("Failed to get core clock: %d\n", ret);
+               return ret;
+       }
+       dpi->pixel_clock = devm_clk_get(dev, "pixel");
+       if (IS_ERR(dpi->pixel_clock)) {
+               ret = PTR_ERR(dpi->pixel_clock);
+               if (ret != -EPROBE_DEFER)
+                       DRM_ERROR("Failed to get pixel clock: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(dpi->core_clock);
+       if (ret)
+               DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+
+       dpi->panel = vc4_dpi_get_panel(dev);
+
+       drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
+                        DRM_MODE_ENCODER_DPI, NULL);
+       drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+
+       dpi->connector = vc4_dpi_connector_init(drm, dpi);
+       if (IS_ERR(dpi->connector)) {
+               ret = PTR_ERR(dpi->connector);
+               goto err_destroy_encoder;
+       }
+
+       if (dpi->panel)
+               drm_panel_attach(dpi->panel, dpi->connector);
+
+       dev_set_drvdata(dev, dpi);
+
+       vc4->dpi = dpi;
+
+       return 0;
+
+err_destroy_encoder:
+       drm_encoder_cleanup(dpi->encoder);
+       clk_disable_unprepare(dpi->core_clock);
+       return ret;
+}
+
+static void vc4_dpi_unbind(struct device *dev, struct device *master,
+                          void *data)
+{
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       struct vc4_dpi *dpi = dev_get_drvdata(dev);
+
+       if (dpi->panel)
+               drm_panel_detach(dpi->panel);
+
+       vc4_dpi_connector_destroy(dpi->connector);
+       drm_encoder_cleanup(dpi->encoder);
+
+       clk_disable_unprepare(dpi->core_clock);
+
+       vc4->dpi = NULL;
+}
+
+static const struct component_ops vc4_dpi_ops = {
+       .bind   = vc4_dpi_bind,
+       .unbind = vc4_dpi_unbind,
+};
+
+static int vc4_dpi_dev_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &vc4_dpi_ops);
+}
+
+static int vc4_dpi_dev_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &vc4_dpi_ops);
+       return 0;
+}
+
+struct platform_driver vc4_dpi_driver = {
+       .probe = vc4_dpi_dev_probe,
+       .remove = vc4_dpi_dev_remove,
+       .driver = {
+               .name = "vc4_dpi",
+               .of_match_table = vc4_dpi_dt_match,
+       },
+};
index b7d2ff0..143dd98 100644 (file)
@@ -81,6 +81,7 @@ static struct drm_driver vc4_drm_driver = {
                            DRIVER_ATOMIC |
                            DRIVER_GEM |
                            DRIVER_HAVE_IRQ |
+                           DRIVER_RENDER |
                            DRIVER_PRIME),
        .lastclose = vc4_lastclose,
        .irq_handler = vc4_irq,
@@ -237,6 +238,7 @@ static const struct component_master_ops vc4_drm_ops = {
 
 static struct platform_driver *const component_drivers[] = {
        &vc4_hdmi_driver,
+       &vc4_dpi_driver,
        &vc4_crtc_driver,
        &vc4_hvs_driver,
        &vc4_v3d_driver,
index fa2ad15..37cac59 100644 (file)
@@ -16,6 +16,7 @@ struct vc4_dev {
        struct vc4_hvs *hvs;
        struct vc4_crtc *crtc[3];
        struct vc4_v3d *v3d;
+       struct vc4_dpi *dpi;
 
        struct drm_fbdev_cma *fbdev;
 
@@ -422,6 +423,10 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
 
+/* vc4_dpi.c */
+extern struct platform_driver vc4_dpi_driver;
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
+
 /* vc4_gem.c */
 void vc4_gem_init(struct drm_device *dev);
 void vc4_gem_destroy(struct drm_device *dev);
index d8b8649..fd2644d 100644 (file)
@@ -573,7 +573,7 @@ err_unprepare_hsm:
 err_unprepare_pix:
        clk_disable_unprepare(hdmi->pixel_clock);
 err_put_i2c:
-       put_device(&vc4->hdmi->ddc->dev);
+       put_device(&hdmi->ddc->dev);
 
        return ret;
 }
index 4718ae5..d423ba1 100644 (file)
@@ -93,7 +93,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
  * vc4_atomic_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
  * object. This can still fail when e.g. the framebuffer reservation fails. For
@@ -104,7 +104,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
  */
 static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
-                            bool async)
+                            bool nonblock)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
@@ -170,7 +170,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
         * current layout.
         */
 
-       if (async) {
+       if (nonblock) {
                vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
                                   vc4_atomic_complete_commit_seqno_cb);
        } else {
index bf42a8e..6163b95 100644 (file)
 #define SCALER_DISPBASE2                        0x0000006c
 #define SCALER_DISPALPHA2                       0x00000070
 #define SCALER_GAMADDR                          0x00000078
+# define SCALER_GAMADDR_AUTOINC                        BIT(31)
+/* Enables all gamma ramp SRAMs, not just those of CRTCs with gamma
+ * enabled.
+ */
+# define SCALER_GAMADDR_SRAMENB                        BIT(30)
+
 #define SCALER_GAMDATA                          0x000000e0
 #define SCALER_DLIST_START                      0x00002000
 #define SCALER_DLIST_SIZE                       0x00004000
index 57721c7..74b5bca 100644 (file)
@@ -164,7 +164,7 @@ static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
 };
 
-static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
@@ -173,6 +173,7 @@ static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
        vma->vm_private_data = info;
        return 0;
 }
+EXPORT_SYMBOL(fb_deferred_io_mmap);
 
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
index 5de4cff..360b2a7 100644 (file)
@@ -580,12 +580,21 @@ struct drm_driver {
        void (*debugfs_cleanup)(struct drm_minor *minor);
 
        /**
-        * Driver-specific constructor for drm_gem_objects, to set up
-        * obj->driver_private.
+        * @gem_free_object: deconstructor for drm_gem_objects
         *
-        * Returns 0 on success.
+        * This is deprecated and should not be used by new drivers. Use
+        * @gem_free_object_unlocked instead.
         */
        void (*gem_free_object) (struct drm_gem_object *obj);
+
+       /**
+        * @gem_free_object_unlocked: deconstructor for drm_gem_objects
+        *
+        * This is for drivers which are not encumbered with dev->struct_mutex
+        * legacy locking schemes. Use this hook instead of @gem_free_object.
+        */
+       void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
        int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
        void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
@@ -769,6 +778,7 @@ struct drm_device {
        atomic_t buf_alloc;             /**< Buffer allocation in progress */
        /*@} */
 
+       struct mutex filelist_mutex;
        struct list_head filelist;
 
        /** \name Memory management */
index 193ef19..b2d9126 100644 (file)
@@ -37,7 +37,7 @@ struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
                                uint32_t type);
 
 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_clear(struct drm_device *dev);
+void drm_legacy_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
@@ -93,7 +93,7 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
        return NULL;
 }
 
-static inline void drm_agp_clear(struct drm_device *dev)
+static inline void drm_legacy_agp_clear(struct drm_device *dev)
 {
 }
 
index d3eaa5d..92c84e9 100644 (file)
@@ -137,7 +137,7 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
 
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
-int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
 
 #define for_each_connector_in_state(state, connector, connector_state, __i) \
        for ((__i) = 0;                                                 \
index fe9d89c..0364287 100644 (file)
@@ -40,7 +40,7 @@ int drm_atomic_helper_check(struct drm_device *dev,
                            struct drm_atomic_state *state);
 int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
-                            bool async);
+                            bool nonblock);
 
 void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
                                        struct drm_atomic_state *state);
index 297e527..4acdaf5 100644 (file)
@@ -1886,7 +1886,7 @@ struct drm_mode_config_funcs {
         * drm_atomic_helper_commit(), or one of the exported sub-functions of
         * it.
         *
-        * Asynchronous commits (as indicated with the async parameter) must
+        * Nonblocking commits (as indicated with the nonblock parameter) must
         * do any preparatory work which might result in an unsuccessful commit
         * in the context of this callback. The only exceptions are hardware
         * errors resulting in -EIO. But even in that case the driver must
@@ -1899,7 +1899,7 @@ struct drm_mode_config_funcs {
         * The driver must wait for any pending rendering to the new
         * framebuffers to complete before executing the flip. It should also
         * wait for any pending rendering from other drivers if the underlying
-        * buffer is a shared dma-buf. Asynchronous commits must not wait for
+        * buffer is a shared dma-buf. Nonblocking commits must not wait for
         * rendering in the context of this callback.
         *
         * An application can request to be notified when the atomic commit has
@@ -1930,7 +1930,7 @@ struct drm_mode_config_funcs {
         *
         * 0 on success or one of the below negative error codes:
         *
-        *  - -EBUSY, if an asynchronous updated is requested and there is
+        *  - -EBUSY, if a nonblocking update is requested and there is
         *    an earlier updated pending. Drivers are allowed to support a queue
         *    of outstanding updates, but currently no driver supports that.
         *    Note that drivers must wait for preceding updates to complete if a
@@ -1960,7 +1960,7 @@ struct drm_mode_config_funcs {
         */
        int (*atomic_commit)(struct drm_device *dev,
                             struct drm_atomic_state *state,
-                            bool async);
+                            bool nonblock);
 
        /**
         * @atomic_state_alloc:
@@ -2571,7 +2571,15 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
        return mo ? obj_to_encoder(mo) : NULL;
 }
 
-static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
                uint32_t id)
 {
        struct drm_mode_object *mo;
@@ -2606,7 +2614,7 @@ static inline uint32_t drm_color_lut_extract(uint32_t user_input,
        return clamp_val(val, 0, max);
 }
 
-/*
+/**
  * drm_framebuffer_reference - incr the fb refcnt
  * @fb: framebuffer
  *
@@ -2639,6 +2647,28 @@ static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
        return atomic_read(&fb->base.refcount.refcount);
 }
 
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+       drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+       drm_mode_object_unreference(&connector->base);
+}
+
 /* Plane list iterator for legacy (overlay only) planes. */
 #define drm_for_each_legacy_plane(plane, dev) \
        list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
index ae49c24..c6d9c9c 100644 (file)
@@ -4,11 +4,18 @@
 struct drm_fbdev_cma;
 struct drm_gem_cma_object;
 
+struct drm_fb_helper_surface_size;
+struct drm_framebuffer_funcs;
+struct drm_fb_helper_funcs;
 struct drm_framebuffer;
+struct drm_fb_helper;
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
 
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
+       unsigned int preferred_bpp, unsigned int num_crtc,
+       unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
        unsigned int preferred_bpp, unsigned int num_crtc,
        unsigned int max_conn_count);
@@ -16,6 +23,13 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+       struct drm_fb_helper_surface_size *sizes,
+       struct drm_framebuffer_funcs *funcs);
+
+void drm_fb_cma_destroy(struct drm_framebuffer *fb);
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+       struct drm_file *file_priv, unsigned int *handle);
 
 struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
        struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
index 062723b..5b4aa35 100644 (file)
@@ -172,6 +172,10 @@ struct drm_fb_helper_connector {
  * @funcs: driver callbacks for fb helper
  * @fbdev: emulated fbdev device info struct
  * @pseudo_palette: fake palette of 16 colors
+ * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
+ *              the screen buffer
+ * @dirty_lock: spinlock protecting @dirty_clip
+ * @dirty_work: worker used to flush the framebuffer
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embedded this into their overall driver structure.
@@ -189,6 +193,9 @@ struct drm_fb_helper {
        const struct drm_fb_helper_funcs *funcs;
        struct fb_info *fbdev;
        u32 pseudo_palette[17];
+       struct drm_clip_rect dirty_clip;
+       spinlock_t dirty_lock;
+       struct work_struct dirty_work;
 
        /**
         * @kernel_fb_list:
@@ -245,6 +252,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 
 void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
 
+void drm_fb_helper_deferred_io(struct fb_info *info,
+                              struct list_head *pagelist);
+
 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
                               size_t count, loff_t *ppos);
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
@@ -368,6 +378,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 {
 }
 
+static inline void drm_fb_helper_deferred_io(struct fb_info *info,
+                                            struct list_head *pagelist)
+{
+}
+
 static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
                                             char __user *buf, size_t count,
                                             loff_t *ppos)
index 0b3e11a..408d6c4 100644 (file)
@@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj)
 }
 
 /**
- * drm_gem_object_unreference - release a GEM BO reference
+ * __drm_gem_object_unreference - raw function to release a GEM BO reference
  * @obj: GEM buffer object
  *
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This function is meant to be used by drivers which are not encumbered with
+ * dev->struct_mutex legacy locking and which are using the
+ * gem_free_object_unlocked callback. It avoids all the locking checks and
+ * locking overhead of drm_gem_object_unreference() and
+ * drm_gem_object_unreference_unlocked().
  *
- * For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * Drivers should never call this directly in their code. Instead they should
+ * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
+ * *obj) wrapper function, and use that. Shared code should never call this, to
+ * avoid breaking drivers by accident which still depend upon dev->struct_mutex
+ * locking.
  */
 static inline void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+__drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-       if (obj != NULL) {
-               WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-               kref_put(&obj->refcount, drm_gem_object_free);
-       }
+       kref_put(&obj->refcount, drm_gem_object_free);
 }
 
-/**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
-       struct drm_device *dev;
-
-       if (!obj)
-               return;
-
-       dev = obj->dev;
-       if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
-               mutex_unlock(&dev->struct_mutex);
-       else
-               might_lock(&dev->struct_mutex);
-}
+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
 
 int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
index 3e69803..a5ef2c7 100644 (file)
@@ -154,8 +154,10 @@ struct drm_map_list {
 int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, struct drm_local_map **map_p);
-int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
 int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+                             struct drm_master *master);
 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
 
index dfe8835..a964d07 100644 (file)
@@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 }
 
 /* drivers/video/fb_defio.c */
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
 extern void fb_deferred_io_open(struct fb_info *info,
                                struct inode *inode,
index 7a7856e..f7e53ea 100644 (file)
@@ -202,6 +202,7 @@ struct drm_mode_get_plane_res {
 #define DRM_MODE_ENCODER_VIRTUAL 5
 #define DRM_MODE_ENCODER_DSI   6
 #define DRM_MODE_ENCODER_DPMST 7
+#define DRM_MODE_ENCODER_DPI   8
 
 struct drm_mode_get_encoder {
        __u32 encoder_id;
@@ -241,6 +242,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_eDP         14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
 #define DRM_MODE_CONNECTOR_DSI         16
+#define DRM_MODE_CONNECTOR_DPI         17
 
 struct drm_mode_get_connector {
 
index c1c1ca1..0098a52 100644 (file)
 #define TRIGCON_TRIGMODE_W1BUF         (1 << 10)
 #define TRIGCON_SWTRIGCMD_W0BUF                (1 << 6)
 #define TRIGCON_TRIGMODE_W0BUF         (1 << 5)
-#define TRIGCON_HWTRIGMASK_I80_RGB     (1 << 4)
-#define TRIGCON_HWTRIGEN_I80_RGB       (1 << 3)
-#define TRIGCON_HWTRIG_INV_I80_RGB     (1 << 2)
+#define TRIGCON_HWTRIGMASK             (1 << 4)
+#define TRIGCON_HWTRIGEN               (1 << 3)
+#define TRIGCON_HWTRIG_INV             (1 << 2)
 #define TRIGCON_SWTRIGCMD              (1 << 1)
 #define TRIGCON_SWTRIGEN               (1 << 0)