X-Git-Url: http://git.cascardo.info/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fi915_drv.h;h=b4ea941d87f320e8d5edaf9b272929c892224d80;hb=66fd7a66e8b9e11e49f46ea77910f935c4dee5c3;hp=daba7ebb969903d11dbf6d20a0e932bfc922f128;hpb=db1f3283edcff33719592f3b374f40cf6e8256f4;p=cascardo%2Flinux.git

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index daba7ebb9699..b4ea941d87f3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,33 +33,40 @@
 #include <uapi/drm/i915_drm.h>
 #include <uapi/drm/drm_fourcc.h>
 
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
 #include <linux/backlight.h>
 #include <linux/hashtable.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
+#include "intel_dpll_mgr.h"
 #include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160229"
+#define DRIVER_DATE		"20160522"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -97,6 +104,10 @@
 #define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
+bool __i915_inject_load_failure(const char *func, int line);
+#define i915_inject_load_failure() \
+	__i915_inject_load_failure(__func__, __LINE__)
+
 static inline const char *yesno(bool v)
 {
	return v ? "yes" : "no";
 }
@@ -122,9 +133,35 @@ enum transcoder {
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
+	TRANSCODER_DSI_A,
+	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
 };
-#define transcoder_name(t) ((t) + 'A')
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+	switch (transcoder) {
+	case TRANSCODER_A:
+		return "A";
+	case TRANSCODER_B:
+		return "B";
+	case TRANSCODER_C:
+		return "C";
+	case TRANSCODER_EDP:
+		return "EDP";
+	case TRANSCODER_DSI_A:
+		return "DSI A";
+	case TRANSCODER_DSI_C:
+		return "DSI C";
+	default:
+		return "<invalid>";
+	}
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
 
 /*
  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
@@ -176,6 +213,8 @@ enum intel_display_power_domain {
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_TRANSCODER_DSI_A,
+	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
@@ -273,6 +312,10 @@ struct i915_hotplug {
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
	     (__s)++)
 
+#define for_each_port_masked(__port, __ports_mask) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
+		for_each_if ((__ports_mask) & (1 << (__port)))
+
 #define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 
@@ -281,6 +324,12 @@ struct i915_hotplug {
			    &dev->mode_config.plane_list,	\
			    base.head)
 
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)		\
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list,	\
+			    base.head)					\
+		for_each_if ((plane_mask) &				\
+			     (1 << drm_plane_index(&intel_plane->base)))
+
 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
	list_for_each_entry(intel_plane,				\
			    &(dev)->mode_config.plane_list,		\
@@ -290,6 +339,10 @@ struct i915_hotplug {
 #define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
+		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
+
 #define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder,		\
			    &(dev)->mode_config.encoder_list,	\
@@ -340,81 +393,6 @@ struct drm_i915_file_private {
	unsigned int bsd_ring;
 };
 
-enum intel_dpll_id {
-	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
-	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A = 0,
-	DPLL_ID_PCH_PLL_B = 1,
-	/* hsw/bdw */
-	DPLL_ID_WRPLL1 = 0,
-	DPLL_ID_WRPLL2 = 1,
-	DPLL_ID_SPLL = 2,
-
-	/* skl */
-	DPLL_ID_SKL_DPLL1 = 0,
-	DPLL_ID_SKL_DPLL2 = 1,
-	DPLL_ID_SKL_DPLL3 = 2,
-};
-#define I915_NUM_PLLS 3
-
-struct intel_dpll_hw_state {
-	/* i9xx, pch plls */
-	uint32_t dpll;
-	uint32_t dpll_md;
-	uint32_t fp0;
-	uint32_t fp1;
-
-	/* hsw, bdw */
-	uint32_t wrpll;
-	uint32_t spll;
-
-	/* skl */
-	/*
-	 * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
-	 * lower part of ctrl1 and they get shifted into position when writing
-	 * the register. This allows us to easily compare the state to share
-	 * the DPLL.
-	 */
-	uint32_t ctrl1;
-	/* HDMI only, 0 when used for DP */
-	uint32_t cfgcr1, cfgcr2;
-
-	/* bxt */
-	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
-		 pcsdw12;
-};
-
-struct intel_shared_dpll_config {
-	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
-	struct intel_dpll_hw_state hw_state;
-};
-
-struct intel_shared_dpll {
-	struct intel_shared_dpll_config config;
-
-	int active; /* count of number of active CRTCs (i.e. DPMS on) */
-	bool on; /* is the PLL actually active? Disabled during modeset */
-	const char *name;
-	/* should match the index in the dev_priv->shared_dplls array */
-	enum intel_dpll_id id;
-	/* The mode_set hook is optional and should be used together with the
-	 * intel_prepare_shared_dpll function.
-	 */
-	void (*mode_set)(struct drm_i915_private *dev_priv,
-			 struct intel_shared_dpll *pll);
-	void (*enable)(struct drm_i915_private *dev_priv,
-		       struct intel_shared_dpll *pll);
-	void (*disable)(struct drm_i915_private *dev_priv,
-			struct intel_shared_dpll *pll);
-	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
-			     struct intel_shared_dpll *pll,
-			     struct intel_dpll_hw_state *hw_state);
-};
-
-#define SKL_DPLL0 0
-#define SKL_DPLL1 1
-#define SKL_DPLL2 2
-#define SKL_DPLL3 3
-
 /* Used by dp and fdi links */
 struct intel_link_m_n {
	uint32_t tu;
@@ -533,7 +511,8 @@ struct drm_i915_error_state {
		u32 cpu_ring_head;
		u32 cpu_ring_tail;
 
-		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+		u32 last_seqno;
+		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 
		/* Register state */
		u32 start;
@@ -553,7 +532,7 @@ struct drm_i915_error_state {
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 
		struct drm_i915_error_object {
			int page_count;
@@ -561,6 +540,8 @@ struct drm_i915_error_state {
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
+		struct drm_i915_error_object *wa_ctx;
+
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
@@ -577,12 +558,12 @@ struct drm_i915_error_state {
 
		pid_t pid;
		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_RINGS];
+	} ring[I915_NUM_ENGINES];
 
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
-		u32 rseqno[I915_NUM_RINGS], wseqno;
+		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
@@ -611,27 +592,13 @@ struct dpll;
 struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
-	/**
-	 * find_dpll() - Find the best values for the PLL
-	 * @limit: limits for the PLL
-	 * @crtc: current CRTC
-	 * @target: target frequency in kHz
-	 * @refclk: reference clock frequency in kHz
-	 * @match_clock: if provided, @best_clock P divider must
-	 *               match the P divider from @match_clock
-	 *               used for LVDS downclocking
-	 * @best_clock: best PLL values found
-	 *
-	 * Returns true on success, false on failure.
-	 */
-	bool (*find_dpll)(const struct intel_limit *limit,
-			  struct intel_crtc_state *crtc_state,
-			  int target, int refclk,
-			  struct dpll *match_clock,
-			  struct dpll *best_clock);
-	int (*compute_pipe_wm)(struct intel_crtc *crtc,
-			       struct drm_atomic_state *state);
-	void (*program_watermarks)(struct intel_crtc_state *cstate);
+	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
+	int (*compute_intermediate_wm)(struct drm_device *dev,
+				       struct intel_crtc *intel_crtc,
+				       struct intel_crtc_state *newstate);
+	void (*initial_watermarks)(struct intel_crtc_state *cstate);
+	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -656,12 +623,15 @@ struct drm_i915_display_funcs {
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
-	void (*hpd_irq_setup)(struct drm_device *dev);
+	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
+
+	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
+	void (*load_luts)(struct drm_crtc_state *crtc_state);
 };
 
 enum forcewake_domain_id {
@@ -681,6 +651,13 @@ enum forcewake_domains {
			 FORCEWAKE_MEDIA)
 };
 
+#define FW_REG_READ  (1)
+#define FW_REG_WRITE (2)
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg, unsigned int op);
+
 struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
@@ -713,8 +690,9 @@ struct intel_uncore {
	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
+		enum forcewake_domains mask;
		unsigned wake_count;
-		struct timer_list timer;
+		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
@@ -727,14 +705,14 @@ struct intel_uncore {
 };
 
 /* Iterate over initialised fw domains */
-#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
-	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
-	     (i__) < FW_DOMAIN_ID_COUNT; \
-	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
-		for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
+	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
+	     (domain__)++) \
+		for_each_if ((mask__) & (domain__)->mask)
 
-#define for_each_fw_domain(domain__, dev_priv__, i__) \
-	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+#define for_each_fw_domain(domain__, dev_priv__) \
+	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
 
 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
 #define CSR_VERSION_MAJOR(version)	((version) >> 16)
@@ -750,6 +728,7 @@ struct intel_csr {
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
+	uint32_t allowed_dc_mask;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -767,6 +746,7 @@ struct intel_csr {
	func(is_valleyview) sep \
	func(is_cherryview) sep \
	func(is_haswell) sep \
+	func(is_broadwell) sep \
	func(is_skylake) sep \
	func(is_broxton) sep \
	func(is_kabylake) sep \
@@ -779,6 +759,7 @@
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
+	func(has_snoop) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)
 
@@ -788,9 +769,10 @@ struct intel_csr {
 struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
-	u8 num_pipes:3;
+	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
+	u16 gen_mask;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
@@ -810,6 +792,11 @@ struct intel_device_info {
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
+
+	struct color_luts {
+		u16 degamma_lut_size;
+		u16 gamma_lut_size;
+	} color;
 };
 
 #undef DEFINE_FLAG
@@ -877,6 +864,9 @@ struct intel_context {
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;
 
+	/* Unique identifier for this context, used by the hw for tracking */
+	unsigned hw_id;
+
	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
@@ -891,7 +881,8 @@ struct intel_context {
		struct i915_vma *lrc_vma;
		u64 lrc_desc;
		uint32_t *lrc_reg_state;
-	} engine[I915_NUM_RINGS];
+		bool initialised;
+	} engine[I915_NUM_ENGINES];
 
	struct list_head link;
 };
@@ -1036,6 +1027,7 @@ struct intel_fbc_work;
 
 struct intel_gmbus {
	struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
@@ -1159,6 +1151,7 @@ struct intel_gen6_power_mgmt {
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/freqency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
 
	u8 up_threshold; /* Current %busy required to uplock */
	u8 down_threshold; /* Current %busy required to downclock */
@@ -1298,6 +1291,7 @@ struct i915_gem_mm {
	struct i915_hw_ppgtt *aliasing_ppgtt;
 
	struct notifier_block oom_notifier;
+	struct notifier_block vmap_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;
 
@@ -1423,9 +1417,6 @@ struct i915_gpu_error {
 
	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
-
-	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-	bool reload_in_reset;
 };
 
 enum modeset_restore {
@@ -1482,21 +1473,23 @@ struct intel_vbt_data {
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
-	unsigned int has_mipi:1;
+	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
	enum drrs_support_type drrs_type;
 
-	/* eDP */
-	int edp_rate;
-	int edp_lanes;
-	int edp_preemphasis;
-	int edp_vswing;
-	bool edp_initialized;
-	bool edp_support;
-	int edp_bpp;
-	struct edp_power_seq edp_pps;
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+		bool low_vswing;
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
 
	struct {
		bool full_link;
@@ -1512,11 +1505,11 @@ struct intel_vbt_data {
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
+		enum intel_backlight_type type;
	} backlight;
 
	/* MIPI DSI */
	struct {
-		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
@@ -1532,6 +1525,7 @@ struct intel_vbt_data {
	union child_device_config *child_dev;
 
	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+	struct sdvo_device_mapping sdvo_mappings[2];
 };
 
 enum intel_ddb_partitioning {
@@ -1604,7 +1598,7 @@ struct skl_ddb_allocation {
 };
 
 struct skl_wm_values {
-	bool dirty[I915_MAX_PIPES];
+	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1706,7 +1700,7 @@ struct i915_wa_reg {
 struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
-	u32 hw_whitelist_count[I915_NUM_RINGS];
+	u32 hw_whitelist_count[I915_NUM_ENGINES];
 };
 
 struct i915_virtual_gpu {
@@ -1719,7 +1713,7 @@ struct i915_execbuffer_params {
	uint32_t dispatch_flags;
	uint32_t args_batch_start_offset;
	uint64_t batch_obj_vm_offset;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch_obj;
	struct intel_context *ctx;
	struct drm_i915_gem_request *request;
@@ -1771,7 +1765,7 @@ struct drm_i915_private {
	wait_queue_head_t gmbus_wait_queue;
 
	struct pci_dev *bridge_dev;
-	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct intel_engine_cs engine[I915_NUM_ENGINES];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;
 
@@ -1829,6 +1823,7 @@ struct drm_i915_private {
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
+	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;
 
@@ -1855,15 +1850,20 @@ struct drm_i915_private {
	struct drm_atomic_state *modeset_restore_state;
 
	struct list_head vm_list; /* Global list of all address spaces */
-	struct i915_gtt gtt; /* VM representing the global address space */
+	struct i915_ggtt ggtt; /* VM representing the global address space */
 
	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;
 
-	/* Kernel Modesetting */
+	/* The hw wants to have a stable context identifier for the lifetime
+	 * of the context (for OA, PASID, faults, etc). This is limited
+	 * in execlists to 21 bits.
+	 */
+	struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
 
-	struct sdvo_device_mapping sdvo_mappings[2];
+	/* Kernel Modesetting */
 
	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 
@@ -1876,6 +1876,14 @@ struct drm_i915_private {
	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+	const struct intel_dpll_mgr *dpll_mgr;
+
+	/*
+	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
+	 * Must be global rather than per dpll, because on some platforms
+	 * plls share registers.
+	 */
+	struct mutex dpll_lock;
 
	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];
@@ -1884,9 +1892,6 @@ struct drm_i915_private {
 
	struct i915_workarounds workarounds;
 
-	/* Reclocking support */
-	bool render_reclock_avail;
-
	struct i915_frontbuffer_tracking fb_tracking;
 
	u16 orig_clock;
@@ -1896,7 +1901,7 @@ struct drm_i915_private {
	struct intel_l3_parity l3_parity;
 
	/* Cannot be determined by PCIID. You must always read a register. */
-	size_t ellc_size;
+	u32 edram_cap;
 
	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;
@@ -1936,7 +1941,15 @@ struct drm_i915_private {
 
	u32 fdi_rx_config;
 
+	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
+	/*
+	 * Shadows for CHV DPLL_MD regs to keep the state
+	 * checker somewhat working in the presence hardware
+	 * crappiness (can't read out DPLL_MD for pipes B & C).
+	 */
+	u32 chv_dpll_md[I915_MAX_PIPES];
+	u32 bxt_phy_grc;
 
	u32 suspend_count;
	bool suspended_to_idle;
@@ -1962,9 +1975,6 @@ struct drm_i915_private {
	 */
	uint16_t skl_latency[8];
 
-	/* Committed wm config */
-	struct intel_wm_config config;
-
	/*
	 * The skl_wm_values structure is a bit too big for stack
	 * allocation, so we keep the staging struct where we store
@@ -1980,6 +1990,20 @@ struct drm_i915_private {
		};
 
		uint8_t max_level;
+
+		/*
+		 * Should be held around atomic WM register writing; also
+		 * protects * intel_crtc->wm.active and
+		 * cstate->wm.need_postvbl_update.
+		 */
+		struct mutex wm_mutex;
+
+		/*
+		 * Set during HW readout of watermarks/DDB. Some platforms
+		 * need to know when we're still using BIOS-provided values
+		 * (which we don't fully trust).
+		 */
+		bool distrust_bios_wm;
	} wm;
 
	struct i915_runtime_pm pm;
@@ -1989,15 +2013,13 @@ struct drm_i915_private {
		int (*execbuf_submit)(struct i915_execbuffer_params *params,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas);
-		int (*init_rings)(struct drm_device *dev);
-		void (*cleanup_ring)(struct intel_engine_cs *ring);
-		void (*stop_ring)(struct intel_engine_cs *ring);
+		int (*init_engines)(struct drm_device *dev);
+		void (*cleanup_engine)(struct intel_engine_cs *engine);
+		void (*stop_engine)(struct intel_engine_cs *engine);
	} gt;
 
	struct intel_context *kernel_context;
 
-	bool edp_low_vswing;
-
	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];
 
@@ -2024,10 +2046,28 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
	return container_of(guc, struct drm_i915_private, guc);
 }
 
-/* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
-	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-		for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, dev_priv__) \
+	for ((engine__) = &(dev_priv__)->engine[0]; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (intel_engine_initialized(engine__))
+
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+	for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (((id__) = (engine__)->id, \
			      intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, dev_priv__, mask__) \
+	for ((engine__) = &(dev_priv__)->engine[0]; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (((mask__) & intel_engine_flag(engine__)) && \
			     intel_engine_initialized(engine__))
 
 enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2097,7 +2137,7 @@ struct drm_i915_gem_object {
	struct drm_mm_node *stolen;
	struct list_head global_list;
 
-	struct list_head ring_list[I915_NUM_RINGS];
+	struct list_head engine_list[I915_NUM_ENGINES];
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;
 
@@ -2108,7 +2148,7 @@ struct drm_i915_gem_object {
	 * rendering and so a non-zero seqno), and is not set if it i s on
	 * inactive (ready to be unbound) list.
	 */
-	unsigned int active:I915_NUM_RINGS;
+	unsigned int active:I915_NUM_ENGINES;
 
	/**
	 * This is set if the object has been written to since last bound
@@ -2172,10 +2212,7 @@ struct drm_i915_gem_object {
		struct scatterlist *sg;
		int last;
	} get_page;
-
-	/* prime dma-buf support */
-	void *dma_buf_vmapping;
-	int vmapping_count;
+	void *mapping;
 
	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
@@ -2187,7 +2224,7 @@ struct drm_i915_gem_object {
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 * */
-	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;
 
@@ -2219,9 +2256,75 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits);
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+	struct scatterlist *sgp;
+	union {
+		unsigned long pfn;
+		dma_addr_t dma;
+	};
+	unsigned int curr;
+	unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+	struct sgt_iter s = { .sgp = sgl };
+
+	if (s.sgp) {
+		s.max = s.curr = s.sgp->offset;
+		s.max += s.sgp->length;
+		if (dma)
+			s.dma = sg_dma_address(s.sgp);
+		else
+			s.pfn = page_to_pfn(sg_page(s.sgp));
+	}
+
+	return s;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg:		The current sg entry
+ *
+ * Description:
+ *   If the entry is the last, return NULL; otherwise, step to the next
+ *   element in the array (@sg@+1). If that's a chain pointer, follow it;
+ *   otherwise just return the pointer to the current element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+	BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+	return sg_is_last(sg) ? NULL :
		likely(!sg_is_chain(++sg)) ? sg :
		sg_chain_ptr(sg);
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap:	DMA address (output)
+ * @__iter:	'struct sgt_iter' (iterator state, internal)
+ * @__sgt:	sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt)				\
+	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dmap) = (__iter).dma + (__iter).curr);			\
	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp:	page pointer (output)
+ * @__iter:	'struct sgt_iter' (iterator state, internal)
+ * @__sgt:	sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt)				\
+	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
 
 /**
  * Request queue structure.
@@ -2242,7 +2345,8 @@ struct drm_i915_gem_request {
	/** On Which ring this request was generated */
	struct drm_i915_private *i915;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
+	unsigned reset_counter;
 
	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2269,6 +2373,9 @@ struct drm_i915_gem_request {
	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;
 
+	/** Preallocate space in the ringbuffer for the emitting the request */
+	u32 reserved_space;
+
	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
@@ -2282,6 +2389,17 @@ struct drm_i915_gem_request {
	struct intel_context *ctx;
	struct intel_ringbuffer *ringbuf;
 
+	/**
+	 * Context related to the previous request.
+	 * As the contexts are accessed by the hardware until the switch is
+	 * completed to a new context, the hardware may still be writing
+	 * to the context object after the breadcrumb is visible. We must
+	 * not unpin/unbind/prune that object whilst still active and so
+	 * we keep the previous context pinned until the following (this)
+	 * request is retired.
+	 */
+	struct intel_context *previous_context;
+
	/** Batch buffer related to this request if any (used for
	    error state dump only) */
	struct drm_i915_gem_object *batch_obj;
@@ -2318,12 +2436,13 @@ struct drm_i915_gem_request {
	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;
 
+	/** Execlists context hardware id. */
+	unsigned ctx_hw_id;
 };
 
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
@@ -2335,9 +2454,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 }
 
 static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 {
-	return req ? req->ring : NULL;
+	return req ? req->engine : NULL;
 }
 
 static inline struct drm_i915_gem_request *
@@ -2351,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
 }
 
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-	struct drm_device *dev;
-
-	if (!req)
-		return;
-
-	dev = req->ring->dev;
-	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-}
-
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
 {
@@ -2493,10 +2598,31 @@ struct drm_i915_cmd_table {
	__p; \
 })
 #define INTEL_INFO(p)	(&__I915__(p)->info)
+#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
 #define REVID_FOREVER	0xff
+#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for unbound start and or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+	unsigned int __s = (s), __e = (e); \
+	BUILD_BUG_ON(!__builtin_constant_p(s)); \
+	BUILD_BUG_ON(!__builtin_constant_p(e)); \
+	if ((__s) != GEN_FOREVER) \
+		__s = (s) - 1; \
+	if ((__e) == GEN_FOREVER) \
+		__e = BITS_PER_LONG - 1; \
+	else \
+		__e = (e) - 1; \
+	!!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
+
 /*
  * Return true if revision is in range [since,until] inclusive.
  *
@@ -2529,7 +2655,7 @@ struct drm_i915_cmd_table {
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_cherryview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->is_broadwell)
 #define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
 #define IS_BROXTON(dev)	(INTEL_INFO(dev)->is_broxton)
 #define IS_KABYLAKE(dev)	(INTEL_INFO(dev)->is_kabylake)
@@ -2597,27 +2723,31 @@ struct drm_i915_cmd_table {
  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  * chips, etc.).
  */
-#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
-#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
-#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen_mask & BIT(1))
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen_mask & BIT(2))
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen_mask & BIT(3))
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen_mask & BIT(4))
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen_mask & BIT(5))
+#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen_mask & BIT(6))
+#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen_mask & BIT(7))
+#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen_mask & BIT(8))
 
 #define RENDER_RING		(1<<RCS)
 #define BSD_RING		(1<<VCS)
 #define BLT_RING		(1<<BCS)
 #define VEBOX_RING		(1<<VECS)
 #define BSD2_RING		(1<<VCS2)
+#define ALL_ENGINES		(~0)
+
 #define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
 #define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
 #define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
+#define HAS_SNOOP(dev)		(INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev)		(__I915__(dev)->edram_cap & EDRAM_ENABLED)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 __I915__(dev)->ellc_size)
+				 HAS_EDRAM(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -2671,9 +2801,9 @@ struct drm_i915_cmd_table {
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
				 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-				 IS_KABYLAKE(dev))
+				 IS_KABYLAKE(dev) || IS_BROXTON(dev))
 #define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_RC6p(dev)		(IS_GEN6(dev) || IS_IVYBRIDGE(dev))
 
 #define HAS_CSR(dev)	(IS_GEN9(dev))
 
@@ -2696,6 +2826,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
@@ -2726,7 +2857,17 @@ extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt);
+
 /* i915_dma.c */
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...)			   \
+	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2739,9 +2880,11 @@ extern void i915_driver_postclose(struct drm_device *dev,
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
 #endif
-extern int intel_gpu_reset(struct drm_device *dev);
-extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset(struct drm_i915_private *dev_priv);
+extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2749,30 +2892,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 
 /* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask);
 void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
 
 /* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
 __printf(3, 4)
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
-extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev,
+extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					bool restore_forcewake);
-extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_device *dev);
-extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
@@ -2785,10 +2931,12 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
 
-static inline bool intel_vgpu_active(struct drm_device *dev)
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
 {
-	return to_i915(dev)->vgpu.active;
+	return dev_priv->vgpu.active;
 }
 
 void
@@ -2863,7 +3011,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
					struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
				   struct drm_i915_gem_execbuffer2 *args,
				   struct list_head *vmas);
@@ -2885,7 +3032,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
-int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2894,11 +3041,14 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
+void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
		struct drm_device *dev, const void *data, size_t size);
@@ -2978,12 +3128,46 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
 }
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
 }
 
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj - the object to map into kernel address space
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space.
+ *
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj - the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ *
+ * The caller must hold the struct_mutex.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_unpin_pages(obj);
+}
+
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to,
@@ -2995,6 +3179,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -3007,42 +3196,68 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
					   bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-	return i915_seqno_passed(seqno, req->previous_seqno);
+	if (!lazy_coherency && req->engine->irq_seqno_barrier)
+		req->engine->irq_seqno_barrier(req->engine);
+	return i915_seqno_passed(req->engine->get_seqno(req->engine),
				 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-	return i915_seqno_passed(seqno, req->seqno);
+	if (!lazy_coherency && req->engine->irq_seqno_barrier)
+		req->engine->irq_seqno_barrier(req->engine);
+	return i915_seqno_passed(req->engine->get_seqno(req->engine),
				 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
+
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter);
+}
 
-bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+	return unlikely(reset & I915_WEDGED);
+}
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-	return unlikely(atomic_read(&error->reset_counter)
-			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+	return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) & I915_WEDGED;
+	return __i915_terminally_wedged(i915_reset_counter(error));
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3060,11 +3275,10 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3075,7 +3289,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 #define i915_add_request_no_flush(req) \
	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps);
@@ -3131,8 +3344,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm);
@@ -3155,13 +3366,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-	WARN_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
@@ -3171,25 +3378,19 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
-				   alignment, flags | PIN_GLOBAL);
-}
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
-	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+	return i915_gem_object_pin(obj, &ggtt->base,
				   alignment, flags | PIN_GLOBAL);
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
@@ -3215,10 +3416,10 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
@@ -3249,6 +3450,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3263,9 +3466,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
 }
 
@@ -3297,6 +3500,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
 #define I915_SHRINK_ACTIVE 0x8
+#define I915_SHRINK_VMAPS 0x10
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
@@ -3343,22 +3547,23 @@ static inline void i915_error_state_buf_release(
 {
	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, bool wedge,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
@@ -3392,22 +3597,32 @@ extern void intel_i2c_reset(struct drm_device *dev);
 /* intel_bios.c */
 int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
 extern int intel_opregion_setup(struct drm_device *dev);
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
 extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
+extern int intel_opregion_get_panel_type(struct drm_device *dev);
 #else
 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
 static inline int
 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
 {
@@ -3418,6 +3633,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 {
	return 0;
 }
+static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+{
+	return -ENODEV;
+}
 #endif
 
 /* intel_acpi.c */
@@ -3439,26 +3658,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
@@ -3487,6 +3705,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
@@ -3577,11 +3813,6 @@ static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
		return VGACNTRL;
 }
 
-static inline void __user *to_user_ptr(u64 address)
-{
-	return (void __user *)(uintptr_t)address;
-}
-
 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
 {
	unsigned long j = msecs_to_jiffies(m);
@@ -3629,11 +3860,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
	}
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
				      struct drm_i915_gem_request *req)
 {
-	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-		i915_gem_request_assign(&ring->trace_irq_req, req);
+	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+		i915_gem_request_assign(&engine->trace_irq_req, req);
 }
 
 #endif
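
Usage sketch (illustrative only, not part of the patch): the engine iterators introduced above replace the old index-based for_each_ring(ring, dev_priv, i) idiom, and for_each_engine_masked() pairs naturally with the new engine_mask plumbing in intel_gpu_reset()/i915_handle_error(). The function name example_check_engines below is hypothetical; it assumes an ordinary driver context with a valid dev_priv.

/* Illustrative sketch -- not from the patch. Shows the expected caller-side
 * shape of the new engine iterators declared in i915_drv.h above.
 */
static void example_check_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	/* Walk every initialised engine; no index variable needed. */
	for_each_engine(engine, dev_priv)
		DRM_DEBUG("%s: seqno %x\n", engine->name,
			  engine->get_seqno(engine));

	/* Walk only the engines named in a mask, e.g. for a reset request. */
	for_each_engine_masked(engine, dev_priv, ALL_ENGINES)
		intel_engine_init_hangcheck(engine);
}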
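
Similarly, a sketch of how the new sg_table iterators and the pin_map helpers documented above are meant to be used together. example_clear_object is hypothetical; it assumes obj->pages is already populated (i915_gem_object_get_pages()/pin_pages() succeeded) and that struct_mutex is held, as the kerneldoc for i915_gem_object_pin_map() requires.

/* Illustrative sketch -- not from the patch. Assumes obj->pages is populated
 * and struct_mutex is held.
 */
static int example_clear_object(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	void *vaddr;

	/* Per-page walk of the backing store, no persistent mapping. */
	for_each_sgt_page(page, sgt_iter, obj->pages)
		clear_highpage(page);

	/* Or: one contiguous kernel mapping of the whole object. */
	vaddr = i915_gem_object_pin_map(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return 0;
}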