151 files changed, 9062 insertions, 7063 deletions
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index a79fa3b0c8ed..a1694d977ec9 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -201,7 +201,7 @@ static unsigned long pin_highz_conf[] = { }; /* Pin control settings */ -static struct pinctrl_map __initdata u300_pinmux_map[] = { +static const struct pinctrl_map u300_pinmux_map[] = { /* anonymous maps for chip power and EMIFs */ PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"), PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"), diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 895b73f23079..6d4a29e99ae2 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc) } static struct drm_display_mode * +drm_connector_get_tiled_mode(struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->modes, head) { + if (mode->hdisplay == connector->tile_h_size && + mode->vdisplay == connector->tile_v_size) + return mode; + } + return NULL; +} + +static struct drm_display_mode * +drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->modes, head) { + if (mode->hdisplay == connector->tile_h_size && + mode->vdisplay == connector->tile_v_size) + continue; + return mode; + } + return NULL; +} + +static struct drm_display_mode * drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height) { struct drm_display_mode *mode; @@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors, struct drm_connector *connector; u64 conn_configured = 0; int tile_pass = 0; + int num_tiled_conns = 0; int i; + for (i = 0; i < connector_count; i++) { + if (connectors[i]->has_tile && + connectors[i]->status == connector_status_connected) + num_tiled_conns++; + } + retry: for (i = 0; i < connector_count; i++) { connector = connectors[i]; @@ -399,6 +433,28 @@ retry: list_for_each_entry(modes[i], &connector->modes, head) break; } + /* + * In case of tiled mode if all tiles not present fallback to + * first available non tiled mode. + * After all tiles are present, try to find the tiled mode + * for all and if tiled mode not present due to fbcon size + * limitations, use first non tiled mode only for + * tile 0,0 and set to no mode for all other tiles. + */ + if (connector->has_tile) { + if (num_tiled_conns < + connector->num_h_tile * connector->num_v_tile || + (connector->tile_h_loc == 0 && + connector->tile_v_loc == 0 && + !drm_connector_get_tiled_mode(connector))) { + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", + connector->base.id); + modes[i] = drm_connector_fallback_non_tiled_mode(connector); + } else { + modes[i] = drm_connector_get_tiled_mode(connector); + } + } + DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : "none"); conn_configured |= BIT_ULL(i); @@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, bool fallback = true, ret = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; + int num_tiled_conns = 0; struct drm_modeset_acquire_ctx ctx; if (!drm_drv_uses_atomic_modeset(dev)) @@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, memcpy(save_enabled, enabled, count); mask = GENMASK(count - 1, 0); conn_configured = 0; + for (i = 0; i < count; i++) { + if (connectors[i]->has_tile && + connectors[i]->status == connector_status_connected) + num_tiled_conns++; + } retry: conn_seq = conn_configured; for (i = 0; i < count; i++) { @@ -631,6 +693,16 @@ retry: connector->name); modes[i] = &connector->state->crtc->mode; } + /* + * In case of tiled modes, if all tiles are not present + * then fallback to a non tiled mode. + */ + if (connector->has_tile && + num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", + connector->base.id); + modes[i] = drm_connector_fallback_non_tiled_mode(connector); + } crtcs[i] = new_crtc; DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f8e905192608..57f510687b85 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1561,7 +1561,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, for (j = 0; j < mode_set->num_connectors; j++) { struct drm_connector *connector = mode_set->connectors[j]; - if (connector->has_tile) { + if (connector->has_tile && + desired_mode->hdisplay == connector->tile_h_size && + desired_mode->vdisplay == connector->tile_v_size) { lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); /* cloning to multiple tiles is just crazy-talk, so: */ diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore new file mode 100644 index 000000000000..d9a77f3b59b2 --- /dev/null +++ b/drivers/gpu/drm/i915/.gitignore @@ -0,0 +1 @@ +*.hdrtest diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b0c53661f62b..b8c5f8934dbd 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -31,9 +31,6 @@ CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h - subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
@@ -73,11 +70,12 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) -obj-y += gt/ gt-y += \ gt/debugfs_engines.o \ gt/debugfs_gt.o \ gt/debugfs_gt_pm.o \ + gt/gen6_ppgtt.o \ + gt/gen8_ppgtt.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ gt/intel_engine_cs.o \ @@ -85,14 +83,17 @@ gt-y += \ gt/intel_engine_pm.o \ gt/intel_engine_pool.o \ gt/intel_engine_user.o \ + gt/intel_ggtt.o \ gt/intel_gt.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ gt/intel_gt_pm_irq.o \ gt/intel_gt_requests.o \ + gt/intel_gtt.o \ gt/intel_llc.o \ gt/intel_lrc.o \ gt/intel_mocs.o \ + gt/intel_ppgtt.o \ gt/intel_rc6.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ @@ -111,7 +112,6 @@ gt-y += \ i915-y += $(gt-y) # GEM (Graphics Execution Management) code -obj-y += gem/ gem-y += \ gem/i915_gem_busy.o \ gem/i915_gem_clflush.o \ @@ -157,7 +157,6 @@ i915-y += \ intel_wopcm.o # general-purpose microcontroller (GuC) support -obj-y += gt/uc/ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_uc_fw.o \ gt/uc/intel_guc.o \ @@ -170,7 +169,6 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_huc_fw.o # modesetting core code -obj-y += display/ i915-y += \ display/intel_atomic.o \ display/intel_atomic_plane.o \ @@ -235,7 +233,6 @@ i915-y += \ display/vlv_dsi_pll.o # perf code -obj-y += oa/ i915-y += \ oa/i915_oa_hsw.o \ oa/i915_oa_bdw.o \ @@ -260,6 +257,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ gem/selftests/igt_gem_utils.o \ selftests/i915_random.o \ selftests/i915_selftest.o \ + selftests/igt_atomic.o \ selftests/igt_flush_test.o \ selftests/igt_live_test.o \ selftests/igt_mmap.o \ @@ -276,3 +274,27 @@ endif obj-$(CONFIG_DRM_I915) += i915.o obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o + +# header test + +# exclude some broken headers from the test coverage +no-header-test := \ + display/intel_vbt_defs.h \ + gvt/execlist.h \ + gvt/fb_decoder.h \ + gvt/gtt.h \ + gvt/gvt.h \ + gvt/interrupt.h \ + gvt/mmio_context.h \ + gvt/mpt.h \ + gvt/scheduler.h + +extra-$(CONFIG_DRM_I915_WERROR) += \ + $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \ + $(shell cd $(srctree)/$(src) && find * -name '*.h'))) + +quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) + cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@ + +$(obj)/%.hdrtest: $(src)/%.h FORCE + $(call if_changed_dep,hdrtest) diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile deleted file mode 100644 index 173c305d7866..000000000000 --- a/drivers/gpu/drm/i915/display/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. 
- -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h -header-test- := intel_vbt_defs.h diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 006b1a297e6f..f8e882101396 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -77,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port) static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; @@ -202,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp; int lane; @@ -267,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 dss_ctl1; dss_ctl1 = I915_READ(DSS_CTL1); @@ -306,7 +306,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, static int afe_clk(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp; if (crtc_state->dsc.compression_enable) @@ -321,7 +321,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; int afe_clk_khz; u32 esc_clk_div_m; @@ -360,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -376,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; for_each_dsi_phy(phy, intel_dsi->phys) @@ -387,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp; int lane; @@ -436,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) static void 
gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -488,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; @@ -509,7 +509,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; enum phy phy; @@ -575,7 +575,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -591,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -608,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; @@ -640,7 +640,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = intel_crtc->pipe; u32 tmp; @@ -789,7 +789,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; enum port port; @@ -923,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -945,7 +945,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; @@ -1026,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; @@ -1077,7 +1077,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); /* step3b */ gen11_dsi_map_pll(encoder, pipe_config); @@ -1104,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -1126,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); @@ -1139,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -1180,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) static void gen11_dsi_disable_port(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; @@ -1202,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -1229,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); @@ -1259,7 +1259,7 @@ static void gen11_dsi_post_disable(struct intel_encoder *encoder, intel_dsc_disable(old_crtc_state); - 
skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); } static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, @@ -1272,7 +1272,7 @@ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector static void gen11_dsi_get_timings(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -1313,7 +1313,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsc_get_config(encoder, pipe_config); @@ -1417,7 +1417,8 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base)); + get_dsi_io_power_domains(i915, + enc_to_intel_dsi(encoder)); if (crtc_state->dsc.compression_enable) intel_display_power_get(i915, @@ -1428,7 +1429,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; intel_wakeref_t wakeref; enum port port; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index fd0026fc3618..c362eecdd414 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -37,6 +37,7 @@ #include "intel_atomic.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_psr.h" #include "intel_sprite.h" /** @@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *crtc_state; intel_hdcp_atomic_check(conn, old_state, new_state); + intel_psr_atomic_check(conn, old_state, new_state); if (!new_state->crtc) return 0; @@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector) } /** + * intel_connector_needs_modeset - check if connector needs a modeset + */ +bool +intel_connector_needs_modeset(struct intel_atomic_state *state, + struct drm_connector *connector) +{ + const struct drm_connector_state *old_conn_state, *new_conn_state; + + old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector); + new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector); + + return old_conn_state->crtc != new_conn_state->crtc || + (new_conn_state->crtc && + drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base, + new_conn_state->crtc))); +} + +struct intel_digital_connector_state * +intel_atomic_get_digital_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + struct drm_connector_state *conn_state; + + conn_state = drm_atomic_get_connector_state(&state->base, + &connector->base); + if (IS_ERR(conn_state)) + return ERR_CAST(conn_state); + + return to_intel_digital_connector_state(conn_state); +} + +/** * intel_crtc_duplicate_state - duplicate crtc state * @crtc: drm crtc * diff --git 
a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 7b49623419ba..74c749dbfb4f 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -17,6 +17,7 @@ struct drm_device; struct drm_i915_private; struct drm_property; struct intel_atomic_state; +struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -32,6 +33,11 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state); struct drm_connector_state * intel_digital_connector_duplicate_state(struct drm_connector *connector); +bool intel_connector_needs_modeset(struct intel_atomic_state *state, + struct drm_connector *connector); +struct intel_digital_connector_state * +intel_atomic_get_digital_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector); struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 27710098d056..b18040793d9e 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, - connector->encoder->base.id, - connector->encoder->name); + encoder->base.base.id, + encoder->base.name); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev, /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. 
*/ if (--dev_priv->audio_power_refcount == 0) - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index dcb66a33be9b..b228671d5a5d 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -486,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv) return 0; } + +void intel_bw_cleanup(struct drm_i915_private *dev_priv) +{ + drm_atomic_private_obj_fini(&dev_priv->bw_obj); +} diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 9db10af012f4..20b9ad241802 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -25,6 +25,7 @@ struct intel_bw_state { void intel_bw_init_hw(struct drm_i915_private *dev_priv); int intel_bw_init(struct drm_i915_private *dev_priv); +void intel_bw_cleanup(struct drm_i915_private *dev_priv); int intel_bw_atomic_check(struct intel_atomic_state *state); void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 7d1ab1e5b7c3..0ce5926006ca 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) /* Account for additional needs from the planes */ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); + /* + * HACK. Currently for TGL platforms we calculate + * min_cdclk initially based on pixel_rate divided + * by 2, accounting for also plane requirements, + * however in some cases the lowest possible CDCLK + * doesn't work and causing the underruns. + * Explicitly stating here that this seems to be currently + * rather a Hack, than final solution. + */ + if (IS_TIGERLAKE(dev_priv)) + min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index b2b1336ecdb6..f976b800b245 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) return container_of(encoder, struct intel_crt, base); } -static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +static struct intel_crt *intel_attached_crt(struct intel_connector *connector) { return intel_encoder_to_crt(intel_attached_encoder(connector)); } @@ -247,7 +247,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, intel_ddi_disable_transcoder_func(old_crtc_state); - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); intel_ddi_disable_pipe_clock(old_crtc_state); @@ -351,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector, /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. 
*/ if (HAS_PCH_LPT(dev_priv) && - (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) + ilk_get_lanes_required(mode->clock, 270000, 24) > 2) return MODE_CLOCK_HIGH; /* HSW/BDW FDI limited to 4k */ @@ -427,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, return 0; } -static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) +static bool ilk_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; @@ -440,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) bool turn_off_dac = HAS_PCH_SPLIT(dev_priv); u32 save_adpa; - crt->force_hotplug_required = 0; + crt->force_hotplug_required = false; save_adpa = adpa = I915_READ(crt->adpa_reg); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); @@ -477,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); bool reenable_hpd; u32 adpa; @@ -535,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) int i, tries = 0; if (HAS_PCH_SPLIT(dev_priv)) - return intel_ironlake_crt_detect_hotplug(connector); + return ilk_crt_detect_hotplug(connector); if (IS_VALLEYVIEW(dev_priv)) return valleyview_crt_detect_hotplug(connector); @@ -609,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, static bool intel_crt_detect_ddc(struct drm_connector *connector) { - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; @@ -795,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; intel_wakeref_t wakeref; int status, ret; @@ -886,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; intel_wakeref_t wakeref; struct i2c_adapter *i2c; @@ -925,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder) POSTING_READ(crt->adpa_reg); DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); - crt->force_hotplug_required = 1; + crt->force_hotplug_required = true; } } @@ -1063,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) /* * Configure the automatic hotplug detection stuff */ - crt->force_hotplug_required = 0; + crt->force_hotplug_required = false; /* * TODO: find a proper way to discover whether we need to set the the diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index c9ba7d7f3787..33f1dc3d7c1a 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -34,6 +34,7 @@ #include "intel_ddi.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_dp_mst.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" @@ -1237,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *intel_dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); intel_dp->DP = intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); @@ -1899,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); - if (INTEL_GEN(dev_priv) >= 12) - temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder master; + + master = crtc_state->mst_master_transcoder; + WARN_ON(master == INVALID_TRANSCODER); + temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); + } } else { temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); @@ -1944,17 +1950,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - u32 val = I915_READ(reg); + u32 val; + + val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val &= ~TRANS_DDI_FUNC_ENABLE; if (INTEL_GEN(dev_priv) >= 12) { - val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK | - TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + if (!intel_dp_mst_is_master_trans(crtc_state)) + val &= ~TGL_TRANS_DDI_PORT_MASK; } else { - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | - TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + val &= ~TRANS_DDI_PORT_MASK; } - I915_WRITE(reg, val); + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { @@ -2217,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) return; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); /* @@ -2287,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, static void skl_ddi_set_iboost(struct intel_encoder *encoder, int level, enum intel_output_type type) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u8 iboost; @@ -2358,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; @@ -2497,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, width = 4; rate = 0; /* Rate is always < than 6GHz for HDMI */ } else { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); width = intel_dp->lane_count; rate = intel_dp->link_rate; @@ -2623,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, width = 4; /* Rate is always < than 6GHz for HDMI */ } else { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); width = intel_dp->lane_count; rate = intel_dp->link_rate; @@ -3161,57 +3168,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) } static void -icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val, bits; - int ln; - - if (tc_port == PORT_TC_NONE) - return; - - bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING; - - for (ln = 0; ln < 2; ln++) { - if (INTEL_GEN(dev_priv) >= 12) { - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); - val = I915_READ(DKL_DP_MODE(tc_port)); - } else { - val = I915_READ(MG_DP_MODE(ln, tc_port)); - } - - if (enable) - val |= bits; - else - val &= ~bits; - - if (INTEL_GEN(dev_priv) >= 12) - I915_WRITE(DKL_DP_MODE(tc_port), val); - else - I915_WRITE(MG_DP_MODE(ln, tc_port), val); - } - - if (INTEL_GEN(dev_priv) == 11) { - bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING; - - val = I915_READ(MG_MISC_SUS0(tc_port)); - if (enable) - val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); - else - val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); - I915_WRITE(MG_MISC_SUS0(tc_port), val); - } -} - -static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, const struct intel_crtc_state *crtc_state) { @@ -3317,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, if (!crtc_state->fec_enable) return; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val |= DP_TP_CTL_FEC_ENABLE; I915_WRITE(intel_dp->regs.dp_tp_ctl, val); @@ -3337,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, if (!crtc_state->fec_enable) return; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_FEC_ENABLE; I915_WRITE(intel_dp->regs.dp_tp_ctl, val); @@ -3428,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct 
intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); enum transcoder transcoder = crtc_state->cpu_transcoder; @@ -3458,14 +3414,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, * (DFLEXDPSP.DPX4TXLATC) * * This was done before tgl_ddi_pre_enable_dp by - * haswell_crtc_enable()->intel_encoders_pre_pll_enable(). + * hsw_crtc_enable()->intel_encoders_pre_pll_enable(). */ /* * 4. Enable the port PLL. * * The PLL enabling itself was already done before this function by - * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only + * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only * configure the PLL to port mapping here. */ intel_ddi_clk_select(encoder, crtc_state); @@ -3509,12 +3465,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, * down this function. */ - /* - * 7.d Type C with DP alternate or fixed/legacy/static connection - - * Disable PHY clock gating per Type-C DDI Buffer page - */ - icl_phy_set_clock_gating(dig_port, false); - /* 7.e Configure voltage swing and related IO settings */ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); @@ -3566,15 +3516,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, if (!is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp); - /* - * TODO: enable clock gating - * - * It is not written in DP enabling sequence but "PHY Clockgating - * programming" states that clock gating should be enabled after the - * link training but doing so causes all the following trainings to fail - * so not enabling it for now. - */ - /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_enable(encoder, crtc_state); @@ -3584,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); - WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + if (INTEL_GEN(dev_priv) < 11) + WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + else + WARN_ON(is_mst && port == PORT_A); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); @@ -3610,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); - icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3644,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_enable_fec(encoder, crtc_state); - icl_phy_set_clock_gating(dig_port, true); - if (!is_mst) intel_ddi_enable_pipe_clock(crtc_state); @@ -3674,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct 
intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int level = intel_ddi_hdmi_level(dev_priv, port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_clk_select(encoder, crtc_state); @@ -3687,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); - icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 12) tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3702,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, else intel_prepare_hdmi_ddi_buffers(encoder, level); - icl_phy_set_clock_gating(dig_port, true); - if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); @@ -3746,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); } else { struct intel_lspcon *lspcon = - enc_to_intel_lspcon(&encoder->base); + enc_to_intel_lspcon(encoder); intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); if (lspcon->active) { struct intel_digital_port *dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); dig_port->set_infoframes(encoder, crtc_state->has_infoframe, @@ -3776,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, } if (intel_crtc_has_dp_encoder(crtc_state)) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); @@ -3796,7 +3734,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); @@ -3808,8 +3746,19 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); - if (INTEL_GEN(dev_priv) < 12 && !is_mst) - intel_ddi_disable_pipe_clock(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 12) { + if (is_mst) { + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; + u32 val; + + val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val &= ~TGL_TRANS_DDI_PORT_MASK; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); + } + } else { + if (!is_mst) + intel_ddi_disable_pipe_clock(old_crtc_state); + } intel_disable_ddi_buf(encoder, old_crtc_state); @@ -3838,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; 
dig_port->set_infoframes(encoder, false, @@ -3860,8 +3809,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 trans_ddi_func_ctl2_val; if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) return; @@ -3869,10 +3816,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", transcoder_name(old_crtc_state->cpu_transcoder)); - reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); - trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | - PORT_SYNC_MODE_MASTER_SELECT_MASK); - I915_WRITE(reg, trans_ddi_func_ctl2_val); + I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); } static void intel_ddi_post_disable(struct intel_encoder *encoder, @@ -3880,25 +3824,27 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - intel_crtc_vblank_off(old_crtc_state); + if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { + intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_pipe(old_crtc_state); - if (INTEL_GEN(dev_priv) >= 11) - icl_disable_transcoder_port_sync(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 11) + icl_disable_transcoder_port_sync(old_crtc_state); - intel_ddi_disable_transcoder_func(old_crtc_state); + intel_ddi_disable_transcoder_func(old_crtc_state); - intel_dsc_disable(old_crtc_state); + intel_dsc_disable(old_crtc_state); - if (INTEL_GEN(dev_priv) >= 9) - skylake_scaler_disable(old_crtc_state); - else - ironlake_pfit_disable(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 9) + skl_scaler_disable(old_crtc_state); + else + ilk_pfit_disable(old_crtc_state); + } /* * When called from DP MST code: @@ -3970,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; if (port == PORT_A && INTEL_GEN(dev_priv) < 9) @@ -4011,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; @@ -4088,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; @@ -4136,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const 
struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_ddi_set_dp_msa(crtc_state, conn_state); @@ -4200,7 +4146,8 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, WARN_ON(crtc && crtc->active); - intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes); + intel_tc_port_get_link(enc_to_dig_port(encoder), + required_lanes); if (crtc_state && crtc_state->hw.active) intel_update_active_dpll(state, crtc, encoder); } @@ -4210,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state, struct intel_encoder *encoder, struct intel_crtc *crtc) { - intel_tc_port_put_link(enc_to_dig_port(&encoder->base)); + intel_tc_port_put_link(enc_to_dig_port(encoder)); } static void @@ -4219,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); @@ -4405,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; + + if (INTEL_GEN(dev_priv) >= 12) + pipe_config->mst_master_transcoder = + REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); + intel_dp_get_m_n(intel_crtc, pipe_config); break; default: @@ -4518,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); intel_dp_encoder_flush_work(encoder); @@ -4585,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); struct intel_connector *connector = hdmi->attached_connector; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); @@ -4657,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector, bool irq_received) { - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1860da0a493e..19ea842cfd84 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -46,6 +46,7 @@ #include "display/intel_crt.h" #include "display/intel_ddi.h" #include "display/intel_dp.h" +#include "display/intel_dp_mst.h" #include "display/intel_dsi.h" #include "display/intel_dvo.h" #include "display/intel_gmbus.h" @@ -145,8 +146,8 @@ static const u64 cursor_format_modifiers[] = { static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); -static void ironlake_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state 
*pipe_config); +static void ilk_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); static int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_i915_gem_object *obj, @@ -157,15 +158,15 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta const struct intel_link_m_n *m_n, const struct intel_link_m_n *m2_n2); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); -static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state); -static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); -static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); -static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state); +static void skl_pfit_enable(const struct intel_crtc_state *crtc_state); +static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); @@ -369,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { }, }; -static const struct intel_limit intel_limits_pineview_sdvo = { +static const struct intel_limit pnv_limits_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -384,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const struct intel_limit intel_limits_pineview_lvds = { +static const struct intel_limit pnv_limits_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -402,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const struct intel_limit intel_limits_ironlake_dac = { +static const struct intel_limit ilk_limits_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -415,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const struct intel_limit intel_limits_ironlake_single_lvds = { +static const struct intel_limit ilk_limits_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -428,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const struct intel_limit intel_limits_ironlake_dual_lvds = { +static const struct intel_limit ilk_limits_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -442,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. 
*/ -static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit ilk_limits_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -455,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit ilk_limits_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -553,13 +554,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) } static bool -is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) -{ - return (crtc_state->master_transcoder == INVALID_TRANSCODER && - crtc_state->sync_mode_slaves_mask); -} - -static bool is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { return crtc_state->master_transcoder != INVALID_TRANSCODER; @@ -1637,7 +1631,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, I915_READ(dpll_reg) & port_mask, expected_mask); } -static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) +static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1735,8 +1729,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, DRM_ERROR("Failed to enable PCH transcoder\n"); } -static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { i915_reg_t reg; u32 val; @@ -1944,7 +1938,9 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) static bool is_gen12_ccs_modifier(u64 modifier) { - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; + } static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) @@ -1977,8 +1973,7 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) } /* Return either the main plane's CCS or - if not a CCS FB - UV plane */ -static int -intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) +int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { if (is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); @@ -1994,6 +1989,13 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); } +static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, + int color_plane) +{ + return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + color_plane == 1; +} + static unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { @@ -2013,6 +2015,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) return 128; /* fall through */ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_ccs_plane(fb, color_plane)) return 64; /* fall through */ @@ -2068,6 +2071,16 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, *tile_height = intel_tile_height(fb, color_plane); } +static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, + int color_plane) +{ + unsigned int tile_width, tile_height; + + intel_tile_dims(fb, color_plane, &tile_width, &tile_height); + + return fb->pitches[color_plane] * tile_height; +} + unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int height) @@ -2142,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = to_i915(fb->dev); /* AUX_DIST needs only 4K alignment */ - if (is_aux_plane(fb, color_plane)) + if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) || + is_ccs_plane(fb, color_plane)) return 4096; switch (fb->modifier) { @@ -2152,11 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, if (INTEL_GEN(dev_priv) >= 9) return 256 * 1024; return 0; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (is_semiplanar_uv_plane(fb, color_plane)) + return intel_tile_row_size(fb, color_plane); + /* Fall-through */ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: case I915_FORMAT_MOD_Y_TILED: + if (INTEL_GEN(dev_priv) >= 12 && + is_semiplanar_uv_plane(fb, color_plane)) + return intel_tile_row_size(fb, color_plane); + /* Fall-through */ case I915_FORMAT_MOD_Yf_TILED: return 1 * 1024 * 1024; default: @@ -2193,6 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, return ERR_PTR(-EINVAL); alignment = intel_surf_alignment(fb, 0); + if (WARN_ON(alignment && !is_power_of_2(alignment))) + return ERR_PTR(-EINVAL); /* Note that the w/a also requires 64 PTE of padding following the * bo. 
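With the GEN12 CCS/semiplanar changes above, the required surface alignment can now be a whole tile row (pitch * tile_height), which is generally not a power of two; that is presumably why is_power_of_2() WARN_ONs appear on the paths that still assume a power-of-two value, and why the offset rounding in the following hunk moves from mask-based alignment to rounddown(). A minimal standalone sketch of the difference (align_down_pow2() and align_down_any() are hypothetical helpers, not driver code):

#include <assert.h>

static unsigned int align_down_pow2(unsigned int offset, unsigned int align)
{
	return offset & ~(align - 1);	/* valid only when align is a power of two */
}

static unsigned int align_down_any(unsigned int offset, unsigned int align)
{
	return offset - (offset % align);	/* what rounddown(offset, align) computes */
}

int main(void)
{
	unsigned int tile_row = 5120 * 32;	/* 163840: a plausible pitch * tile_height, not 2^n */

	assert(align_down_any(200000, tile_row) == 163840);
	assert(align_down_pow2(200000, tile_row) != 163840);	/* the mask trick gives 65536 here */
	return 0;
}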
We currently fill all unused PTE with the shadow page and so @@ -2431,9 +2455,6 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, unsigned int cpp = fb->format->cpp[color_plane]; u32 offset, offset_aligned; - if (alignment) - alignment--; - if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; @@ -2455,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, *x %= tile_width; offset = (tile_rows * pitch_tiles + tiles) * tile_size; - offset_aligned = offset & ~alignment; + + offset_aligned = offset; + if (alignment) + offset_aligned = rounddown(offset_aligned, alignment); intel_adjust_tile_offset(x, y, tile_width, tile_height, tile_size, pitch_tiles, offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; - offset_aligned = offset & ~alignment; - - *y = (offset & alignment) / pitch; - *x = ((offset & alignment) - *y * pitch) / cpp; + offset_aligned = offset; + if (alignment) { + offset_aligned = rounddown(offset_aligned, alignment); + *y = (offset % alignment) / pitch; + *x = ((offset % alignment) - *y * pitch) / cpp; + } else { + *y = *x = 0; + } } return offset_aligned; @@ -2498,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y, { struct drm_i915_private *dev_priv = to_i915(fb->dev); unsigned int height; + u32 alignment; - if (fb->modifier != DRM_FORMAT_MOD_LINEAR && - fb->offsets[color_plane] % intel_tile_size(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12 && + is_semiplanar_uv_plane(fb, color_plane)) + alignment = intel_tile_row_size(fb, color_plane); + else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) + alignment = intel_tile_size(dev_priv); + else + alignment = 0; + + if (alignment != 0 && fb->offsets[color_plane] % alignment) { DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", fb->offsets[color_plane], color_plane); return -EINVAL; @@ -2537,6 +2573,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: return I915_TILING_Y; default: return I915_TILING_NONE; @@ -2588,6 +2625,30 @@ static const struct drm_format_info gen12_ccs_formats[] = { { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUYV, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .num_planes = 4, + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P012, .num_planes = 4, + 
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P016, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, }; static const struct drm_format_info * @@ -2614,6 +2675,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) ARRAY_SIZE(skl_ccs_formats), cmd->pixel_format); case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: return lookup_format_info(gen12_ccs_formats, ARRAY_SIZE(gen12_ccs_formats), cmd->pixel_format); @@ -2625,6 +2687,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) bool is_ccs_modifier(u64 modifier) { return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; } @@ -2698,7 +2761,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) } tile_width = intel_tile_width_bytes(fb, color_plane); - if (is_ccs_modifier(fb->modifier) && color_plane == 0) { + if (is_ccs_modifier(fb->modifier)) { /* * Display WA #0531: skl,bxt,kbl,glk * @@ -2708,7 +2771,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) * require the entire fb to accommodate that to avoid * potential runtime errors at plane configuration time. */ - if (IS_GEN(dev_priv, 9) && fb->width > 3840) + if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840) tile_width *= 4; /* * The main surface pitch must be padded to a multiple of four @@ -2876,11 +2939,15 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) static void intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) { + int main_plane = is_ccs_plane(fb, color_plane) ? + ccs_to_main_plane(fb, color_plane) : 0; + int main_hsub, main_vsub; int hsub, vsub; + intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); - *w = fb->width / hsub; - *h = fb->height / vsub; + *w = fb->width / main_hsub / hsub; + *h = fb->height / main_vsub / vsub; } /* @@ -3598,6 +3665,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: /* FIXME AUX plane? 
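The helpers above treat a CCS plane's subsampling as relative to its main plane, so intel_fb_plane_dims() now folds in both the main plane's and the color plane's factors. A small sketch of that arithmetic under the same assumption (plane_dims() is a hypothetical standalone helper, not the driver function):

#include <assert.h>

/*
 * Effective dimensions of a color plane whose subsampling is expressed
 * relative to an already-subsampled main plane.
 */
static void plane_dims(int fb_w, int fb_h,
		       int main_hsub, int main_vsub,	/* main plane vs. the fb */
		       int hsub, int vsub,		/* this plane vs. its main plane */
		       int *w, int *h)
{
	*w = fb_w / main_hsub / hsub;
	*h = fb_h / main_vsub / vsub;
}

int main(void)
{
	int w, h;

	/* e.g. main_hsub = vsub = 2 and a further 2x2 step: 4096x2160 -> 1024x540 */
	plane_dims(4096, 2160, 2, 2, 2, 2, &w, &h);
	assert(w == 1024 && h == 540);
	return 0;
}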
*/ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -3656,11 +3724,12 @@ static int icl_max_plane_height(void) return 4320; } -static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, - int main_x, int main_y, u32 main_offset) +static bool +skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, + int main_x, int main_y, u32 main_offset, + int ccs_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; - int ccs_plane = main_to_ccs_plane(fb, 0); int aux_x = plane_state->color_plane[ccs_plane].x; int aux_y = plane_state->color_plane[ccs_plane].y; u32 aux_offset = plane_state->color_plane[ccs_plane].offset; @@ -3737,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) intel_add_fb_offsets(&x, &y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); alignment = intel_surf_alignment(fb, 0); + if (WARN_ON(alignment && !is_power_of_2(alignment))) + return -EINVAL; /* * AUX surface offset is specified as the distance from the @@ -3772,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * they match with the main surface x/y offsets. */ if (is_ccs_modifier(fb->modifier)) { - while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, aux_plane)) { if (offset == 0) break; @@ -3805,7 +3877,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - int max_width = skl_max_plane_width(fb, 1, rotation); + int uv_plane = 1; + int max_width = skl_max_plane_width(fb, uv_plane, rotation); int max_height = 4096; int x = plane_state->uapi.src.x1 >> 17; int y = plane_state->uapi.src.y1 >> 17; @@ -3813,8 +3886,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) int h = drm_rect_height(&plane_state->uapi.src) >> 17; u32 offset; - intel_add_fb_offsets(&x, &y, plane_state, 1); - offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); + intel_add_fb_offsets(&x, &y, plane_state, uv_plane); + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, uv_plane); /* FIXME not quite sure how/if these apply to the chroma plane */ if (w > max_width || h > max_height) { @@ -3823,9 +3897,39 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) return -EINVAL; } - plane_state->color_plane[1].offset = offset; - plane_state->color_plane[1].x = x; - plane_state->color_plane[1].y = y; + if (is_ccs_modifier(fb->modifier)) { + int ccs_plane = main_to_ccs_plane(fb, uv_plane); + int aux_offset = plane_state->color_plane[ccs_plane].offset; + int alignment = intel_surf_alignment(fb, uv_plane); + + if (offset > aux_offset) + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, + aux_offset & ~(alignment - 1)); + + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, ccs_plane)) { + if (offset == 0) + break; + + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, offset - alignment); + } + + if (x != plane_state->color_plane[ccs_plane].x || + y != plane_state->color_plane[ccs_plane].y) { + DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); + return -EINVAL; + } + } + + plane_state->color_plane[uv_plane].offset = offset; + plane_state->color_plane[uv_plane].x = x; + 
plane_state->color_plane[uv_plane].y = y; return 0; } @@ -3835,21 +3939,40 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) const struct drm_framebuffer *fb = plane_state->hw.fb; int src_x = plane_state->uapi.src.x1 >> 16; int src_y = plane_state->uapi.src.y1 >> 16; - int hsub; - int vsub; - int x; - int y; u32 offset; + int ccs_plane; + + for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { + int main_hsub, main_vsub; + int hsub, vsub; + int x, y; - intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1); - x = src_x / hsub; - y = src_y / vsub; - intel_add_fb_offsets(&x, &y, plane_state, 1); - offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); + if (!is_ccs_plane(fb, ccs_plane)) + continue; + + intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, + ccs_to_main_plane(fb, ccs_plane)); + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); - plane_state->color_plane[1].offset = offset; - plane_state->color_plane[1].x = x * hsub + src_x % hsub; - plane_state->color_plane[1].y = y * vsub + src_y % vsub; + hsub *= main_hsub; + vsub *= main_vsub; + x = src_x / hsub; + y = src_y / vsub; + + intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); + + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, + ccs_plane); + + plane_state->color_plane[ccs_plane].offset = offset; + plane_state->color_plane[ccs_plane].x = (x * hsub + + src_x % hsub) / + main_hsub; + plane_state->color_plane[ccs_plane].y = (y * vsub + + src_y % vsub) / + main_vsub; + } return 0; } @@ -3858,6 +3981,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; + bool needs_aux = false; ret = intel_plane_compute_gtt(plane_state); if (ret) @@ -3867,22 +3991,32 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; /* - * Handle the AUX surface first since - * the main surface setup depends on it. + * Handle the AUX surface first since the main surface setup depends on + * it. */ + if (is_ccs_modifier(fb->modifier)) { + needs_aux = true; + ret = skl_check_ccs_aux_surface(plane_state); + if (ret) + return ret; + } + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { + needs_aux = true; ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; - } else if (is_ccs_modifier(fb->modifier)) { - ret = skl_check_ccs_aux_surface(plane_state); - if (ret) - return ret; - } else { - plane_state->color_plane[1].offset = ~0xfff; - plane_state->color_plane[1].x = 0; - plane_state->color_plane[1].y = 0; + } + + if (!needs_aux) { + int i; + + for (i = 1; i < fb->format->num_planes; i++) { + plane_state->color_plane[i].offset = ~0xfff; + plane_state->color_plane[i].x = 0; + plane_state->color_plane[i].y = 0; + } } ret = skl_check_main_surface(plane_state); @@ -4472,6 +4606,8 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier) return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: @@ -4869,8 +5005,8 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) } /* The FDI link training functions for ILK/Ibexpeak. 
*/ -static void ironlake_fdi_link_train(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state) +static void ilk_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5222,7 +5358,7 @@ train_done: DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) +static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); @@ -5259,7 +5395,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) } } -static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) +static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5289,7 +5425,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) udelay(100); } -static void ironlake_fdi_disable(struct intel_crtc *crtc) +static void ilk_fdi_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -5496,8 +5632,8 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) desired_divisor << auxdiv); } -static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, - enum pipe pch_transcoder) +static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, + enum pipe pch_transcoder) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5540,7 +5676,7 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e POSTING_READ(SOUTH_CHICKEN1); } -static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) +static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5601,8 +5737,8 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, * - DP transcoding bits * - transcoder */ -static void ironlake_pch_enable(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +static void ilk_pch_enable(const struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; @@ -5613,7 +5749,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, assert_pch_transcoder_disabled(dev_priv, pipe); if (IS_IVYBRIDGE(dev_priv)) - ivybridge_update_fdi_bc_bifurcation(crtc_state); + ivb_update_fdi_bc_bifurcation(crtc_state); /* Write the TU size bits before fdi link training, so that error * detection works. 
*/ @@ -5650,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); - ironlake_pch_transcoder_set_timings(crtc_state, pipe); + ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); @@ -5682,7 +5818,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, I915_WRITE(reg, temp); } - ironlake_enable_pch_transcoder(crtc_state); + ilk_enable_pch_transcoder(crtc_state); } static void lpt_pch_enable(const struct intel_atomic_state *state, @@ -5697,7 +5833,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, lpt_program_iclkip(crtc_state); /* Set transcoder timing. */ - ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } @@ -6001,7 +6137,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, return 0; } -void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); int i; @@ -6010,7 +6146,7 @@ void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) skl_detach_scaler(crtc, i); } -static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) +static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6047,7 +6183,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) } } -static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) +static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6411,45 +6547,29 @@ intel_connector_primary_encoder(struct intel_connector *connector) if (connector->mst_port) return &dp_to_dig_port(connector->mst_port)->base; - encoder = intel_attached_encoder(&connector->base); + encoder = intel_attached_encoder(connector); WARN_ON(!encoder); return encoder; } -static bool -intel_connector_needs_modeset(struct intel_atomic_state *state, - const struct drm_connector_state *old_conn_state, - const struct drm_connector_state *new_conn_state) -{ - struct intel_crtc *old_crtc = old_conn_state->crtc ? - to_intel_crtc(old_conn_state->crtc) : NULL; - struct intel_crtc *new_crtc = new_conn_state->crtc ? 
- to_intel_crtc(new_conn_state->crtc) : NULL; - - return new_crtc != old_crtc || - (new_crtc && - needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); -} - static void intel_encoders_update_prepare(struct intel_atomic_state *state) { - struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; - struct drm_connector *conn; + struct drm_connector *connector; int i; - for_each_oldnew_connector_in_state(&state->base, conn, - old_conn_state, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, + i) { + struct intel_connector *intel_connector; struct intel_encoder *encoder; struct intel_crtc *crtc; - if (!intel_connector_needs_modeset(state, - old_conn_state, - new_conn_state)) + if (!intel_connector_needs_modeset(state, connector)) continue; - encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + intel_connector = to_intel_connector(connector); + encoder = intel_connector_primary_encoder(intel_connector); if (!encoder->update_prepare) continue; @@ -6461,22 +6581,21 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state) static void intel_encoders_update_complete(struct intel_atomic_state *state) { - struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; - struct drm_connector *conn; + struct drm_connector *connector; int i; - for_each_oldnew_connector_in_state(&state->base, conn, - old_conn_state, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, + i) { + struct intel_connector *intel_connector; struct intel_encoder *encoder; struct intel_crtc *crtc; - if (!intel_connector_needs_modeset(state, - old_conn_state, - new_conn_state)) + if (!intel_connector_needs_modeset(state, connector)) continue; - encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + intel_connector = to_intel_connector(connector); + encoder = intel_connector_primary_encoder(intel_connector); if (!encoder->update_complete) continue; @@ -6643,8 +6762,8 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat plane->disable_plane(plane, crtc_state); } -static void ironlake_crtc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void ilk_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -6680,7 +6799,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, intel_cpu_transcoder_set_m_n(new_crtc_state, &new_crtc_state->fdi_m_n, NULL); - ironlake_set_pipeconf(new_crtc_state); + ilk_set_pipeconf(new_crtc_state); crtc->active = true; @@ -6690,13 +6809,13 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, /* Note: FDI PLL enabling _must_ be done before we enable the * cpu pipes, hence this is separate from all the other fdi/pch * enabling. 
*/ - ironlake_fdi_pll_enable(new_crtc_state); + ilk_fdi_pll_enable(new_crtc_state); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); } - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -6712,7 +6831,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, intel_enable_pipe(new_crtc_state); if (new_crtc_state->has_pch_encoder) - ironlake_pch_enable(state, new_crtc_state); + ilk_pch_enable(state, new_crtc_state); intel_crtc_vblank_on(new_crtc_state); @@ -6787,8 +6906,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) I915_WRITE(reg, val); } -static void haswell_crtc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void hsw_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -6829,7 +6948,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, if (!transcoder_is_dsi(cpu_transcoder)) { hsw_set_frame_start_delay(new_crtc_state); - haswell_set_pipeconf(new_crtc_state); + hsw_set_pipeconf(new_crtc_state); } if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) @@ -6844,9 +6963,9 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); if (INTEL_GEN(dev_priv) >= 9) - skylake_pfit_enable(new_crtc_state); + skl_pfit_enable(new_crtc_state); else - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -6895,7 +7014,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, } } -void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) +void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6910,8 +7029,8 @@ void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) } } -static void ironlake_crtc_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void ilk_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -6932,15 +7051,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state, intel_disable_pipe(old_crtc_state); - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) - ironlake_fdi_disable(crtc); + ilk_fdi_disable(crtc); intel_encoders_post_disable(state, crtc); if (old_crtc_state->has_pch_encoder) { - ironlake_disable_pch_transcoder(dev_priv, pipe); + ilk_disable_pch_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev_priv)) { i915_reg_t reg; @@ -6960,15 +7079,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state, I915_WRITE(PCH_DPLL_SEL, temp); } - ironlake_fdi_pll_disable(crtc); + ilk_fdi_pll_disable(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -static void haswell_crtc_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void hsw_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { /* * FIXME collapse everything to one hook. 
@@ -7505,8 +7624,8 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) return 0; } -static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, - struct intel_crtc_state *pipe_config) +static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->uapi.state; @@ -7578,8 +7697,8 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } #define RETRY 1 -static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, - struct intel_crtc_state *pipe_config) +static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = intel_crtc->base.dev; const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -7598,15 +7717,15 @@ retry: fdi_dotclock = adjusted_mode->crtc_clock; - lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, - pipe_config->pipe_bpp); + lane = ilk_get_lanes_required(fdi_dotclock, link_bw, + pipe_config->pipe_bpp); pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n, false, false); - ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); + ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); if (ret == -EDEADLK) return ret; @@ -7812,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, intel_crtc_compute_pixel_rate(pipe_config); if (pipe_config->has_pch_encoder) - return ironlake_fdi_compute_config(crtc, pipe_config); + return ilk_fdi_compute_config(crtc, pipe_config); return 0; } @@ -8795,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } - limit = &intel_limits_pineview_lvds; + limit = &pnv_limits_lvds; } else { - limit = &intel_limits_pineview_sdvo; + limit = &pnv_limits_sdvo; } if (!crtc_state->clock_set && @@ -9224,7 +9343,7 @@ out: return ret; } -static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) +static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; int i; @@ -9722,12 +9841,12 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) void intel_init_pch_refclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ironlake_init_pch_refclk(dev_priv); + ilk_init_pch_refclk(dev_priv); else if (HAS_PCH_LPT(dev_priv)) lpt_init_pch_refclk(dev_priv); } -static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) +static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -9783,7 +9902,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) POSTING_READ(PIPECONF(pipe)); } -static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) +static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -9871,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) } } -int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) +int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) { /* * Account for 
spread spectrum to avoid @@ -9882,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) return DIV_ROUND_UP(bps, link_bw * 8); } -static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) +static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor) { return i9xx_dpll_compute_m(dpll) < factor * dpll->n; } -static void ironlake_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) +static void ilk_compute_dpll(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct dpll *reduced_clock) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll, fp, fp2; @@ -9909,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, fp = i9xx_dpll_compute_fp(&crtc_state->dpll); - if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) + if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor)) fp |= FP_CB_TUNE; if (reduced_clock) { @@ -9989,8 +10108,8 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp1 = fp2; } -static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int ilk_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = @@ -10014,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, if (intel_is_dual_link_lvds(dev_priv)) { if (refclk == 100000) - limit = &intel_limits_ironlake_dual_lvds_100m; + limit = &ilk_limits_dual_lvds_100m; else - limit = &intel_limits_ironlake_dual_lvds; + limit = &ilk_limits_dual_lvds; } else { if (refclk == 100000) - limit = &intel_limits_ironlake_single_lvds_100m; + limit = &ilk_limits_single_lvds_100m; else - limit = &intel_limits_ironlake_single_lvds; + limit = &ilk_limits_single_lvds; } } else { - limit = &intel_limits_ironlake_dac; + limit = &ilk_limits_dac; } if (!crtc_state->clock_set && @@ -10034,7 +10153,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - ironlake_compute_dpll(crtc, crtc_state, NULL); + ilk_compute_dpll(crtc, crtc_state, NULL); if (!intel_reserve_shared_dplls(state, crtc, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", @@ -10109,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, &pipe_config->dp_m2_n2); } -static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, &pipe_config->fdi_m_n, NULL); } -static void skylake_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void skl_get_pfit_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10148,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, } static void -skylake_get_initial_plane_config(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) +skl_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10210,6 +10329,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = 
INTEL_GEN(dev_priv) >= 12 ? I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : I915_FORMAT_MOD_Y_TILED_CCS; + else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) + fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; @@ -10276,8 +10397,8 @@ error: kfree(intel_fb); } -static void ironlake_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_get_pfit_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10300,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, } } -static bool ironlake_get_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static bool ilk_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10372,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ironlake_get_fdi_m_n_config(crtc, pipe_config); + ilk_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { /* @@ -10400,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; - ironlake_pch_clock_get(crtc, pipe_config); + ilk_pch_clock_get(crtc, pipe_config); } else { pipe_config->pixel_multiplier = 1; } @@ -10408,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); - ironlake_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(crtc, pipe_config); ret = true; @@ -10417,8 +10538,9 @@ out: return ret; } -static int haswell_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) + +static int hsw_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = @@ -10439,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, return 0; } -static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 temp; @@ -10455,9 +10576,8 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum phy phy = intel_port_to_phy(dev_priv, port); enum icl_port_dpll_id port_dpll_id; @@ -10516,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 temp; @@ -10532,9 
+10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); @@ -10722,8 +10840,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, return transcoder_is_dsi(pipe_config->cpu_transcoder); } -static void haswell_get_ddi_port_state(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void hsw_get_ddi_port_state(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; @@ -10743,15 +10861,15 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, } if (INTEL_GEN(dev_priv) >= 11) - icelake_get_ddi_pll(dev_priv, port, pipe_config); + icl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_CANNONLAKE(dev_priv)) - cannonlake_get_ddi_pll(dev_priv, port, pipe_config); + cnl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_BC(dev_priv)) - skylake_get_ddi_pll(dev_priv, port, pipe_config); + skl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_LP(dev_priv)) bxt_get_ddi_pll(dev_priv, port, pipe_config); else - haswell_get_ddi_pll(dev_priv, port, pipe_config); + hsw_get_ddi_pll(dev_priv, port, pipe_config); pll = pipe_config->shared_dpll; if (pll) { @@ -10772,7 +10890,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ironlake_get_fdi_m_n_config(crtc, pipe_config); + ilk_get_fdi_m_n_config(crtc, pipe_config); } } @@ -10794,7 +10912,7 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr return master_select - 1; } -static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) +static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 transcoders; @@ -10829,8 +10947,8 @@ static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_sta crtc_state->sync_mode_slaves_mask); } -static bool haswell_get_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static bool hsw_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; @@ -10865,7 +10983,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || INTEL_GEN(dev_priv) >= 11) { - haswell_get_ddi_port_state(crtc, pipe_config); + hsw_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -10922,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, power_domain_mask |= BIT_ULL(power_domain); if (INTEL_GEN(dev_priv) >= 9) - skylake_get_pfit_config(crtc, pipe_config); + skl_get_pfit_config(crtc, pipe_config); else - ironlake_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(crtc, pipe_config); } if (hsw_crtc_supports_ips(crtc)) { @@ -10950,7 +11068,7 @@ static bool haswell_get_pipe_config(struct intel_crtc 
*crtc, if (INTEL_GEN(dev_priv) >= 11 && !transcoder_is_dsi(pipe_config->cpu_transcoder)) - icelake_get_trans_port_sync_config(pipe_config); + icl_get_trans_port_sync_config(pipe_config); out: for_each_power_domain(power_domain, power_domain_mask) @@ -11570,7 +11688,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, { struct intel_crtc *intel_crtc; struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); + intel_attached_encoder(to_intel_connector(connector)); struct drm_crtc *possible_crtc; struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; @@ -11724,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); + intel_attached_encoder(to_intel_connector(connector)); struct drm_encoder *encoder = &intel_encoder->base; struct drm_atomic_state *state = old->restore_state; int ret; @@ -11867,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq, return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } -static void ironlake_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -11897,6 +12015,7 @@ static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, crtc_state->hsw_workaround_pipe = INVALID_PIPE; crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; crtc_state->scaler_state.scaler_id = -1; + crtc_state->mst_master_transcoder = INVALID_TRANSCODER; } static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) @@ -12278,88 +12397,121 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } -static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) +static bool +intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state) +{ + struct drm_crtc *crtc = crtc_state->uapi.crtc; + struct drm_atomic_state *state = crtc_state->uapi.state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int i; + + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc != crtc) + continue; + if (connector->has_tile && + connector->tile_h_loc == connector->num_h_tile - 1 && + connector->tile_v_loc == connector->num_v_tile - 1) + return true; + } + + return false; +} + +static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state) +{ + crtc_state->master_transcoder = INVALID_TRANSCODER; + crtc_state->sync_mode_slaves_mask = 0; +} + +static int icl_compute_port_sync_crtc_state(struct drm_connector *connector, + struct intel_crtc_state *crtc_state, + int num_tiled_conns) { struct drm_crtc *crtc = crtc_state->uapi.crtc; struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct drm_connector *master_connector, *connector; - struct drm_connector_state *connector_state; + struct drm_connector *master_connector; struct drm_connector_list_iter conn_iter; struct drm_crtc *master_crtc = NULL; struct drm_crtc_state *master_crtc_state; struct intel_crtc_state *master_pipe_config; - int i, tile_group_id; if (INTEL_GEN(dev_priv) < 11) return 0; + if (!intel_crtc_has_type(crtc_state, 
INTEL_OUTPUT_DP)) + return 0; + /* * In case of tiled displays there could be one or more slaves but there is * only one master. Lets make the CRTC used by the connector corresponding * to the last horizonal and last vertical tile a master/genlock CRTC. * All the other CRTCs corresponding to other tiles of the same Tile group * are the slave CRTCs and hold a pointer to their genlock CRTC. + * If all tiles not present do not make master slave assignments. */ - for_each_new_connector_in_state(&state->base, connector, connector_state, i) { - if (connector_state->crtc != crtc) - continue; - if (!connector->has_tile) + if (!connector->has_tile || + crtc_state->hw.mode.hdisplay != connector->tile_h_size || + crtc_state->hw.mode.vdisplay != connector->tile_v_size || + num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { + reset_port_sync_mode_state(crtc_state); + return 0; + } + /* Last Horizontal and last vertical tile connector is a master + * Master's crtc state is already populated in slave for port sync + */ + if (connector->tile_h_loc == connector->num_h_tile - 1 && + connector->tile_v_loc == connector->num_v_tile - 1) + return 0; + + /* Loop through all connectors and configure the Slave crtc_state + * to point to the correct master. + */ + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(master_connector, &conn_iter) { + struct drm_connector_state *master_conn_state = NULL; + + if (!(master_connector->has_tile && + master_connector->tile_group->id == connector->tile_group->id)) continue; - if (crtc_state->hw.mode.hdisplay != connector->tile_h_size || - crtc_state->hw.mode.vdisplay != connector->tile_v_size) - return 0; - if (connector->tile_h_loc == connector->num_h_tile - 1 && - connector->tile_v_loc == connector->num_v_tile - 1) + if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || + master_connector->tile_v_loc != master_connector->num_v_tile - 1) continue; - crtc_state->sync_mode_slaves_mask = 0; - tile_group_id = connector->tile_group->id; - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(master_connector, &conn_iter) { - struct drm_connector_state *master_conn_state = NULL; - - if (!master_connector->has_tile) - continue; - if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || - master_connector->tile_v_loc != master_connector->num_v_tile - 1) - continue; - if (master_connector->tile_group->id != tile_group_id) - continue; - master_conn_state = drm_atomic_get_connector_state(&state->base, - master_connector); - if (IS_ERR(master_conn_state)) { - drm_connector_list_iter_end(&conn_iter); - return PTR_ERR(master_conn_state); - } - if (master_conn_state->crtc) { - master_crtc = master_conn_state->crtc; - break; - } + master_conn_state = drm_atomic_get_connector_state(&state->base, + master_connector); + if (IS_ERR(master_conn_state)) { + drm_connector_list_iter_end(&conn_iter); + return PTR_ERR(master_conn_state); } - drm_connector_list_iter_end(&conn_iter); - - if (!master_crtc) { - DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", - connector_state->crtc->base.id); - return -EINVAL; + if (master_conn_state->crtc) { + master_crtc = master_conn_state->crtc; + break; } + } + drm_connector_list_iter_end(&conn_iter); - master_crtc_state = drm_atomic_get_crtc_state(&state->base, - master_crtc); - if (IS_ERR(master_crtc_state)) - return PTR_ERR(master_crtc_state); - - master_pipe_config = to_intel_crtc_state(master_crtc_state); - 
crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; - master_pipe_config->sync_mode_slaves_mask |= - BIT(crtc_state->cpu_transcoder); - DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", - transcoder_name(crtc_state->master_transcoder), - crtc_state->uapi.crtc->base.id, - master_pipe_config->sync_mode_slaves_mask); + if (!master_crtc) { + DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", + crtc->base.id); + return -EINVAL; } + master_crtc_state = drm_atomic_get_crtc_state(&state->base, + master_crtc); + if (IS_ERR(master_crtc_state)) + return PTR_ERR(master_crtc_state); + + master_pipe_config = to_intel_crtc_state(master_crtc_state); + crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; + master_pipe_config->sync_mode_slaves_mask |= + BIT(crtc_state->cpu_transcoder); + DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", + transcoder_name(crtc_state->master_transcoder), + crtc->base.id, + master_pipe_config->sync_mode_slaves_mask); + return 0; } @@ -12755,6 +12907,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, pipe_config->csc_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); + DRM_DEBUG_KMS("MST master transcoder: %s\n", + transcoder_name(pipe_config->mst_master_transcoder)); + dump_planes: if (!state) return; @@ -12901,9 +13056,11 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) saved_state->wm = crtc_state->wm; /* * Save the slave bitmask which gets filled for master crtc state during - * slave atomic check call. + * slave atomic check call. For all other CRTCs reset the port sync variables + * crtc_state->master_transcoder needs to be set to INVALID */ - if (is_trans_port_sync_master(crtc_state)) + reset_port_sync_mode_state(saved_state); + if (intel_atomic_is_master_connector(crtc_state)) saved_state->sync_mode_slaves_mask = crtc_state->sync_mode_slaves_mask; @@ -12924,7 +13081,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) struct drm_connector *connector; struct drm_connector_state *connector_state; int base_bpp, ret; - int i; + int i, tile_group_id = -1, num_tiled_conns = 0; bool retry = true; pipe_config->cpu_transcoder = @@ -12994,13 +13151,22 @@ encoder_retry: drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, CRTC_STEREO_DOUBLE); - /* Set the crtc_state defaults for trans_port_sync */ - pipe_config->master_transcoder = INVALID_TRANSCODER; - ret = icl_add_sync_mode_crtcs(pipe_config); - if (ret) { - DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", - ret); - return ret; + /* Get tile_group_id of tiled connector */ + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc == crtc && + connector->has_tile) { + tile_group_id = connector->tile_group->id; + break; + } + } + + /* Get total number of tiled connectors in state that belong to + * this tile group. 
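The assignment above boils down to a position rule: only the connector driving the last horizontal and last vertical tile of its tile group becomes the port-sync master, every other tile is a slave pointing at that master's transcoder, and nothing is assigned unless all tiles of the group are present. A compact sketch of the rule (is_master_tile() is an illustrative helper, not the driver's):

#include <assert.h>
#include <stdbool.h>

/* Which tile of a tile group acts as the port-sync master. */
static bool is_master_tile(int tile_h_loc, int tile_v_loc,
			   int num_h_tile, int num_v_tile)
{
	return tile_h_loc == num_h_tile - 1 &&
	       tile_v_loc == num_v_tile - 1;
}

int main(void)
{
	/* e.g. a 2x1 tiled panel: the right-hand tile (1, 0) is the master */
	assert(is_master_tile(1, 0, 2, 1));
	assert(!is_master_tile(0, 0, 2, 1));
	return 0;
}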
+ */ + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector->has_tile && + connector->tile_group->id == tile_group_id) + num_tiled_conns++; } /* Pass our mode to the connectors and the CRTC to give them a chance to @@ -13011,6 +13177,14 @@ encoder_retry: if (connector_state->crtc != crtc) continue; + ret = icl_compute_port_sync_crtc_state(connector, pipe_config, + num_tiled_conns); + if (ret) { + DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", + ret); + return ret; + } + encoder = to_intel_encoder(connector_state->best_encoder); ret = encoder->compute_config(encoder, pipe_config, connector_state); @@ -13535,6 +13709,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(dsc.dsc_split); PIPE_CONF_CHECK_I(dsc.compressed_bpp); + PIPE_CONF_CHECK_I(mst_master_transcoder); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -14048,7 +14224,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) * multiple pipes, and planes are enabled after the pipe, we need to wait at * least 2 vblanks on the first pipe before enabling planes on the second pipe. */ -static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) +static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; @@ -14143,7 +14319,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state) intel_modeset_clear_plls(state); if (IS_HASWELL(dev_priv)) - return haswell_mode_set_planes_workaround(state); + return hsw_mode_set_planes_workaround(state); return 0; } @@ -14173,7 +14349,11 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->uapi.mode_changed = false; new_crtc_state->update_pipe = true; +} +static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ /* * If we're not doing the full modeset we want to * keep the current M/N values as they may be @@ -14296,6 +14476,107 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state) return 0; } +static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state, + enum transcoder transcoder) +{ + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + if (new_crtc_state->cpu_transcoder == transcoder) + return needs_modeset(new_crtc_state); + + return false; +} + +static void +intel_modeset_synced_crtcs(struct intel_atomic_state *state, + u8 transcoders) +{ + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + if (transcoders & BIT(new_crtc_state->cpu_transcoder)) { + new_crtc_state->uapi.mode_changed = true; + new_crtc_state->update_pipe = false; + } + } +} + +static int +intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int ret = 0; + + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + + if (!connector->has_tile || + connector->tile_group->id != tile_grp_id) + continue; + conn_state = 
drm_atomic_get_connector_state(&state->base, + connector); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + break; + } + + if (!conn_state->crtc) + continue; + + crtc_state = drm_atomic_get_crtc_state(&state->base, + conn_state->crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + break; + } + crtc_state->mode_changed = true; + ret = drm_atomic_add_affected_connectors(&state->base, + conn_state->crtc); + if (ret) + break; + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int +intel_atomic_check_tiled_conns(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector *connector; + struct drm_connector_state *old_conn_state, *new_conn_state; + int i, ret; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + /* Is tiled, mark all other tiled CRTCs as needing a modeset */ + for_each_oldnew_connector_in_state(&state->base, connector, + old_conn_state, new_conn_state, i) { + if (!connector->has_tile) + continue; + if (!intel_connector_needs_modeset(state, connector)) + continue; + + ret = intel_modeset_all_tiles(state, connector->tile_group->id); + if (ret) + return ret; + } + + return 0; +} + /** * intel_atomic_check - validate state object * @dev: drm device @@ -14323,6 +14604,21 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + /** + * This check adds all the connectors in current state that belong to + * the same tile group to a full modeset. + * This function directly sets the mode_changed to true and we also call + * drm_atomic_add_affected_connectors(). Hence we are not explicitly + * calling drm_atomic_helper_check_modeset() after this. + * + * Fixme: Handle some corner cases where one of the + * tiled connectors gets disconnected and tile info is lost but since it + * was previously synced to other conn, we need to add that to the modeset. + */ + ret = intel_atomic_check_tiled_conns(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) { @@ -14334,8 +14630,6 @@ static int intel_atomic_check(struct drm_device *dev, if (!new_crtc_state->uapi.enable) { intel_crtc_copy_uapi_to_hw_state(new_crtc_state); - - any_ms = true; continue; } @@ -14348,9 +14642,49 @@ static int intel_atomic_check(struct drm_device *dev, goto fail; intel_crtc_check_fastset(old_crtc_state, new_crtc_state); + } + + /** + * Check if fastset is allowed by external dependencies like other + * pipes and transcoders. + * + * Right now it only forces a fullmodeset when the MST master + * transcoder did not changed but the pipe of the master transcoder + * needs a fullmodeset so all slaves also needs to do a fullmodeset or + * in case of port synced crtcs, if one of the synced crtcs + * needs a full modeset, all other synced crtcs should be + * forced a full modeset. 
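The loop that follows enforces this by building a transcoder mask for the synced group (sync_mode_slaves_mask plus the master's bit) and forcing mode_changed on every CRTC whose transcoder falls in that mask. A reduced sketch of the propagation with simplified, hypothetical types in place of the driver's state:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Minimal stand-in for the per-pipe state touched by this check. */
struct pipe_state {
	int cpu_transcoder;
	bool mode_changed;
};

/* Force a full modeset on every pipe whose transcoder is in the synced set. */
static void force_modeset_on_synced(struct pipe_state *pipes, int n,
				    uint8_t synced_transcoders)
{
	for (int i = 0; i < n; i++) {
		if (synced_transcoders & BIT(pipes[i].cpu_transcoder))
			pipes[i].mode_changed = true;
	}
}

int main(void)
{
	struct pipe_state pipes[] = { { 0, false }, { 1, false }, { 2, false } };
	uint8_t synced = BIT(1) | BIT(2);	/* slaves_mask | BIT(master) */

	force_modeset_on_synced(pipes, 3, synced);
	assert(!pipes[0].mode_changed && pipes[1].mode_changed && pipes[2].mode_changed);
	return 0;
}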
+ */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) + continue; + + if (intel_dp_mst_is_slave_trans(new_crtc_state)) { + enum transcoder master = new_crtc_state->mst_master_transcoder; + + if (intel_cpu_transcoder_needs_modeset(state, master)) { + new_crtc_state->uapi.mode_changed = true; + new_crtc_state->update_pipe = false; + } + } else if (is_trans_port_sync_mode(new_crtc_state)) { + u8 trans = new_crtc_state->sync_mode_slaves_mask | + BIT(new_crtc_state->master_transcoder); - if (needs_modeset(new_crtc_state)) + intel_modeset_synced_crtcs(state, trans); + } + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (needs_modeset(new_crtc_state)) { any_ms = true; + continue; + } + + if (!new_crtc_state->update_pipe) + continue; + + intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); } if (any_ms && !check_digital_port_conflicts(state)) { @@ -14472,12 +14806,12 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, skl_detach_scalers(new_crtc_state); if (new_crtc_state->pch_pfit.enabled) - skylake_pfit_enable(new_crtc_state); + skl_pfit_enable(new_crtc_state); } else if (HAS_PCH_SPLIT(dev_priv)) { if (new_crtc_state->pch_pfit.enabled) - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); else if (old_crtc_state->pch_pfit.enabled) - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); } if (INTEL_GEN(dev_priv) >= 11) @@ -14619,7 +14953,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) u32 handled = 0; int i; - /* Only disable port sync slaves */ + /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) @@ -14633,7 +14967,8 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * slave CRTCs are disabled first and then master CRTC since * Slave vblanks are masked till Master Vblanks. */ - if (!is_trans_port_sync_slave(old_crtc_state)) + if (!is_trans_port_sync_slave(old_crtc_state) && + !intel_dp_mst_is_slave_trans(old_crtc_state)) continue; intel_pre_plane_update(state, crtc); @@ -14694,10 +15029,14 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, if (conn_state->crtc == &crtc->base) break; } - intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base); + intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn))); intel_dp_stop_link_train(intel_dp); } +/* + * TODO: This is only called from port sync and it is identical to what will be + * executed again in intel_update_crtc() over port sync pipes + */ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, struct intel_atomic_state *state) { @@ -14786,15 +15125,21 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - u8 dirty_pipes = 0; + const u8 num_pipes = INTEL_NUM_PIPES(dev_priv); + u8 update_pipes = 0, modeset_pipes = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!new_crtc_state->hw.active) + continue; + /* ignore allocations for crtc's that have been turned off. 
*/ - if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active) + if (!needs_modeset(new_crtc_state)) { entries[i] = old_crtc_state->wm.skl.ddb; - if (new_crtc_state->hw.active) - dirty_pipes |= BIT(crtc->pipe); + update_pipes |= BIT(crtc->pipe); + } else { + modeset_pipes |= BIT(crtc->pipe); + } } /* If 2nd DBuf slice required, enable it here */ @@ -14804,38 +15149,29 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) /* * Whenever the number of active pipes changes, we need to make sure we * update the pipes in the right order so that their ddb allocations - * never overlap with eachother inbetween CRTC updates. Otherwise we'll + * never overlap with each other between CRTC updates. Otherwise we'll * cause pipe underruns and other bad stuff. + * + * So first lets enable all pipes that do not need a fullmodeset as + * those don't have any external dependency. */ - while (dirty_pipes) { + while (update_pipes) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { enum pipe pipe = crtc->pipe; - bool modeset = needs_modeset(new_crtc_state); - if ((dirty_pipes & BIT(pipe)) == 0) + if ((update_pipes & BIT(pipe)) == 0) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, - INTEL_NUM_PIPES(dev_priv), i)) + entries, num_pipes, i)) continue; entries[i] = new_crtc_state->wm.skl.ddb; - dirty_pipes &= ~BIT(pipe); - - if (modeset && is_trans_port_sync_mode(new_crtc_state)) { - if (is_trans_port_sync_master(new_crtc_state)) - intel_update_trans_port_sync_crtcs(crtc, - state, - old_crtc_state, - new_crtc_state); - else - continue; - } else { - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); - } + update_pipes &= ~BIT(pipe); + + intel_update_crtc(crtc, state, old_crtc_state, + new_crtc_state); /* * If this is an already active pipe, it's DDB changed, @@ -14845,11 +15181,72 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) */ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && - !modeset && dirty_pipes) + (update_pipes | modeset_pipes)) intel_wait_for_vblank(dev_priv, pipe); } } + /* + * Enable all pipes that needs a modeset and do not depends on other + * pipes + */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((modeset_pipes & BIT(pipe)) == 0) + continue; + + if (intel_dp_mst_is_slave_trans(new_crtc_state) || + is_trans_port_sync_slave(new_crtc_state)) + continue; + + WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, + entries, num_pipes, i)); + + entries[i] = new_crtc_state->wm.skl.ddb; + modeset_pipes &= ~BIT(pipe); + + if (is_trans_port_sync_mode(new_crtc_state)) { + struct intel_crtc *slave_crtc; + + intel_update_trans_port_sync_crtcs(crtc, state, + old_crtc_state, + new_crtc_state); + + slave_crtc = intel_get_slave_crtc(new_crtc_state); + /* TODO: update entries[] of slave */ + modeset_pipes &= ~BIT(slave_crtc->pipe); + + } else { + intel_update_crtc(crtc, state, old_crtc_state, + new_crtc_state); + } + } + + /* + * Finally enable all pipes that needs a modeset and depends on + * other pipes, right now it is only MST slaves as both port sync slave + * and master are enabled together + */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((modeset_pipes & BIT(pipe)) == 0) + continue; + + WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, + 
entries, num_pipes, i)); + + entries[i] = new_crtc_state->wm.skl.ddb; + modeset_pipes &= ~BIT(pipe); + + intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state); + } + + WARN_ON(modeset_pipes); + /* If 2nd DBuf slice is no more required disable it */ if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) icl_dbuf_slices_update(dev_priv, required_slices); @@ -16586,8 +16983,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ - if (mode_cmd->offsets[0] != 0) + if (mode_cmd->offsets[0] != 0) { + DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n", + mode_cmd->offsets[0]); goto err; + } drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); @@ -16814,29 +17214,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) intel_init_cdclk_hooks(dev_priv); if (INTEL_GEN(dev_priv) >= 9) { - dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.get_pipe_config = hsw_get_pipe_config; dev_priv->display.get_initial_plane_config = - skylake_get_initial_plane_config; - dev_priv->display.crtc_compute_clock = - haswell_crtc_compute_clock; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; + skl_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock; + dev_priv->display.crtc_enable = hsw_crtc_enable; + dev_priv->display.crtc_disable = hsw_crtc_disable; } else if (HAS_DDI(dev_priv)) { - dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.get_pipe_config = hsw_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = - haswell_crtc_compute_clock; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; + hsw_crtc_compute_clock; + dev_priv->display.crtc_enable = hsw_crtc_enable; + dev_priv->display.crtc_disable = hsw_crtc_disable; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.get_pipe_config = ilk_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = - ironlake_crtc_compute_clock; - dev_priv->display.crtc_enable = ironlake_crtc_enable; - dev_priv->display.crtc_disable = ironlake_crtc_disable; + ilk_crtc_compute_clock; + dev_priv->display.crtc_enable = ilk_crtc_enable; + dev_priv->display.crtc_disable = ilk_crtc_disable; } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -16882,7 +17281,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } if (IS_GEN(dev_priv, 5)) { - dev_priv->display.fdi_link_train = ironlake_fdi_link_train; + dev_priv->display.fdi_link_train = ilk_fdi_link_train; } else if (IS_GEN(dev_priv, 6)) { dev_priv->display.fdi_link_train = gen6_fdi_link_train; } else if (IS_IVYBRIDGE(dev_priv)) { @@ -17827,8 +18226,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) static void intel_early_display_was(struct drm_i915_private *dev_priv) { - /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + /* + * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl + * Also known as Wa_14010480278. 
+ */ + if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); @@ -17928,7 +18330,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* We need to sanitize only the MST primary port. */ if (encoder->type != INTEL_OUTPUT_DP_MST && intel_phy_is_tc(dev_priv, phy)) - intel_tc_port_sanitize(enc_to_dig_port(&encoder->base)); + intel_tc_port_sanitize(enc_to_dig_port(encoder)); } get_encoder_power_domains(dev_priv); @@ -18101,6 +18503,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915) intel_gmbus_teardown(i915); + intel_bw_cleanup(i915); + destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 0fef9263cddc..028aab728514 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -474,6 +474,7 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); bool is_ccs_modifier(u64 modifier); +int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); @@ -521,7 +522,7 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); -int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); +int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, unsigned int expected_mask); @@ -578,8 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); -void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state); -void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); +void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 679457156797..21561acfa3ac 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -514,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, if (encoder->type == INTEL_OUTPUT_DP_MST) continue; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); if (WARN_ON(!dig_port)) continue; @@ -1664,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder)); + enum 
dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 83ea04149b77..888ea8a170d1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -90,8 +90,8 @@ struct intel_framebuffer { /* for each plane in the normal GTT view */ struct { unsigned int x, y; - } normal[2]; - /* for each plane in the rotated GTT view */ + } normal[4]; + /* for each plane in the rotated GTT view for no-CCS formats */ struct { unsigned int x, y; unsigned int pitch; /* pixels */ @@ -555,7 +555,7 @@ struct intel_plane_state { */ u32 stride; int x, y; - } color_plane[2]; + } color_plane[4]; /* plane control register */ u32 ctl; @@ -1054,6 +1054,9 @@ struct intel_crtc_state { /* Bitmask to indicate slaves attached */ u8 sync_mode_slaves_mask; + + /* Only valid on TGL+ */ + enum transcoder mst_master_transcoder; }; struct intel_crtc { @@ -1435,9 +1438,9 @@ struct intel_load_detect_pipe { }; static inline struct intel_encoder * -intel_attached_encoder(struct drm_connector *connector) +intel_attached_encoder(struct intel_connector *connector) { - return to_intel_connector(connector)->encoder; + return connector->encoder; } static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) @@ -1454,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) } static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) +enc_to_dig_port(struct intel_encoder *encoder) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = encoder; if (intel_encoder_is_dig_port(intel_encoder)) - return container_of(encoder, struct intel_digital_port, + return container_of(&encoder->base, struct intel_digital_port, base.base); else return NULL; @@ -1468,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder) static inline struct intel_digital_port * conn_to_dig_port(struct intel_connector *connector) { - return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); + return enc_to_dig_port(intel_attached_encoder(connector)); } static inline struct intel_dp_mst_encoder * -enc_to_mst(struct drm_encoder *encoder) +enc_to_mst(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dp_mst_encoder, base.base); + return container_of(&encoder->base, struct intel_dp_mst_encoder, + base.base); } -static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) +static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; } @@ -1490,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) return true; case INTEL_OUTPUT_DDI: /* Skip pure HDMI/DVI DDI encoders */ - return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg); + return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg); default: return false; } } static inline struct intel_lspcon * -enc_to_intel_lspcon(struct drm_encoder *encoder) +enc_to_intel_lspcon(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->lspcon; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 2f31d226c6eb..c7424e2a04a3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -146,9 +146,9 @@ bool 
intel_dp_is_edp(struct intel_dp *intel_dp) return intel_dig_port->base.type == INTEL_OUTPUT_EDP; } -static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +static struct intel_dp *intel_attached_dp(struct intel_connector *connector) { - return enc_to_intel_dp(&intel_attached_encoder(connector)->base); + return enc_to_intel_dp(intel_attached_encoder(connector)); } static void intel_dp_link_down(struct intel_encoder *encoder, @@ -614,7 +614,7 @@ static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_i915_private *dev_priv = to_i915(connector->dev); @@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) * Pick one that's not used by other ports. */ for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (encoder->type == INTEL_OUTPUT_EDP) { WARN_ON(intel_dp->active_pipe != INVALID_PIPE && @@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) */ for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); WARN_ON(intel_dp->active_pipe != INVALID_PIPE); @@ -2034,7 +2034,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u8 line_buf_depth; int ret; @@ -2205,7 +2205,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; int common_len; int ret; @@ -2366,8 +2366,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); enum port port = encoder->port; struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; @@ -2482,7 +2482,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -2509,7 
+2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, * * CPT PCH is quite different, having many bits moved * to the TRANS_DP_CTL register instead. That - * configuration happens (oddly) in ironlake_pch_enable + * configuration happens (oddly) in ilk_pch_enable */ /* Preserve the BIOS-computed detected bit. This is @@ -2653,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) * is locked */ -static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) +static u32 ilk_get_pp_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; @@ -2703,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; pp_stat_reg = _pp_stat_reg(intel_dp); @@ -2768,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) intel_dig_port->base.base.base.id, intel_dig_port->base.base.name); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -2864,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) wait_panel_power_cycle(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); if (IS_GEN(dev_priv, 5)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; @@ -2919,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", dig_port->base.base.base.id, dig_port->base.base.name); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. */ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | @@ -2968,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; I915_WRITE(pp_ctrl_reg, pp); @@ -2980,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); if (!intel_dp_is_edp(intel_dp)) return; @@ -3004,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; I915_WRITE(pp_ctrl_reg, pp); @@ -3018,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) /* Disable backlight PP control and backlight PWM. 
*/ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); if (!intel_dp_is_edp(intel_dp)) return; @@ -3036,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) static void intel_edp_backlight_power(struct intel_connector *connector, bool enable) { - struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + struct intel_dp *intel_dp = intel_attached_dp(connector); intel_wakeref_t wakeref; bool is_enabled; is_enabled = false; with_pps_lock(intel_dp, wakeref) - is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; + is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; if (is_enabled == enable) return; @@ -3079,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) -static void ironlake_edp_pll_on(struct intel_dp *intel_dp, - const struct intel_crtc_state *pipe_config) +static void ilk_edp_pll_on(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); @@ -3119,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp, udelay(200); } -static void ironlake_edp_pll_off(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) +static void ilk_edp_pll_off(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); @@ -3258,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_wakeref_t wakeref; bool ret; @@ -3279,7 +3279,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 tmp, flags = 0; enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); @@ -3363,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; @@ -3397,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = 
enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; /* @@ -3410,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder, /* Only ilk+ has port A */ if (port == PORT_A) - ironlake_edp_pll_off(intel_dp, old_crtc_state); + ilk_edp_pll_off(intel_dp, old_crtc_state); } static void vlv_post_disable_dp(struct intel_encoder *encoder, @@ -3548,7 +3548,7 @@ static void intel_enable_dp(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; @@ -3608,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; intel_dp_prepare(encoder, pipe_config); /* Only ilk+ has port A */ if (port == PORT_A) - ironlake_edp_pll_on(intel_dp, pipe_config); + ilk_edp_pll_on(intel_dp, pipe_config); } static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) @@ -3658,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->pps_mutex); for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); WARN(intel_dp->active_pipe == pipe, "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", @@ -3681,7 +3681,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); lockdep_assert_held(&dev_priv->pps_mutex); @@ -4203,7 +4203,7 @@ intel_dp_link_down(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; u32 DP = intel_dp->DP; @@ -4903,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; return DP_TEST_ACK; } @@ -4947,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) } /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; return test_result; } @@ -5096,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp 
*intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; struct drm_connector_state *conn_state; struct intel_crtc_state *crtc_state; @@ -5536,7 +5536,7 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, static bool icp_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) @@ -5651,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; @@ -5755,7 +5755,7 @@ out: static void intel_dp_force(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); @@ -5790,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, fall back to fixed mode */ - if (intel_dp_is_edp(intel_attached_dp(connector)) && + if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -5808,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) static int intel_dp_connector_register(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); int ret; ret = intel_connector_register(connector); @@ -5830,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector) static void intel_dp_connector_unregister(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); drm_dp_cec_unregister_connector(&intel_dp->aux); drm_dp_aux_unregister(&intel_dp->aux); @@ -5839,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector) void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); struct intel_dp *intel_dp = &intel_dig_port->dp; intel_dp_mst_encoder_cleanup(intel_dig_port); @@ -5868,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) intel_dp_encoder_flush_work(encoder); drm_encoder_cleanup(encoder); - kfree(enc_to_dig_port(encoder)); + kfree(enc_to_dig_port(to_intel_encoder(encoder))); } void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ 
-5904,7 +5904,7 @@ static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, u8 *an) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); static const struct drm_dp_aux_msg msg = { .request = DP_AUX_NATIVE_WRITE, .address = DP_AUX_HDCP_AKSV, @@ -6514,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); intel_wakeref_t wakeref; @@ -6693,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) intel_pps_get_registers(intel_dp, ®s); - pp_ctl = ironlake_get_pp_control(intel_dp); + pp_ctl = ilk_get_pp_control(intel_dp); /* Ensure PPS is unlocked */ if (!HAS_DDI(dev_priv)) @@ -6863,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, * soon as the new power sequencer gets initialized. */ if (force_disable_vdd) { - u32 pp = ironlake_get_pp_control(intel_dp); + u32 pp = ilk_get_pp_control(intel_dp); WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); @@ -7660,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) if (encoder->type != INTEL_OUTPUT_DDI) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (!intel_dp->can_mst) continue; @@ -7681,7 +7681,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) if (encoder->type != INTEL_OUTPUT_DDI) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (!intel_dp->can_mst) continue; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 020422da2ae2..7c653f8c307f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) */ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 read_val[2] = { 0x0 }; u16 level = 0; @@ -82,7 +82,7 @@ static void intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 vals[2] = { 0x0 }; vals[0] = level; @@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; u8 pn, pn_min, pn_max; @@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st const struct drm_connector_state *conn_state) { struct intel_connector *connector = 
to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, @@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) { - set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false); + set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), + false); } static int intel_dp_aux_setup_backlight(struct intel_connector *connector, enum pipe pipe) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct intel_panel *panel = &connector->panel; if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) @@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, static bool intel_dp_aux_display_control_capable(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); /* Check the eDP Display control capabilities registers to determine if * the panel can support backlight control over the aux channel diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 53bc14d0e953..cba68c5a80fa 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -43,7 +43,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct link_config_limits *limits) { struct drm_atomic_state *state = crtc_state->uapi.state; - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -88,12 +88,58 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, return 0; } +/* + * Iterate over all connectors and return the smallest transcoder in the MST + * stream + */ +static enum transcoder +intel_dp_mst_master_trans_compute(struct intel_atomic_state *state, + struct intel_dp *mst_port) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_digital_connector_state *conn_state; + struct intel_connector *connector; + enum pipe ret = I915_MAX_PIPES; + int i; + + if (INTEL_GEN(dev_priv) < 12) + return INVALID_TRANSCODER; + + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (connector->mst_port != mst_port || !conn_state->base.crtc) + continue; + + crtc = to_intel_crtc(conn_state->base.crtc); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + if (!crtc_state->uapi.active) + continue; + + /* + * Using crtc->pipe because crtc_state->cpu_transcoder is + * computed, so other CRTCs could still have a non-computed + * cpu_transcoder + */ + if (crtc->pipe < ret) + ret = crtc->pipe; + } + + if (ret == I915_MAX_PIPES) + return INVALID_TRANSCODER; + + /* A simple cast works because TGL doesn't have an eDP transcoder */ + return (enum transcoder)ret; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct 
intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -155,24 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp); + + return 0; +} + +/* + * If one of the connectors in an MST stream needs a modeset, mark all CRTCs + * that share the same MST stream as mode changed; + * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do + * a fastset when possible. + */ +static int +intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, + struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector_list_iter connector_list_iter; + struct intel_connector *connector_iter; + + if (INTEL_GEN(dev_priv) < 12) + return 0; + + if (!intel_connector_needs_modeset(state, &connector->base)) + return 0; + + drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter); + for_each_intel_connector_iter(connector_iter, &connector_list_iter) { + struct intel_digital_connector_state *conn_iter_state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int ret; + + if (connector_iter->mst_port != connector->mst_port || + connector_iter == connector) + continue; + + conn_iter_state = intel_atomic_get_digital_connector_state(state, + connector_iter); + if (IS_ERR(conn_iter_state)) { + drm_connector_list_iter_end(&connector_list_iter); + return PTR_ERR(conn_iter_state); + } + + if (!conn_iter_state->base.crtc) + continue; + + crtc = to_intel_crtc(conn_iter_state->base.crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) { + drm_connector_list_iter_end(&connector_list_iter); + return PTR_ERR(crtc_state); + } + + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + if (ret) { + drm_connector_list_iter_end(&connector_list_iter); + return ret; + } + crtc_state->uapi.mode_changed = true; + } + drm_connector_list_iter_end(&connector_list_iter); + return 0; +} + static int intel_dp_mst_atomic_check(struct drm_connector *connector, - struct drm_atomic_state *state) + struct drm_atomic_state *_state) { + struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_connector_state *new_conn_state = - drm_atomic_get_new_connector_state(state, connector); + drm_atomic_get_new_connector_state(&state->base, connector); struct drm_connector_state *old_conn_state = - drm_atomic_get_old_connector_state(state, connector); + drm_atomic_get_old_connector_state(&state->base, connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_crtc *new_crtc = new_conn_state->crtc; struct drm_dp_mst_topology_mgr *mgr; int ret; - ret = intel_digital_connector_atomic_check(connector, state); + ret = intel_digital_connector_atomic_check(connector, &state->base); + if (ret) + return ret; + + ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state); if (ret) return ret; @@ -183,12 +296,9 @@ 
intel_dp_mst_atomic_check(struct drm_connector *connector, * connector */ if (new_crtc) { - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc); struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(intel_state, - intel_crtc); + intel_atomic_get_new_crtc_state(state, intel_crtc); if (!crtc_state || !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) || @@ -196,8 +306,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, return 0; } - mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr; - ret = drm_dp_atomic_release_vcpi_slots(state, mgr, + mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr; + ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr, intel_connector->port); return ret; @@ -207,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = @@ -231,29 +341,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); bool last_mst_stream; + u32 val; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; + WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream && + !intel_dp_mst_is_master_trans(old_crtc_state)); intel_crtc_vblank_off(old_crtc_state); intel_disable_pipe(old_crtc_state); + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); + val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val); + + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, + DP_TP_STATUS_ACT_SENT, 1)) + DRM_ERROR("Timed out waiting for ACT sent when disabling\n"); + drm_dp_check_act_status(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); + intel_ddi_disable_transcoder_func(old_crtc_state); if (INTEL_GEN(dev_priv) >= 9) - skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); else - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); /* + * Power down mst path before disabling the port, otherwise we end + * up getting interrupts from the sink upon detecting link loss. 
+ */ + drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, + false); + /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" * @@ -263,19 +395,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream) intel_ddi_disable_pipe_clock(old_crtc_state); - /* this can fail */ - drm_dp_check_act_status(&intel_dp->mst_mgr); - /* and this can also fail */ - drm_dp_update_payload_part2(&intel_dp->mst_mgr); - - drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); - - /* - * Power down mst path before disabling the port, otherwise we end - * up getting interrupts from the sink upon detecting link loss. - */ - drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, - false); intel_mst->connector = NULL; if (last_mst_stream) @@ -289,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; @@ -302,7 +421,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -318,6 +437,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, connector->encoder = encoder; intel_mst->connector = connector; first_mst_stream = intel_dp->active_mst_links == 0; + WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream && + !intel_dp_mst_is_master_trans(pipe_config)); DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); @@ -360,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -381,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); *pipe = intel_mst->pipe; if (intel_mst->connector) return true; @@ -391,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; intel_ddi_get_config(&intel_dig_port->base, pipe_config); @@ -499,7 +620,7 @@ static const struct 
drm_connector_helper_funcs intel_dp_mst_connector_helper_fun static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder)); drm_encoder_cleanup(encoder); kfree(intel_mst); @@ -723,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); /* encoders will get killed by normal cleanup */ } + +bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder; +} + +bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->mst_master_transcoder != INVALID_TRANSCODER && + crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index f660ad80db04..854724f68f09 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -6,10 +6,15 @@ #ifndef __INTEL_DP_MST_H__ #define __INTEL_DP_MST_H__ +#include <linux/types.h> + struct intel_digital_port; +struct intel_crtc_state; int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port); +bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); +bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 704f38681c4b..6fb1f7a7364e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, bool uniq_trans_scale) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); enum pipe pipe = intel_crtc->pipe; @@ -738,7 +738,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 val; @@ -781,7 +781,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, void chv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); @@ -861,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct 
intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, void chv_phy_release_cl2_override(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (dport->release_cl2_override) { @@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); enum dpio_channel port = vlv_dport_to_channel(dport); enum pipe pipe = intel_crtc->pipe; @@ -1014,7 +1014,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); @@ -1043,7 +1043,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1073,7 +1073,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 728a4b045de7..c75e34d87111 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? 
- enc_to_mst(&encoder->base)->primary : - enc_to_dig_port(&encoder->base); + enc_to_mst(encoder)->primary : + enc_to_dig_port(encoder); if (primary_port && (primary_port->tc_mode == TC_PORT_DP_ALT || diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index b15be5814599..19f78a4022d3 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -45,8 +45,9 @@ struct intel_dsi { struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS]; intel_wakeref_t io_wakeref[I915_MAX_PORTS]; - /* GPIO Desc for CRC based Panel control */ + /* GPIO Desc for panel and backlight control */ struct gpio_desc *gpio_panel; + struct gpio_desc *gpio_backlight; struct intel_connector *attached_connector; @@ -68,6 +69,9 @@ struct intel_dsi { /* number of DSI lanes */ unsigned int lane_count; + /* i2c bus associated with the slave device */ + int i2c_bus_num; + /* * video mode pixel format * @@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) #define for_each_dsi_phy(__phy, __phys_mask) \ for_each_phy_masked(__phy, __phys_mask) -static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) +static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dsi, base.base); + return container_of(&encoder->base, struct intel_dsi, base.base); } static inline bool is_vid_mode(struct intel_dsi *intel_dsi) @@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) { - return enc_to_intel_dsi(&encoder->base)->ports; + return enc_to_intel_dsi(encoder)->ports; } /* icl_dsi.c */ @@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index bb3fd8b786a2..c87838843d0b 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) { struct intel_encoder *encoder = connector->encoder; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi_device; u8 data = 0; enum port port; @@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct mipi_dsi_device *dsi_device; u8 data = level; enum port port; @@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 static void dcs_disable_backlight(const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = 
enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct mipi_dsi_device *dsi_device; enum port port; @@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state) static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f90946c912ee..89fb0d90b694 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -25,7 +25,10 @@ */ #include <linux/gpio/consumer.h> +#include <linux/gpio/machine.h> #include <linux/mfd/intel_soc_pmic.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/machine.h> #include <linux/slab.h> #include <asm/intel-mid.h> @@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; +struct i2c_adapter_lookup { + u16 slave_addr; + struct intel_dsi *intel_dsi; + acpi_handle dev_handle; +}; + #define CHV_GPIO_IDX_START_N 0 #define CHV_GPIO_IDX_START_E 73 #define CHV_GPIO_IDX_START_SW 100 @@ -375,11 +384,98 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) return data; } +static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) +{ + struct i2c_adapter_lookup *lookup = data; + struct intel_dsi *intel_dsi = lookup->intel_dsi; + struct acpi_resource_i2c_serialbus *sb; + struct i2c_adapter *adapter; + acpi_handle adapter_handle; + acpi_status status; + + if (intel_dsi->i2c_bus_num >= 0 || + !i2c_acpi_get_i2c_resource(ares, &sb)) + return 1; + + if (lookup->slave_addr != sb->slave_address) + return 1; + + status = acpi_get_handle(lookup->dev_handle, + sb->resource_source.string_ptr, + &adapter_handle); + if (ACPI_FAILURE(status)) + return 1; + + adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); + if (adapter) + intel_dsi->i2c_bus_num = adapter->nr; + + return 1; +} + static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { - DRM_DEBUG_KMS("Skipping I2C element execution\n"); + struct drm_device *drm_dev = intel_dsi->base.base.dev; + struct device *dev = &drm_dev->pdev->dev; + struct i2c_adapter *adapter; + struct acpi_device *acpi_dev; + struct list_head resource_list; + struct i2c_adapter_lookup lookup; + struct i2c_msg msg; + int ret; + u8 vbt_i2c_bus_num = *(data + 2); + u16 slave_addr = *(u16 *)(data + 3); + u8 reg_offset = *(data + 5); + u8 payload_size = *(data + 6); + u8 *payload_data; + + if (intel_dsi->i2c_bus_num < 0) { + intel_dsi->i2c_bus_num = vbt_i2c_bus_num; + + acpi_dev = ACPI_COMPANION(dev); + if (acpi_dev) { + memset(&lookup, 0, sizeof(lookup)); + lookup.slave_addr = slave_addr; + lookup.intel_dsi = intel_dsi; + lookup.dev_handle = acpi_device_handle(acpi_dev); + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(acpi_dev, &resource_list, + i2c_adapter_lookup, + &lookup); + acpi_dev_free_resource_list(&resource_list); + } + } - return data + *(data + 6) + 7; + adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); + if (!adapter) { + DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n"); + goto err_bus; + } + + payload_data = kzalloc(payload_size + 1, 
GFP_KERNEL); + if (!payload_data) + goto err_alloc; + + payload_data[0] = reg_offset; + memcpy(&payload_data[1], (data + 7), payload_size); + + msg.addr = slave_addr; + msg.flags = 0; + msg.len = payload_size + 1; + msg.buf = payload_data; + + ret = i2c_transfer(adapter, &msg, 1); + if (ret < 0) + DRM_DEV_ERROR(dev, + "Failed to xfer payload of size (%u) to reg (%u)\n", + payload_size, reg_offset); + + kfree(payload_data); +err_alloc: + i2c_put_adapter(adapter); +err_bus: + return data + payload_size + 7; } static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) @@ -453,8 +549,8 @@ static const char *sequence_name(enum mipi_seq seq_id) return "(unknown)"; } -void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id) +static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); const u8 *data; @@ -519,6 +615,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, } } +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id) +{ + if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel) + gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); + if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight) + gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1); + + intel_dsi_vbt_exec(intel_dsi, seq_id); + + if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel) + gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); + if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight) + gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0); +} + void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); @@ -664,6 +776,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->panel_off_delay = pps->panel_off_delay / 10; intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10; + intel_dsi->i2c_bus_num = -1; + /* a regular driver would get the device in probe */ for_each_dsi_port(port, intel_dsi->ports) { mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device); @@ -671,3 +785,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) return true; } + +/* + * On some BYT/CHT devs some sequences are incomplete and we need to manually + * control some GPIOs. We need to add a GPIO lookup table before we get these. + * If the GOP did not initialize the panel (HDMI inserted) we may need to also + * change the pinmux for the SoC's PWM0 pin from GPIO to PWM. + */ +static struct gpiod_lookup_table pmic_panel_gpio_table = { + /* Intel GFX is consumer */ + .dev_id = "0000:00:02.0", + .table = { + /* Panel EN/DISABLE */ + GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static struct gpiod_lookup_table soc_panel_gpio_table = { + .dev_id = "0000:00:02.0", + .table = { + GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static const struct pinctrl_map soc_pwm_pinctrl_map[] = { + PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00", + "pwm0_grp", "pwm"), +}; + +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + enum gpiod_flags flags = panel_is_on ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; + bool want_backlight_gpio = false; + bool want_panel_gpio = false; + struct pinctrl *pinctrl; + int ret; + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + mipi_config->pwm_blc == PPS_BLC_PMIC) { + gpiod_add_lookup_table(&pmic_panel_gpio_table); + want_panel_gpio = true; + } + + if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + gpiod_add_lookup_table(&soc_panel_gpio_table); + want_panel_gpio = true; + want_backlight_gpio = true; + + /* Ensure PWM0 pin is muxed as PWM instead of GPIO */ + ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, + ARRAY_SIZE(soc_pwm_pinctrl_map)); + if (ret) + DRM_ERROR("Failed to register pwm0 pinmux mapping\n"); + + pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); + if (IS_ERR(pinctrl)) + DRM_ERROR("Failed to set pinmux to PWM\n"); + } + + if (want_panel_gpio) { + intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags); + if (IS_ERR(intel_dsi->gpio_panel)) { + DRM_ERROR("Failed to own gpio for panel control\n"); + intel_dsi->gpio_panel = NULL; + } + } + + if (want_backlight_gpio) { + intel_dsi->gpio_backlight = + gpiod_get(dev->dev, "backlight", flags); + if (IS_ERR(intel_dsi->gpio_backlight)) { + DRM_ERROR("Failed to own gpio for backlight control\n"); + intel_dsi->gpio_backlight = NULL; + } + } +} + +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + + if (intel_dsi->gpio_panel) { + gpiod_put(intel_dsi->gpio_panel); + intel_dsi->gpio_panel = NULL; + } + + if (intel_dsi->gpio_backlight) { + gpiod_put(intel_dsi->gpio_backlight); + intel_dsi->gpio_backlight = NULL; + } + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + mipi_config->pwm_blc == PPS_BLC_PMIC) + gpiod_remove_lookup_table(&pmic_panel_gpio_table); + + if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + pinctrl_unregister_mappings(soc_pwm_pinctrl_map); + gpiod_remove_lookup_table(&soc_panel_gpio_table); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index a74dc5b915d1..86a337c9d85d 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) return container_of(encoder, struct intel_dvo, base); } -static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) +static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector) { return enc_to_dvo(intel_attached_encoder(connector)); } @@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); u32 tmp; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -220,7 +220,7 @@ static enum drm_mode_status intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); const struct drm_display_mode *fixed_mode = to_intel_connector(connector)->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -311,7 +311,7 @@ 
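A pattern worth calling out, since it repeats through intel_dvo.c here and again in intel_hdmi.c, intel_sdvo.c and intel_tv.c further down: the intel_attached_*() helpers now take struct intel_connector * instead of struct drm_connector *, and callbacks that still receive the base DRM type convert once with to_intel_connector(). A minimal sketch of the shape, using a hypothetical intel_foo output type rather than any type from this patch:

/* Hypothetical "foo" output, standing in for dvo/hdmi/sdvo/tv. */
struct intel_foo {
	struct intel_encoder base;
	/* device-specific state would follow */
};

static struct intel_foo *enc_to_foo(struct intel_encoder *encoder)
{
	return container_of(encoder, struct intel_foo, base);
}

static struct intel_foo *intel_attached_foo(struct intel_connector *connector)
{
	/* intel_attached_encoder() likewise takes the intel_connector type now */
	return enc_to_foo(intel_attached_encoder(connector));
}

/* DRM core callbacks still hand over the base class, so convert at the edge. */
static enum drm_connector_status
intel_foo_detect(struct drm_connector *connector, bool force)
{
	struct intel_foo *foo = intel_attached_foo(to_intel_connector(connector));

	/* a real implementation would query the hardware through foo here */
	return connector_status_connected;
}

This keeps the base-to-derived casts at the DRM callback boundary, so the internal helpers only ever deal in the i915 types.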
static void intel_dvo_pre_enable(struct intel_encoder *encoder, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index ab61f88d1d33..6c83b350525d 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, } } -static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) +static void ilk_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); u32 bit = (pipe == PIPE_A) ? @@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, ilk_disable_display_irq(dev_priv, bit); } -static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) +static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe)); } -static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) +static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable, + bool old) { struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { @@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, } } -static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) +static void bdw_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, if (HAS_GMCH(dev_priv)) i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (IS_GEN_RANGE(dev_priv, 5, 6)) - ironlake_set_fifo_underrun_reporting(dev, pipe, enable); + ilk_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN(dev_priv, 7)) - ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); + ivb_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (INTEL_GEN(dev_priv) >= 8) - broadwell_set_fifo_underrun_reporting(dev, pipe, enable); + bdw_set_fifo_underrun_reporting(dev, pipe, enable); return old; } @@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) if (HAS_GMCH(dev_priv)) i9xx_check_fifo_underruns(crtc); else if (IS_GEN(dev_priv, 7)) - ivybridge_check_fifo_underruns(crtc); + ivb_check_fifo_underruns(crtc); } spin_unlock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 685589064d10..93ac0f296852 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, "HDMI transcoder function enabled, 
expecting disabled\n"); } -struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) +struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder) { struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); + container_of(&encoder->base, struct intel_digital_port, + base.base); return &intel_dig_port->hdmi; } -static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) +static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector) { - return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); + return enc_to_intel_hdmi(intel_attached_encoder(connector)); } static u32 g4x_infoframe_index(unsigned int type) @@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val, ret = 0; int i; @@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, const union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; @@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; int ret; @@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); @@ -1038,7 +1039,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1097,7 +1098,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1146,7 +1147,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = 
VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1737,7 +1738,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 hdmi_val; @@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); intel_wakeref_t wakeref; bool ret; @@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; @@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -1947,7 +1948,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); enum pipe pipe = crtc->pipe; u32 temp; @@ -2007,7 +2008,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); @@ -2160,7 +2161,7 @@ static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_hdmi *hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct drm_device *dev = intel_hdmi_to_dev(hdmi); struct drm_i915_private *dev_priv = to_i915(dev); enum drm_mode_status status; @@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int clock, bool force_dvi) { - struct intel_hdmi *intel_hdmi = 
enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int bpc; for (bpc = 12; bpc >= 10; bpc -= 2) { @@ -2334,7 +2335,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, bool force_dvi) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; @@ -2404,7 +2405,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; @@ -2496,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, static void intel_hdmi_unset_edid(struct drm_connector *connector) { - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; @@ -2512,7 +2513,7 @@ static void intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum port port = hdmi_to_dig_port(hdmi)->base.port; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); @@ -2559,7 +2560,7 @@ static bool intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_wakeref_t wakeref; struct edid *edid; bool connected = false; @@ -2600,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) { enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; intel_wakeref_t wakeref; @@ -2663,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct intel_digital_port *intel_dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); intel_hdmi_prepare(encoder, pipe_config); @@ -2676,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); @@ -2746,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state 
*conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -2772,7 +2773,7 @@ static struct i2c_adapter * intel_hdmi_get_i2c_adapter(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); } @@ -2816,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector) static void intel_hdmi_destroy(struct drm_connector *connector) { - struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier; + struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); @@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, bool scrambling) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_scrambling *sink_scrambling = &connector->display_info.hdmi.scdc.scrambling; struct i2c_adapter *adapter = diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index cf1ea5427639..d3659d0b408b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); -struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); +struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder); int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index fc29046d48ea..99d3a3c7989e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder, static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) { return intel_encoder_is_dig_port(encoder) && - enc_to_dig_port(&encoder->base)->hpd_pulse != NULL; + enc_to_dig_port(encoder)->hpd_pulse != NULL; } static void i915_digport_work_func(struct work_struct *work) @@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work) if (!long_hpd && !short_hpd) continue; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); ret = dig_port->hpd_pulse(dig_port, long_hpd); if (ret == IRQ_NONE) { diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 5145ff8b962b..d807c5648c87 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, const void *frame, ssize_t len) { bool ret; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); /* LSPCON only needs AVI IF */ if (type != HDMI_INFOFRAME_TYPE_AVI) @@ -472,7 +472,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, ssize_t ret; union hdmi_infoframe frame; u8 buf[VIDEO_DIP_DATA_SIZE]; - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { /* FIXME actually read this from the hw */ - return enc_to_intel_lspcon(&encoder->base)->active; + return enc_to_intel_lspcon(encoder)->active; } void lspcon_resume(struct intel_lspcon *lspcon) diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 2746512f4466..520408e83681 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, break; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); switch (dig_port->base.port) { case PORT_B: *source = INTEL_PIPE_CRC_SOURCE_DP_B; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 16e9ff47d519..89c9cf5f38d2 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1523,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) return ret; } + +void intel_psr_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_connector *intel_connector; + struct intel_digital_port *dig_port; + struct drm_crtc_state *crtc_state; + + if (!CAN_PSR(dev_priv) || !new_state->crtc || + dev_priv->psr.initially_probed) + return; + + intel_connector = to_intel_connector(connector); + dig_port = enc_to_dig_port(intel_connector->encoder); + if (dev_priv->psr.dp != &dig_port->dp) + return; + + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + crtc_state->mode_changed = true; + dev_priv->psr.initially_probed = true; +} diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 46e4de8b8cd5..c58a1d438808 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -8,6 +8,8 @@ #include "intel_frontbuffer.h" +struct drm_connector; +struct drm_connector_state; struct drm_i915_private; struct intel_crtc_state; struct intel_dp; @@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value); bool intel_psr_enabled(struct intel_dp *intel_dp); +void intel_psr_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 8758ee2a4442..e8819fd21e03 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -180,7 
+180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) return container_of(encoder, struct intel_sdvo, base); } -static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) +static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector) { return to_sdvo(intel_attached_encoder(connector)); } @@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(&connector->base); - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); u16 active_outputs = 0; intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); @@ -1823,7 +1823,7 @@ static enum drm_mode_status intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) static struct edid * intel_sdvo_get_edid(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); return drm_get_edid(connector, &sdvo->ddc); } @@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status status; @@ -2028,7 +2028,7 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { u16 response; - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; @@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; u32 reply = 0, format_map = 0; @@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; @@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, static int intel_sdvo_connector_register(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = 
intel_attached_sdvo(to_intel_connector(connector)); int ret; ret = intel_connector_register(connector); @@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector) static void intel_sdvo_connector_unregister(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); sysfs_remove_link(&connector->kdev->kobj, sdvo->ddc.dev.kobj.name); @@ -2932,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { - if (intel_attached_encoder(connector) == &intel_sdvo->base) { + if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) { drm_connector_unregister(connector); intel_connector_destroy(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 3f7b8f2ff671..fca77ec1e0dd 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -583,15 +583,16 @@ skl_program_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->color_plane[color_plane].offset; u32 stride = skl_plane_stride(plane_state, color_plane); - u32 aux_dist = plane_state->color_plane[1].offset - surf_addr; - u32 aux_stride = skl_plane_stride(plane_state, 1); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = intel_main_to_aux_plane(fb, color_plane); + u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; + u32 aux_stride = skl_plane_stride(plane_state, aux_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 x = plane_state->color_plane[color_plane].x; u32 y = plane_state->color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - const struct drm_framebuffer *fb = plane_state->hw.fb; u8 alpha = plane_state->hw.alpha >> 8; u32 plane_color_ctl = 0; unsigned long irqflags; @@ -2106,7 +2107,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) { + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) { DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } @@ -2578,7 +2580,16 @@ static const u64 skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; -static const u64 gen12_plane_format_modifiers_ccs[] = { +static const u64 gen12_plane_format_modifiers_mc_ccs[] = { + I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const u64 gen12_plane_format_modifiers_rc_ccs[] = { I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, @@ -2743,10 +2754,21 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } +static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id) +{ + return plane_id < PLANE_SPRITE4; +} + static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { + struct intel_plane *plane = 
to_intel_plane(_plane); + switch (modifier) { + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (!gen12_plane_supports_mc_ccs(plane->id)) + return false; + /* fall through */ case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: @@ -2764,11 +2786,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, if (is_ccs_modifier(modifier)) return true; /* fall through */ - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2777,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: + if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + return true; + /* fall through */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XVYU2101010: case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: @@ -2910,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } +static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id) +{ + if (gen12_plane_supports_mc_ccs(plane_id)) + return gen12_plane_format_modifiers_mc_ccs; + else + return gen12_plane_format_modifiers_rc_ccs; +} + static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2975,7 +3008,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (INTEL_GEN(dev_priv) >= 12) { - modifiers = gen12_plane_format_modifiers_ccs; + modifiers = gen12_get_plane_modifiers(plane_id); plane_funcs = &gen12_plane_funcs; } else { if (plane->has_ccs) diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 50703536436c..c75e0ceecee6 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) return container_of(encoder, struct intel_tv, base); } -static struct intel_tv *intel_attached_tv(struct drm_connector *connector) +static struct intel_tv *intel_attached_tv(struct intel_connector *connector) { return enc_to_tv(intel_attached_encoder(connector)); } @@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); - assert_pipe_disabled(dev_priv, intel_crtc->pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); /* Filter ctl must be set before TV_WIN_SIZE */ tv_filter_ctl = TV_AUTO_SCALE; @@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct intel_tv *intel_tv = intel_attached_tv(connector); + struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i; @@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { - struct intel_tv *intel_tv = intel_attached_tv(connector); + struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); enum drm_connector_status status; int type; diff --git 
a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 6bab08db5d75..9e6aaa302e40 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -943,7 +943,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; struct drm_dsc_picture_parameter_set pps; enum port port; @@ -961,7 +961,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 21e820299107..daf4fc3dab6f 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -23,7 +23,6 @@ * Author: Jani Nikula <jani.nikula@intel.com> */ -#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> @@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, static bool glk_dsi_enable_io(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; bool cold_boot = false; @@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) static void glk_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) static void bxt_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) static void vlv_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) { struct drm_i915_private 
*dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; DRM_DEBUG_KMS("\n"); @@ -625,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { @@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { @@ -745,7 +744,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_crtc *crtc = pipe_config->uapi.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, if (!IS_GEMINILAKE(dev_priv)) intel_dsi_prepare(encoder, pipe_config); - /* Power on, try both CRC pmic gpio and VBT */ - if (intel_dsi->gpio_panel) - gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); @@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; DRM_DEBUG_KMS("\n"); @@ -886,7 +882,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -895,7 +891,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, if (IS_GEN9_LP(dev_priv)) { intel_crtc_vblank_off(old_crtc_state); - skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); } if (is_vid_mode(intel_dsi)) { @@ -945,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, /* Assert reset */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); - /* Power off, try both CRC pmic gpio and VBT */ intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); - if (intel_dsi->gpio_panel) - 
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); /* * FIXME As we do with eDP, just make a note of the time here @@ -962,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; bool active = false; @@ -1041,7 +1034,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; enum port port; @@ -1234,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; @@ -1322,7 +1315,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1512,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, static void intel_dsi_unprepare(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -1539,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - - /* dispose of the gpios */ - if (intel_dsi->gpio_panel) - gpiod_put(intel_dsi->gpio_panel); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); + intel_dsi_vbt_gpio_cleanup(intel_dsi); intel_encoder_destroy(encoder); } @@ -1825,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct drm_connector *connector; struct drm_display_mode *current_mode, *fixed_mode; enum port port; + enum pipe pipe; DRM_DEBUG_KMS("\n"); @@ -1923,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) vlv_dphy_param_init(intel_dsi); - /* - * In case of BYT with CRC PMIC, we need to use GPIO for - * Panel control. 
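The CRC PMIC GPIO handling being removed from vlv_dsi_init() here is what moved into the new intel_dsi_vbt_gpio_init()/intel_dsi_vbt_gpio_cleanup() helpers added above. As a rough sketch of the resulting flow (the example_* wrappers are illustrative, not functions from the patch):

/* Setup, in the spirit of vlv_dsi_init(): claim panel/backlight GPIOs,
 * picking the initial level from whether the GOP left the panel enabled. */
static void example_dsi_gpio_setup(struct intel_encoder *encoder,
				   struct intel_dsi *intel_dsi)
{
	enum pipe pipe;

	intel_dsi_vbt_gpio_init(intel_dsi,
				intel_dsi_get_hw_state(encoder, &pipe));
}

/* Modeset paths only run the VBT sequences; the wrapper now toggles
 * gpio_panel around POWER_ON/POWER_OFF and gpio_backlight around
 * BACKLIGHT_ON/BACKLIGHT_OFF internally. */
static void example_dsi_panel_on(struct intel_dsi *intel_dsi)
{
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}

/* Encoder destroy path releases the GPIOs and drops the lookup tables. */
static void example_dsi_gpio_teardown(struct intel_dsi *intel_dsi)
{
	intel_dsi_vbt_gpio_cleanup(intel_dsi);
}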
- */ - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) { - intel_dsi->gpio_panel = - gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH); - - if (IS_ERR(intel_dsi->gpio_panel)) { - DRM_ERROR("Failed to own gpio for panel control\n"); - intel_dsi->gpio_panel = NULL; - } - } + intel_dsi_vbt_gpio_init(intel_dsi, + intel_dsi_get_hw_state(intel_encoder, &pipe)); drm_connector_init(dev, connector, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 95f39cd0ce02..6b89e67b120f 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int ret; u32 dsi_clk; @@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock, pclk; u32 pll_ctl, pll_div; @@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, u32 pclk; u32 dsi_clk; u32 dsi_ratio; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); temp = I915_READ(MIPI_CTRL(port)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; @@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; @@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile deleted file mode 100644 index 7e73aa587967..000000000000 --- a/drivers/gpu/drm/i915/gem/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. 
- -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index dc90b044a217..a2e57e62af30 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -69,6 +69,7 @@ #include <drm/i915_drm.h> +#include "gt/gen6_ppgtt.h" #include "gt/intel_context.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_pm.h" @@ -705,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -760,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915) flush_work(&i915->gem.contexts.free_work); } -static int context_idr_cleanup(int id, void *p, void *data) -{ - context_close(p); - return 0; -} - static int vm_idr_cleanup(int id, void *p, void *data) { i915_vm_put(p); @@ -773,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data) } static int gem_context_register(struct i915_gem_context *ctx, - struct drm_i915_file_private *fpriv) + struct drm_i915_file_private *fpriv, + u32 *id) { struct i915_address_space *vm; int ret; @@ -791,14 +787,10 @@ static int gem_context_register(struct i915_gem_context *ctx, current->comm, pid_nr(ctx->pid)); /* And finally expose ourselves to userspace via the idr */ - mutex_lock(&fpriv->context_idr_lock); - ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); - mutex_unlock(&fpriv->context_idr_lock); - if (ret >= 0) - goto out; + ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); + if (ret) + put_pid(fetch_and_zero(&ctx->pid)); - put_pid(fetch_and_zero(&ctx->pid)); -out: return ret; } @@ -808,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915, struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; int err; + u32 id; - mutex_init(&file_priv->context_idr_lock); - mutex_init(&file_priv->vm_idr_lock); + xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); - idr_init(&file_priv->context_idr); + mutex_init(&file_priv->vm_idr_lock); idr_init_base(&file_priv->vm_idr, 1); ctx = i915_gem_create_context(i915, 0); @@ -821,21 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915, goto err; } - err = gem_context_register(ctx, file_priv); + err = gem_context_register(ctx, file_priv, &id); if (err < 0) goto err_ctx; - GEM_BUG_ON(err > 0); - + GEM_BUG_ON(id); return 0; err_ctx: context_close(ctx); err: idr_destroy(&file_priv->vm_idr); - idr_destroy(&file_priv->context_idr); + xa_destroy(&file_priv->context_xa); mutex_destroy(&file_priv->vm_idr_lock); - mutex_destroy(&file_priv->context_idr_lock); return err; } @@ -843,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_private *i915 = file_priv->dev_priv; + struct i915_gem_context *ctx; + unsigned long idx; - idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); - idr_destroy(&file_priv->context_idr); - mutex_destroy(&file_priv->context_idr_lock); + xa_for_each(&file_priv->context_xa, idx, ctx) + context_close(ctx); + xa_destroy(&file_priv->context_xa); idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); idr_destroy(&file_priv->vm_idr); @@ -870,7 +862,7 @@ int 
i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags) return -EINVAL; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1244,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) * image, or into the registers directory, does not stick). Pristine * and idle contexts will be configured on pinning. */ - if (!intel_context_is_pinned(ce)) + if (!intel_context_pin_if_active(ce)) return 0; rq = intel_engine_create_kernel_request(ce->engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out_unpin; + } /* Serialise with the remote context */ ret = intel_context_prepare_remote_request(ce, rq); @@ -1257,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) ret = gen8_emit_rpcs_config(rq, ce, sseu); i915_request_add(rq); +out_unpin: + intel_context_unpin(ce); return ret; } @@ -2187,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_context_create_ext *args = data; struct create_ext ext_data; int ret; + u32 id; if (!DRIVER_CAPS(i915)->has_logical_contexts) return -ENODEV; @@ -2218,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, goto err_ctx; } - ret = gem_context_register(ext_data.ctx, ext_data.fpriv); + ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id); if (ret < 0) goto err_ctx; - args->ctx_id = ret; + args->ctx_id = id; DRM_DEBUG("HW context %d created\n", args->ctx_id); return 0; @@ -2245,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (!args->ctx_id) return -ENOENT; - if (mutex_lock_interruptible(&file_priv->context_idr_lock)) - return -EINTR; - - ctx = idr_remove(&file_priv->context_idr, args->ctx_id); - mutex_unlock(&file_priv->context_idr_lock); + ctx = xa_erase(&file_priv->context_xa, args->ctx_id); if (!ctx) return -ENOENT; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 14f3cc1b7583..3ae61a355d87 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -13,7 +13,6 @@ #include "i915_drv.h" #include "i915_gem.h" -#include "i915_gem_gtt.h" #include "i915_scheduler.h" #include "intel_device_info.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index cbd2bcade3c8..d5a0f5ae4a8b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2173,7 +2173,7 @@ static int eb_submit(struct i915_execbuffer *eb) } if (intel_context_nopreempt(eb->context)) - eb->request->flags |= I915_REQUEST_NOPREEMPT; + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 520cc9cac471..70543c83df06 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { .release = i915_gem_object_release_memory_region, }; -/* XXX: Time to vfunc your life up? 
*/ -void __iomem * -i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, - unsigned long n) -{ - resource_size_t offset; - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE); -} - -void __iomem * -i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, - unsigned long n) -{ - resource_size_t offset; - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset); -} - -void __iomem * -i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, - unsigned long n, - unsigned long size) -{ - resource_size_t offset; - - GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); -} - bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { return obj->ops == &i915_gem_lmem_obj_ops; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 7c176b8b7d2f..fc3f15580fe3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -14,14 +14,6 @@ struct intel_memory_region; extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; -void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, - unsigned long n, unsigned long size); -void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, - unsigned long n); -void __iomem * -i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, - unsigned long n); - bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 879fff8adc48..b9fdac2f9003 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -4,6 +4,7 @@ * Copyright © 2014-2016 Intel Corporation */ +#include <linux/anon_inodes.h> #include <linux/mman.h> #include <linux/pfn_t.h> #include <linux/sizes.h> @@ -212,6 +213,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err) case -EIO: /* shmemfs failure from swap device */ case -EFAULT: /* purged object */ case -ENODEV: /* bad object, how did you get here! 
*/ + case -ENXIO: /* unable to access backing store (on device) */ return VM_FAULT_SIGBUS; case -ENOSPC: /* shmemfs allocation failure */ @@ -236,42 +238,38 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) struct vm_area_struct *area = vmf->vma; struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; - unsigned long i, size = area->vm_end - area->vm_start; - bool write = area->vm_flags & VM_WRITE; - vm_fault_t ret = VM_FAULT_SIGBUS; + resource_size_t iomap; int err; - if (!i915_gem_object_has_struct_page(obj)) - return ret; - /* Sanity check that we allow writing into this object */ - if (i915_gem_object_is_readonly(obj) && write) - return ret; + if (unlikely(i915_gem_object_is_readonly(obj) && + area->vm_flags & VM_WRITE)) + return VM_FAULT_SIGBUS; err = i915_gem_object_pin_pages(obj); if (err) - return i915_error_to_vmf_fault(err); + goto out; - /* PTEs are revoked in obj->ops->put_pages() */ - for (i = 0; i < size >> PAGE_SHIFT; i++) { - struct page *page = i915_gem_object_get_page(obj, i); - - ret = vmf_insert_pfn(area, - (unsigned long)area->vm_start + i * PAGE_SIZE, - page_to_pfn(page)); - if (ret != VM_FAULT_NOPAGE) - break; + iomap = -1; + if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) { + iomap = obj->mm.region->iomap.base; + iomap -= obj->mm.region->region.start; } - if (write) { + /* PTEs are revoked in obj->ops->put_pages() */ + err = remap_io_sg(area, + area->vm_start, area->vm_end - area->vm_start, + obj->mm.pages->sgl, iomap); + + if (area->vm_flags & VM_WRITE) { GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - obj->cache_dirty = true; /* XXX flush after PAT update? */ obj->mm.dirty = true; } i915_gem_object_unpin_pages(obj); - return ret; +out: + return i915_error_to_vmf_fault(err); } static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) @@ -560,7 +558,9 @@ __assign_mmap_offset(struct drm_file *file, } if (mmap_type != I915_MMAP_TYPE_GTT && - !i915_gem_object_has_struct_page(obj)) { + !i915_gem_object_type_has(obj, + I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_HAS_IOMEM)) { err = -ENODEV; goto out; } @@ -694,6 +694,46 @@ static const struct vm_operations_struct vm_ops_cpu = { .close = vm_close, }; +static int singleton_release(struct inode *inode, struct file *file) +{ + struct drm_i915_private *i915 = file->private_data; + + cmpxchg(&i915->gem.mmap_singleton, file, NULL); + drm_dev_put(&i915->drm); + + return 0; +} + +static const struct file_operations singleton_fops = { + .owner = THIS_MODULE, + .release = singleton_release, +}; + +static struct file *mmap_singleton(struct drm_i915_private *i915) +{ + struct file *file; + + rcu_read_lock(); + file = i915->gem.mmap_singleton; + if (file && !get_file_rcu(file)) + file = NULL; + rcu_read_unlock(); + if (file) + return file; + + file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR); + if (IS_ERR(file)) + return file; + + /* Everyone shares a single global address space */ + file->f_mapping = i915->drm.anon_inode->i_mapping; + + smp_store_mb(i915->gem.mmap_singleton, file); + drm_dev_get(&i915->drm); + + return file; +} + /* * This overcomes the limitation in drm_gem_mmap's assignment of a * drm_gem_object as the vma->vm_private_data. 
Since we need to @@ -707,6 +747,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_device *dev = priv->minor->dev; struct i915_mmap_offset *mmo = NULL; struct drm_gem_object *obj = NULL; + struct file *anon; if (drm_dev_is_unplugged(dev)) return -ENODEV; @@ -755,9 +796,26 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags &= ~VM_MAYWRITE; } + anon = mmap_singleton(to_i915(obj->dev)); + if (IS_ERR(anon)) { + drm_gem_object_put_unlocked(obj); + return PTR_ERR(anon); + } + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = mmo; + /* + * We keep the ref on mmo->obj, not vm_file, but we require + * vma->vm_file->f_mapping, see vma_link(), for later revocation. + * Our userspace is accustomed to having per-file resource cleanup + * (i.e. contexts, objects and requests) on their close(fd), which + * requires avoiding extraneous references to their filp, hence why + * we prefer to use an anonymous file for their mmaps. + */ + fput(vma->vm_file); + vma->vm_file = anon; + switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: vma->vm_page_prot = diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 858f8bf49a04..db70a3306e59 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -16,6 +16,7 @@ #include "display/intel_frontbuffer.h" #include "i915_gem_object_types.h" #include "i915_gem_gtt.h" +#include "i915_vma_types.h" void i915_gem_init__objects(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 75197ca696a8..54aca5c9101e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -158,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) { - if (i915_gem_object_is_lmem(obj)) - io_mapping_unmap((void __force __iomem *)ptr); - else if (is_vmalloc_addr(ptr)) + if (is_vmalloc_addr(ptr)) vunmap(ptr); else kunmap(kmap_to_page(ptr)); @@ -236,46 +234,44 @@ unlock: return err; } +static inline pte_t iomap_pte(resource_size_t base, + dma_addr_t offset, + pgprot_t prot) +{ + return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot)); +} + /* The 'mapping' part of i915_gem_object_pin_map() below */ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { - unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + unsigned long n_pte = obj->base.size >> PAGE_SHIFT; struct sg_table *sgt = obj->mm.pages; - struct sgt_iter sgt_iter; - struct page *page; - struct page *stack_pages[32]; - struct page **pages = stack_pages; - unsigned long i = 0; + pte_t *stack[32], **mem; + struct vm_struct *area; pgprot_t pgprot; - void *addr; - if (i915_gem_object_is_lmem(obj)) { - void __iomem *io; - - if (type != I915_MAP_WC) - return NULL; - - io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size); - return (void __force *)io; - } + if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC) + return NULL; /* A single page can always be kmapped */ - if (n_pages == 1 && type == I915_MAP_WB) + if (n_pte == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); - if (n_pages > ARRAY_SIZE(stack_pages)) { + mem = stack; + if (n_pte > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, 
sizeof(*pages), GFP_KERNEL); - if (!pages) + mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL); + if (!mem) return NULL; } - for_each_sgt_page(page, sgt_iter, sgt) - pages[i++] = page; - - /* Check that we have the expected number of pages */ - GEM_BUG_ON(i != n_pages); + area = alloc_vm_area(obj->base.size, mem); + if (!area) { + if (mem != stack) + kvfree(mem); + return NULL; + } switch (type) { default: @@ -288,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, pgprot = pgprot_writecombine(PAGE_KERNEL_IO); break; } - addr = vmap(pages, n_pages, 0, pgprot); - if (pages != stack_pages) - kvfree(pages); + if (i915_gem_object_has_struct_page(obj)) { + struct sgt_iter iter; + struct page *page; + pte_t **ptes = mem; + + for_each_sgt_page(page, iter, sgt) + **ptes++ = mk_pte(page, pgprot); + } else { + resource_size_t iomap; + struct sgt_iter iter; + pte_t **ptes = mem; + dma_addr_t addr; + + iomap = obj->mm.region->iomap.base; + iomap -= obj->mm.region->region.start; + + for_each_sgt_daddr(addr, iter, sgt) + **ptes++ = iomap_pte(iomap, addr, pgprot); + } + + if (mem != stack) + kvfree(mem); - return addr; + return area->addr; } /* get, pin, and map the pages of the object into kernel space */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index d50adac12249..1515384d7e0e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, { INIT_LIST_HEAD(&obj->mm.blocks); obj->mm.region = intel_memory_region_get(mem); + obj->flags |= flags; + if (obj->base.size <= mem->min_page_size) + obj->flags |= I915_BO_ALLOC_CONTIGUOUS; mutex_lock(&mem->objects.lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 4d69c3fc3439..a2a980d9d241 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem) err); } + intel_memory_region_set_name(mem, "system"); + return 0; /* Don't error, we can simply fallback to the kernel mnt */ } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index afb08a1704a2..451f3078d60d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -645,6 +645,8 @@ i915_gem_object_create_stolen(struct drm_i915_private *i915, static int init_stolen(struct intel_memory_region *mem) { + intel_memory_region_set_name(mem, "stolen"); + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. 
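
Both the vm_fault_cpu() and i915_gem_object_map() hunks above translate a local-memory page into a CPU-visible page frame the same way: the region's iomap base is biased once by the region's device start, after which each page's DMA address can simply be added and shifted to yield the pfn fed to pfn_pte()/remap_io_sg(). A minimal standalone sketch of that arithmetic, not part of the patch (lmem_pfn and its parameter names are illustrative, and a 4 KiB page size is assumed):

#include <stdint.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

/*
 * Illustration of the offset bias used by iomap_pte() above: the patch
 * folds the subtraction into the base once per object (iomap -= region.start)
 * so only an addition remains per page; written out in full the translation is:
 */
static uint64_t lmem_pfn(uint64_t iomap_base,	/* CPU physical base of the region's aperture */
			 uint64_t region_start,	/* device address the region starts at */
			 uint64_t dma_addr)	/* device address of one page of the object */
{
	return (iomap_base + (dma_addr - region_start)) >> PAGE_SHIFT;
}
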
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h index 549c1394bcdc..b8cf31b7bf14 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h @@ -7,6 +7,12 @@ #ifndef __HUGE_GEM_OBJECT_H #define __HUGE_GEM_OBJECT_H +#include <linux/types.h> + +#include "gem/i915_gem_object_types.h" + +struct drm_i915_private; + struct drm_i915_gem_object * huge_gem_object(struct drm_i915_private *i915, phys_addr_t phys_size, diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 2479395c1873..9311250d7d6f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } -static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) +static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val) { - unsigned long n; + unsigned long n = obj->base.size >> PAGE_SHIFT; + u32 *ptr; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - return err; - - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) return err; - for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 __iomem *base; - u32 read_val; - - base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); - read_val = ioread32(base + dword); - io_mapping_unmap_atomic(base); - if (read_val != val) { - pr_err("n=%lu base[%u]=%u, val=%u\n", - n, dword, read_val, val); + ptr += dword; + while (n--) { + if (*ptr != val) { + pr_err("base[%u]=%08x, val=%08x\n", + dword, *ptr, val); err = -EINVAL; break; } + + ptr += PAGE_SIZE / sizeof(*ptr); } - i915_gem_object_unpin_pages(obj); + i915_gem_object_unpin_map(obj); return err; } @@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) { if (i915_gem_object_has_struct_page(obj)) return __cpu_check_shmem(obj, dword, val); - else if (i915_gem_object_is_lmem(obj)) - return __cpu_check_lmem(obj, dword, val); - - return -ENODEV; + else + return __cpu_check_vmap(obj, dword, val); } static int __igt_write_huge(struct intel_context *ce, @@ -1872,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void) mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; mkwrite_device_info(dev_priv)->ppgtt_size = 48; - ppgtt = i915_ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 49edc51111d5..3f6079e1dfb6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -325,7 +325,10 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; ctx.engine = random_engine(i915, &prng); - GEM_BUG_ON(!ctx.engine); + if (!ctx.engine) { + err = -ENODEV; + goto out_free; + } pr_info("%s: using %s\n", __func__, ctx.engine->name); intel_engine_pm_get(ctx.engine); @@ -354,7 +357,7 @@ static int igt_gem_coherency(void *arg) ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if 
(IS_ERR(ctx.obj)) { err = PTR_ERR(ctx.obj); - goto free; + goto out_pm; } i915_random_reorder(offsets, ncachelines, &prng); @@ -405,14 +408,15 @@ static int igt_gem_coherency(void *arg) } } } -free: +out_pm: intel_engine_pm_put(ctx.engine); +out_free: kfree(offsets); return err; put_object: i915_gem_object_put(ctx.obj); - goto free; + goto out_pm; } int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index cbf796da64e3..ef7c74cff28a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -9,6 +9,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gem/i915_gem_region.h" #include "huge_gem_object.h" #include "i915_selftest.h" #include "selftests/i915_random.h" @@ -725,114 +726,359 @@ err_obj: goto out; } -#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) -static int igt_mmap(void *arg, enum i915_mmap_type type) +static int gtt_set(struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct i915_mmap_offset *mmo; - struct vm_area_struct *area; - unsigned long addr; - void *vaddr; - int err = 0, i; + struct i915_vma *vma; + void __iomem *map; + int err = 0; - if (!i915_ggtt_has_aperture(&i915->ggtt)) - return 0; + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out; + } + + memset_io(map, POISON_INUSE, obj->base.size); + i915_vma_unpin_iomap(vma); + +out: + intel_gt_pm_put(vma->vm->gt); + return err; +} + +static int gtt_check(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + void __iomem *map; + int err = 0; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); goto out; } - memset(vaddr, POISON_INUSE, PAGE_SIZE); + + if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) { + pr_err("%s: Write via mmap did not land in backing store (GTT)\n", + obj->mm.region->name); + err = -EINVAL; + } + i915_vma_unpin_iomap(vma); + +out: + intel_gt_pm_put(vma->vm->gt); + return err; +} + +static int wc_set(struct drm_i915_gem_object *obj) +{ + void *vaddr; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + memset(vaddr, POISON_INUSE, obj->base.size); i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) { - err = PTR_ERR(mmo); - goto out; + return 0; +} + +static int wc_check(struct drm_i915_gem_object *obj) +{ + void *vaddr; + int err = 0; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) { + pr_err("%s: Write via mmap did not land in backing store (WC)\n", + obj->mm.region->name); + err = -EINVAL; } + i915_gem_object_unpin_map(obj); + + return err; +} + 
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) +{ + if (type == I915_MMAP_TYPE_GTT && + !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt)) + return false; + + if (type != I915_MMAP_TYPE_GTT && + !i915_gem_object_type_has(obj, + I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_HAS_IOMEM)) + return false; + + return true; +} + +#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) +static int __igt_mmap(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) +{ + struct i915_mmap_offset *mmo; + struct vm_area_struct *area; + unsigned long addr; + int err, i; + + if (!can_mmap(obj, type)) + return 0; + + err = wc_set(obj); + if (err == -ENXIO) + err = gtt_set(obj); + if (err) + return err; + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) + return PTR_ERR(mmo); addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); - if (IS_ERR_VALUE(addr)) { - err = addr; - goto out; - } + if (IS_ERR_VALUE(addr)) + return addr; - pr_debug("igt_mmap() @ %lx\n", addr); + pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); area = find_vma(current->mm, addr); if (!area) { - pr_err("Did not create a vm_area_struct for the mmap\n"); + pr_err("%s: Did not create a vm_area_struct for the mmap\n", + obj->mm.region->name); err = -EINVAL; goto out_unmap; } if (area->vm_private_data != mmo) { - pr_err("vm_area_struct did not point back to our mmap_offset object!\n"); + pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n", + obj->mm.region->name); err = -EINVAL; goto out_unmap; } - for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) { + for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; if (get_user(x, ux)) { - pr_err("Unable to read from mmap, offset:%zd\n", - i * sizeof(x)); + pr_err("%s: Unable to read from mmap, offset:%zd\n", + obj->mm.region->name, i * sizeof(x)); err = -EFAULT; - break; + goto out_unmap; } if (x != expand32(POISON_INUSE)) { - pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", + pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", + obj->mm.region->name, i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; - break; + goto out_unmap; } x = expand32(POISON_FREE); if (put_user(x, ux)) { - pr_err("Unable to write to mmap, offset:%zd\n", - i * sizeof(x)); + pr_err("%s: Unable to write to mmap, offset:%zd\n", + obj->mm.region->name, i * sizeof(x)); err = -EFAULT; - break; + goto out_unmap; } } + if (type == I915_MMAP_TYPE_GTT) + intel_gt_flush_ggtt_writes(&i915->gt); + + err = wc_check(obj); + if (err == -ENXIO) + err = gtt_check(obj); out_unmap: - vm_munmap(addr, PAGE_SIZE); + vm_munmap(addr, obj->base.size); + return err; +} - vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out; - } - if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) { - pr_err("Write via mmap did not land in backing store\n"); - err = -EINVAL; +static int igt_mmap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; + + for_each_memory_region(mr, i915, id) { + unsigned long sizes[] = { + PAGE_SIZE, + mr->min_page_size, + SZ_4M, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, sizes[i], 0); + if (obj == 
ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } } - i915_gem_object_unpin_map(obj); -out: - i915_gem_object_put(obj); - return err; + return 0; } -static int igt_mmap_gtt(void *arg) +static int __igt_mmap_gpu(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) { - return igt_mmap(arg, I915_MMAP_TYPE_GTT); + struct intel_engine_cs *engine; + struct i915_mmap_offset *mmo; + unsigned long addr; + u32 __user *ux; + u32 bbe; + int err; + + /* + * Verify that the mmap access into the backing store aligns with + * that of the GPU, i.e. that mmap is indeed writing into the same + * page as being read by the GPU. + */ + + if (!can_mmap(obj, type)) + return 0; + + err = wc_set(obj); + if (err == -ENXIO) + err = gtt_set(obj); + if (err) + return err; + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) + return PTR_ERR(mmo); + + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + if (IS_ERR_VALUE(addr)) + return addr; + + ux = u64_to_user_ptr((u64)addr); + bbe = MI_BATCH_BUFFER_END; + if (put_user(bbe, ux)) { + pr_err("%s: Unable to write to mmap\n", obj->mm.region->name); + err = -EFAULT; + goto out_unmap; + } + + if (type == I915_MMAP_TYPE_GTT) + intel_gt_flush_ggtt_writes(&i915->gt); + + for_each_uabi_engine(engine, i915) { + struct i915_request *rq; + struct i915_vma *vma; + + vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_unmap; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_unmap; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unpin; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); + + err = engine->emit_bb_start(rq, vma->node.start, 0, 0); + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(engine->i915->drm.dev); + + pr_err("%s(%s, %s): Failed to execute batch\n", + __func__, engine->name, obj->mm.region->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + intel_gt_set_wedged(engine->gt); + err = -EIO; + } + i915_request_put(rq); + +out_unpin: + i915_vma_unpin(vma); + if (err) + goto out_unmap; + } + +out_unmap: + vm_munmap(addr, obj->base.size); + return err; } -static int igt_mmap_cpu(void *arg) +static int igt_mmap_gpu(void *arg) { - return igt_mmap(arg, I915_MMAP_TYPE_WC); + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; + + for_each_memory_region(mr, i915, id) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + if (obj == ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } + + return 0; } static int check_present_pte(pte_t *pte, unsigned long addr, void *data) @@ -887,32 +1133,24 @@ static int prefault_range(u64 start, u64 len) return __get_user(c, end - 1); } -static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) +static 
int __igt_mmap_revoke(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) { - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; struct i915_mmap_offset *mmo; unsigned long addr; int err; - if (!i915_ggtt_has_aperture(&i915->ggtt)) + if (!can_mmap(obj, type)) return 0; - obj = i915_gem_object_create_internal(i915, SZ_4M); - if (IS_ERR(obj)) - return PTR_ERR(obj); - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) { - err = PTR_ERR(mmo); - goto out; - } + if (IS_ERR(mmo)) + return PTR_ERR(mmo); addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); - if (IS_ERR_VALUE(addr)) { - err = addr; - goto out; - } + if (IS_ERR_VALUE(addr)) + return addr; err = prefault_range(addr, obj->base.size); if (err) @@ -922,8 +1160,10 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) !atomic_read(&obj->bind_count)); err = check_present(addr, obj->base.size); - if (err) + if (err) { + pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap; + } /* * After unbinding the object from the GGTT, its address may be reused @@ -947,24 +1187,43 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) } err = check_absent(addr, obj->base.size); - if (err) + if (err) { + pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap; + } out_unmap: vm_munmap(addr, obj->base.size); -out: - i915_gem_object_put(obj); return err; } -static int igt_mmap_gtt_revoke(void *arg) +static int igt_mmap_revoke(void *arg) { - return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT); -} + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; -static int igt_mmap_cpu_revoke(void *arg) -{ - return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC); + for_each_memory_region(mr, i915, id) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + if (obj == ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } + + return 0; } int i915_gem_mman_live_selftests(struct drm_i915_private *i915) @@ -973,10 +1232,9 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), - SUBTEST(igt_mmap_gtt), - SUBTEST(igt_mmap_cpu), - SUBTEST(igt_mmap_gtt_revoke), - SUBTEST(igt_mmap_cpu_revoke), + SUBTEST(igt_mmap), + SUBTEST(igt_mmap_revoke), + SUBTEST(igt_mmap_gpu), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 7d7e13dc2fdf..384143aa7776 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -77,12 +77,13 @@ live_context(struct drm_i915_private *i915, struct file *file) { struct i915_gem_context *ctx; int err; + u32 id; ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; - err = gem_context_register(ctx, to_drm_file(file)->driver_priv); + err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id); if (err < 0) goto err_ctx; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h index 370360b4a148..688511afa883 100644 --- 
a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h +++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h @@ -7,6 +7,8 @@ #ifndef __MOCK_GEM_OBJECT_H__ #define __MOCK_GEM_OBJECT_H__ +#include "gem/i915_gem_object_types.h" + struct mock_object { struct drm_i915_gem_object base; }; diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile deleted file mode 100644 index 7e73aa587967..000000000000 --- a/drivers/gpu/drm/i915/gt/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c new file mode 100644 index 000000000000..f10b2c41571c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/log2.h> + +#include "gen6_ppgtt.h" +#include "i915_scatterlist.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_gt.h" + +/* Write pde (index) from the page directory @pd to the page table @pt */ +static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, + const unsigned int pde, + const struct i915_page_table *pt) +{ + /* Caller needs to make sure the write completes if necessary */ + iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, + ppgtt->pd_addr + pde); +} + +void gen7_ppgtt_enable(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 ecochk; + + intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); + + ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + if (IS_HASWELL(i915)) { + ecochk |= ECOCHK_PPGTT_WB_HSW; + } else { + ecochk |= ECOCHK_PPGTT_LLC_IVB; + ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; + } + intel_uncore_write(uncore, GAM_ECOCHK, ecochk); + + for_each_engine(engine, gt, id) { + /* GFX_MODE is per-ring on gen7+ */ + ENGINE_WRITE(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } +} + +void gen6_ppgtt_enable(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_rmw(uncore, + GAC_ECO_BITS, + 0, + ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); + + intel_uncore_rmw(uncore, + GAB_CTL, + 0, + GAB_CTL_CONT_AFTER_PAGEFAULT); + + intel_uncore_rmw(uncore, + GAM_ECOCHK, + 0, + ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + + if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ + intel_uncore_write(uncore, + GFX_MODE, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); +} + +/* PPGTT support for Sandybdrige/Gen6 and later */ +static void gen6_ppgtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + const gen6_pte_t scratch_pte = vm->scratch[0].encode; + unsigned int pde = first_entry / GEN6_PTES; + unsigned int pte = first_entry % GEN6_PTES; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + + while (num_entries) { + struct i915_page_table * const pt = + i915_pt_entry(ppgtt->base.pd, pde++); + const unsigned int count = min(num_entries, GEN6_PTES - pte); + gen6_pte_t *vaddr; + + GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); + + num_entries -= count; + + GEM_BUG_ON(count > atomic_read(&pt->used)); + if (!atomic_sub_return(count, 
&pt->used)) + ppgtt->scan_for_unused_pt = true; + + /* + * Note that the hw doesn't support removing PDE on the fly + * (they are cached inside the context with no means to + * invalidate the cache), so we can only reset the PTE + * entries back to scratch. + */ + + vaddr = kmap_atomic_px(pt); + memset32(vaddr + pte, scratch_pte, count); + kunmap_atomic(vaddr); + + pte = 0; + } +} + +static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_page_directory * const pd = ppgtt->pd; + unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE; + unsigned int act_pt = first_entry / GEN6_PTES; + unsigned int act_pte = first_entry % GEN6_PTES; + const u32 pte_encode = vm->pte_encode(0, cache_level, flags); + struct sgt_dma iter = sgt_dma(vma); + gen6_pte_t *vaddr; + + GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); + + vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); + do { + vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); + + iter.dma += I915_GTT_PAGE_SIZE; + if (iter.dma == iter.max) { + iter.sg = __sg_next(iter.sg); + if (!iter.sg) + break; + + iter.dma = sg_dma_address(iter.sg); + iter.max = iter.dma + iter.sg->length; + } + + if (++act_pte == GEN6_PTES) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt)); + act_pte = 0; + } + } while (1); + kunmap_atomic(vaddr); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; +} + +static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) +{ + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_table *pt; + unsigned int pde; + + start = round_down(start, SZ_64K); + end = round_up(end, SZ_64K) - start; + + mutex_lock(&ppgtt->flush); + + gen6_for_each_pde(pt, pd, start, end, pde) + gen6_write_pde(ppgtt, pde, pt); + + mb(); + ioread32(ppgtt->pd_addr + pde - 1); + gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); + mb(); + + mutex_unlock(&ppgtt->flush); +} + +static int gen6_alloc_va_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_table *pt, *alloc = NULL; + intel_wakeref_t wakeref; + u64 from = start; + unsigned int pde; + int ret = 0; + + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + spin_lock(&pd->lock); + gen6_for_each_pde(pt, pd, start, length, pde) { + const unsigned int count = gen6_pte_count(start, length); + + if (px_base(pt) == px_base(&vm->scratch[1])) { + spin_unlock(&pd->lock); + + pt = fetch_and_zero(&alloc); + if (!pt) + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto unwind_out; + } + + fill32_px(pt, vm->scratch[0].encode); + + spin_lock(&pd->lock); + if (pd->entry[pde] == &vm->scratch[1]) { + pd->entry[pde] = pt; + } else { + alloc = pt; + pt = pd->entry[pde]; + } + } + + atomic_add(count, &pt->used); + } + spin_unlock(&pd->lock); + + if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) + gen6_flush_pd(ppgtt, from, start); + + goto out; + +unwind_out: + gen6_ppgtt_clear_range(vm, from, start - from); +out: + if (alloc) + free_px(vm, alloc); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return ret; +} + +static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) +{ + struct i915_address_space * const vm = &ppgtt->base.vm; + struct i915_page_directory * const pd = ppgtt->base.pd; + int ret; + + ret = 
setup_scratch_page(vm, __GFP_HIGHMEM); + if (ret) + return ret; + + vm->scratch[0].encode = + vm->pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_NONE, PTE_READ_ONLY); + + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { + cleanup_scratch_page(vm); + return -ENOMEM; + } + + fill32_px(&vm->scratch[1], vm->scratch[0].encode); + memset_p(pd->entry, &vm->scratch[1], I915_PDES); + + return 0; +} + +static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) +{ + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); + struct i915_page_table *pt; + u32 pde; + + gen6_for_all_pdes(pt, pd, pde) + if (px_base(pt) != scratch) + free_px(&ppgtt->base.vm, pt); +} + +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + + __i915_vma_put(ppgtt->vma); + + gen6_ppgtt_free_pd(ppgtt); + free_scratch(vm); + + mutex_destroy(&ppgtt->flush); + mutex_destroy(&ppgtt->pin_mutex); + kfree(ppgtt->base.pd); +} + +static int pd_vma_set_pages(struct i915_vma *vma) +{ + vma->pages = ERR_PTR(-ENODEV); + return 0; +} + +static void pd_vma_clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + + vma->pages = NULL; +} + +static int pd_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); + struct gen6_ppgtt *ppgtt = vma->private; + u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; + + px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; + + gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); + return 0; +} + +static void pd_vma_unbind(struct i915_vma *vma) +{ + struct gen6_ppgtt *ppgtt = vma->private; + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); + struct i915_page_table *pt; + unsigned int pde; + + if (!ppgtt->scan_for_unused_pt) + return; + + /* Free all no longer used page tables */ + gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { + if (px_base(pt) == scratch || atomic_read(&pt->used)) + continue; + + free_px(&ppgtt->base.vm, pt); + pd->entry[pde] = scratch; + } + + ppgtt->scan_for_unused_pt = false; +} + +static const struct i915_vma_ops pd_vma_ops = { + .set_pages = pd_vma_set_pages, + .clear_pages = pd_vma_clear_pages, + .bind_vma = pd_vma_bind, + .unbind_vma = pd_vma_unbind, +}; + +static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) +{ + struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_vma *vma; + + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + GEM_BUG_ON(size > ggtt->vm.total); + + vma = i915_vma_alloc(); + if (!vma) + return ERR_PTR(-ENOMEM); + + i915_active_init(&vma->active, NULL, NULL); + + kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(&ggtt->vm); + vma->ops = &pd_vma_ops; + vma->private = ppgtt; + + vma->size = size; + vma->fence_size = size; + atomic_set(&vma->flags, I915_VMA_GGTT); + vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ + + INIT_LIST_HEAD(&vma->obj_link); + INIT_LIST_HEAD(&vma->closed_link); + + return vma; +} + +int gen6_ppgtt_pin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; + + GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); + + /* + * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt + * which will 
be pinned into every active context. + * (When vma->pin_count becomes atomic, I expect we will naturally + * need a larger, unpacked, type and kill this redundancy.) + */ + if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) + return 0; + + if (mutex_lock_interruptible(&ppgtt->pin_mutex)) + return -EINTR; + + /* + * PPGTT PDEs reside in the GGTT and consists of 512 entries. The + * allocator works in address space sizes, so it's multiplied by page + * size. We allocate at the top of the GTT to avoid fragmentation. + */ + err = 0; + if (!atomic_read(&ppgtt->pin_count)) + err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); + if (!err) + atomic_inc(&ppgtt->pin_count); + mutex_unlock(&ppgtt->pin_mutex); + + return err; +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); +} + +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + if (!atomic_read(&ppgtt->pin_count)) + return; + + i915_vma_unpin(ppgtt->vma); + atomic_set(&ppgtt->pin_count, 0); +} + +struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ggtt * const ggtt = gt->ggtt; + struct gen6_ppgtt *ppgtt; + int err; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + mutex_init(&ppgtt->flush); + mutex_init(&ppgtt->pin_mutex); + + ppgtt_init(&ppgtt->base, gt); + ppgtt->base.vm.top = 1; + + ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; + ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; + ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; + + ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; + + ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); + if (!ppgtt->base.pd) { + err = -ENOMEM; + goto err_free; + } + + err = gen6_ppgtt_init_scratch(ppgtt); + if (err) + goto err_pd; + + ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + goto err_scratch; + } + + return &ppgtt->base; + +err_scratch: + free_scratch(&ppgtt->base.vm); +err_pd: + kfree(ppgtt->base.pd); +err_free: + mutex_destroy(&ppgtt->pin_mutex); + kfree(ppgtt); + return ERR_PTR(err); +} diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h new file mode 100644 index 000000000000..72e481806c96 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __GEN6_PPGTT_H__ +#define __GEN6_PPGTT_H__ + +#include "intel_gtt.h" + +struct gen6_ppgtt { + struct i915_ppgtt base; + + struct mutex flush; + struct i915_vma *vma; + gen6_pte_t __iomem *pd_addr; + + atomic_t pin_count; + struct mutex pin_mutex; + + bool scan_for_unused_pt; +}; + +static inline u32 gen6_pte_index(u32 addr) +{ + return i915_pte_index(addr, GEN6_PDE_SHIFT); +} + +static inline u32 gen6_pte_count(u32 addr, u32 length) +{ + return i915_pte_count(addr, length, GEN6_PDE_SHIFT); +} + +static inline u32 gen6_pde_index(u32 addr) +{ + return i915_pde_index(addr, GEN6_PDE_SHIFT); +} + +#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) + +static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) +{ + BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base)); + return __to_gen6_ppgtt(base); +} 
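
The pte/pde helpers just above are plain shift-and-mask arithmetic over a GTT offset. A self-contained sketch of that arithmetic follows, using the gen6 geometry this file relies on (4 KiB pages, 1024 PTEs per page table so each PDE covers 4 MiB, 512 PDEs for a 2 GiB address space); the local constant names and the example address are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages */
#define PDE_SHIFT	22	/* mirrors GEN6_PDE_SHIFT: 1024 PTEs x 4 KiB = 4 MiB per PDE */
#define PTES_PER_PT	1024	/* mirrors GEN6_PTES */
#define NUM_PDES	512	/* mirrors I915_PDES: 512 x 4 MiB = 2 GiB of address space */

int main(void)
{
	uint64_t addr = 0x12345000;	/* arbitrary GTT offset */

	/* same decomposition as gen6_pde_index()/gen6_pte_index() above */
	uint32_t pde = (addr >> PDE_SHIFT) & (NUM_PDES - 1);
	uint32_t pte = (addr >> PAGE_SHIFT) & (PTES_PER_PT - 1);

	printf("pde=%u pte=%u\n", pde, pte);	/* 0x12345000 -> pde 72, pte 837 */
	return 0;
}
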
+ +/* + * gen6_for_each_pde() iterates over every pde from start until start+length. + * If start and start+length are not perfectly divisible, the macro will round + * down and up as needed. Start=0 and length=2G effectively iterates over + * every PDE in the system. The macro modifies ALL its parameters except 'pd', + * so each of the other parameters should preferably be a simple variable, or + * at most an lvalue with no side-effects! + */ +#define gen6_for_each_pde(pt, pd, start, length, iter) \ + for (iter = gen6_pde_index(start); \ + length > 0 && iter < I915_PDES && \ + (pt = i915_pt_entry(pd, iter), true); \ + ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) + +#define gen6_for_all_pdes(pt, pd, iter) \ + for (iter = 0; \ + iter < I915_PDES && \ + (pt = i915_pt_entry(pd, iter), true); \ + ++iter) + +int gen6_ppgtt_pin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); +void gen6_ppgtt_enable(struct intel_gt *gt); +void gen7_ppgtt_enable(struct intel_gt *gt); +struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); + +#endif diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c new file mode 100644 index 000000000000..077b8f7cf6cb --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/log2.h> + +#include "gen8_ppgtt.h" +#include "i915_scatterlist.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_gt.h" +#include "intel_gtt.h" + +static u64 gen8_pde_encode(const dma_addr_t addr, + const enum i915_cache_level level) +{ + u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + + if (level != I915_CACHE_NONE) + pde |= PPAT_CACHED_PDE; + else + pde |= PPAT_UNCACHED; + + return pde; +} + +static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) +{ + struct drm_i915_private *i915 = ppgtt->vm.i915; + struct intel_uncore *uncore = ppgtt->vm.gt->uncore; + enum vgt_g2v_type msg; + int i; + + if (create) + atomic_inc(px_used(ppgtt->pd)); /* never remove */ + else + atomic_dec(px_used(ppgtt->pd)); + + mutex_lock(&i915->vgpu.lock); + + if (i915_vm_is_4lvl(&ppgtt->vm)) { + const u64 daddr = px_dma(ppgtt->pd); + + intel_uncore_write(uncore, + vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); + intel_uncore_write(uncore, + vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); + + msg = create ? + VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY; + } else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) { + const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); + + intel_uncore_write(uncore, + vgtif_reg(pdp[i].lo), + lower_32_bits(daddr)); + intel_uncore_write(uncore, + vgtif_reg(pdp[i].hi), + upper_32_bits(daddr)); + } + + msg = create ? + VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY; + } + + /* g2v_notify atomically (via hv trap) consumes the message packet. 
*/ + intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg); + + mutex_unlock(&i915->vgpu.lock); +} + +/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ +#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ +#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) +#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) +#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) +#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) +#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) +#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) + +#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) + +static inline unsigned int +gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) +{ + const int shift = gen8_pd_shift(lvl); + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + end += ~mask >> gen8_pd_shift(1); + + *idx = i915_pde_index(start, shift); + if ((start ^ end) & mask) + return GEN8_PDES - *idx; + else + return i915_pde_index(end, shift) - *idx; +} + +static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) +{ + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + return (start ^ end) & mask && (start & ~mask) == 0; +} + +static inline unsigned int gen8_pt_count(u64 start, u64 end) +{ + GEM_BUG_ON(start >= end); + if ((start ^ end) >> gen8_pd_shift(1)) + return GEN8_PDES - (start & (GEN8_PDES - 1)); + else + return end - start; +} + +static inline unsigned int +gen8_pd_top_count(const struct i915_address_space *vm) +{ + unsigned int shift = __gen8_pte_shift(vm->top); + return (vm->total + (1ull << shift) - 1) >> shift; +} + +static inline struct i915_page_directory * +gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) +{ + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); + + if (vm->top == 2) + return ppgtt->pd; + else + return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); +} + +static inline struct i915_page_directory * +gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) +{ + return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT); +} + +static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, + struct i915_page_directory *pd, + int count, int lvl) +{ + if (lvl) { + void **pde = pd->entry; + + do { + if (!*pde) + continue; + + __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); + } while (pde++, --count); + } + + free_px(vm, pd); +} + +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + + if (intel_vgpu_active(vm->i915)) + gen8_ppgtt_notify_vgt(ppgtt, false); + + __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); + free_scratch(vm); +} + +static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 start, const u64 end, int lvl) +{ + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + unsigned int idx, len; + + GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); + + len = gen8_pd_range(start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", + __func__, vm, lvl + 1, start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); + + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && + gen8_pd_contains(start, end, lvl)) { + DBG("%s(%p):{ 
lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", + __func__, vm, lvl + 1, idx, start, end); + clear_pd_entry(pd, idx, scratch); + __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); + start += (u64)I915_PDES << gen8_pd_shift(lvl); + continue; + } + + if (lvl) { + start = __gen8_ppgtt_clear(vm, as_pd(pt), + start, end, lvl); + } else { + unsigned int count; + u64 *vaddr; + + count = gen8_pt_count(start, end); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n", + __func__, vm, lvl, start, end, + gen8_pd_index(start, 0), count, + atomic_read(&pt->used)); + GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); + + vaddr = kmap_atomic_px(pt); + memset64(vaddr + gen8_pd_index(start, 0), + vm->scratch[0].encode, + count); + kunmap_atomic(vaddr); + + atomic_sub(count, &pt->used); + start += count; + } + + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + } while (idx++, --len); + + return start; +} + +static void gen8_ppgtt_clear(struct i915_address_space *vm, + u64 start, u64 length) +{ + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(range_overflows(start, length, vm->total)); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + start, start + length, vm->top); +} + +static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 * const start, const u64 end, int lvl) +{ + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + struct i915_page_table *alloc = NULL; + unsigned int idx, len; + int ret = 0; + + GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); + + len = gen8_pd_range(*start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", + __func__, vm, lvl + 1, *start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); + + spin_lock(&pd->lock); + GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! 
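The level arithmetic used throughout __gen8_ppgtt_clear() and __gen8_ppgtt_alloc() follows from GEN8_PAGE_SIZE and GEN8_PDES above: 512 entries per table give 9 bits of index per level on top of the 12-bit page offset. A standalone sketch of that decomposition (plain C, not part of the patch):

/*
 * Standalone sketch (not kernel code): split a 48-bit address into its page
 * offset and the four 9-bit indices computed by __gen8_pte_index(addr, lvl)
 * (lvl 0 = page table, 1 = PD, 2 = PDP, 3 = PML4).
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PTE_SHIFT 12	/* ilog2(4K page)  */
#define SKETCH_PD_BITS   9	/* ilog2(512 PDEs) */

static unsigned int pte_index(uint64_t addr, int lvl)
{
	return (addr >> (SKETCH_PTE_SHIFT + lvl * SKETCH_PD_BITS)) & 0x1ff;
}

int main(void)
{
	uint64_t addr = 0x0000123456789abcULL;	/* arbitrary 48-bit address */
	int lvl;

	printf("page offset: 0x%llx\n", (unsigned long long)(addr & 0xfff));
	for (lvl = 0; lvl <= 3; lvl++)
		printf("lvl %d index: %u\n", lvl, pte_index(addr, lvl));
	return 0;
}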
*/ + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (!pt) { + spin_unlock(&pd->lock); + + DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", + __func__, vm, lvl + 1, idx); + + pt = fetch_and_zero(&alloc); + if (lvl) { + if (!pt) { + pt = &alloc_pd(vm)->pt; + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + fill_px(pt, vm->scratch[lvl].encode); + } else { + if (!pt) { + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + if (intel_vgpu_active(vm->i915) || + gen8_pt_count(*start, end) < I915_PDES) + fill_px(pt, vm->scratch[lvl].encode); + } + + spin_lock(&pd->lock); + if (likely(!pd->entry[idx])) + set_pd_entry(pd, idx, pt); + else + alloc = pt, pt = pd->entry[idx]; + } + + if (lvl) { + atomic_inc(&pt->used); + spin_unlock(&pd->lock); + + ret = __gen8_ppgtt_alloc(vm, as_pd(pt), + start, end, lvl); + if (unlikely(ret)) { + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + goto out; + } + + spin_lock(&pd->lock); + atomic_dec(&pt->used); + GEM_BUG_ON(!atomic_read(&pt->used)); + } else { + unsigned int count = gen8_pt_count(*start, end); + + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n", + __func__, vm, lvl, *start, end, + gen8_pd_index(*start, 0), count, + atomic_read(&pt->used)); + + atomic_add(count, &pt->used); + /* All other pdes may be simultaneously removed */ + GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES); + *start += count; + } + } while (idx++, --len); + spin_unlock(&pd->lock); +out: + if (alloc) + free_px(vm, alloc); + return ret; +} + +static int gen8_ppgtt_alloc(struct i915_address_space *vm, + u64 start, u64 length) +{ + u64 from; + int err; + + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(range_overflows(start, length, vm->total)); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + from = start; + + err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, + &start, start + length, vm->top); + if (unlikely(err && from != start)) + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + from, start, vm->top); + + return err; +} + +static __always_inline u64 +gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, + struct i915_page_directory *pdp, + struct sgt_dma *iter, + u64 idx, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_page_directory *pd; + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + gen8_pte_t *vaddr; + + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + do { + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; + + iter->dma += I915_GTT_PAGE_SIZE; + if (iter->dma >= iter->max) { + iter->sg = __sg_next(iter->sg); + if (!iter->sg) { + idx = 0; + break; + } + + iter->dma = sg_dma_address(iter->sg); + iter->max = iter->dma + iter->sg->length; + } + + if (gen8_pd_index(++idx, 0) == 0) { + if (gen8_pd_index(idx, 1) == 0) { + /* Limited by sg length for 3lvl */ + if (gen8_pd_index(idx, 2) == 0) + break; + + pd = pdp->entry[gen8_pd_index(idx, 2)]; + } + + kunmap_atomic(vaddr); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + } + } while (1); + kunmap_atomic(vaddr); + + return idx; +} + +static void gen8_ppgtt_insert_huge(struct i915_vma *vma, + struct sgt_dma *iter, + enum i915_cache_level cache_level, + u32 flags) +{ + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + u64 start = 
vma->node.start; + dma_addr_t rem = iter->sg->length; + + GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm)); + + do { + struct i915_page_directory * const pdp = + gen8_pdp_for_page_address(vma->vm, start); + struct i915_page_directory * const pd = + i915_pd_entry(pdp, __gen8_pte_index(start, 2)); + gen8_pte_t encode = pte_encode; + unsigned int maybe_64K = -1; + unsigned int page_size; + gen8_pte_t *vaddr; + u16 index; + + if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && + IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && + rem >= I915_GTT_PAGE_SIZE_2M && + !__gen8_pte_index(start, 0)) { + index = __gen8_pte_index(start, 1); + encode |= GEN8_PDE_PS_2M; + page_size = I915_GTT_PAGE_SIZE_2M; + + vaddr = kmap_atomic_px(pd); + } else { + struct i915_page_table *pt = + i915_pt_entry(pd, __gen8_pte_index(start, 1)); + + index = __gen8_pte_index(start, 0); + page_size = I915_GTT_PAGE_SIZE; + + if (!index && + vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && + IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && + (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) + maybe_64K = __gen8_pte_index(start, 1); + + vaddr = kmap_atomic_px(pt); + } + + do { + GEM_BUG_ON(iter->sg->length < page_size); + vaddr[index++] = encode | iter->dma; + + start += page_size; + iter->dma += page_size; + rem -= page_size; + if (iter->dma >= iter->max) { + iter->sg = __sg_next(iter->sg); + if (!iter->sg) + break; + + rem = iter->sg->length; + iter->dma = sg_dma_address(iter->sg); + iter->max = iter->dma + rem; + + if (maybe_64K != -1 && index < I915_PDES && + !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && + (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) + maybe_64K = -1; + + if (unlikely(!IS_ALIGNED(iter->dma, page_size))) + break; + } + } while (rem >= page_size && index < I915_PDES); + + kunmap_atomic(vaddr); + + /* + * Is it safe to mark the 2M block as 64K? -- Either we have + * filled whole page-table with 64K entries, or filled part of + * it and have reached the end of the sg table and we have + * enough padding. + */ + if (maybe_64K != -1 && + (index == I915_PDES || + (i915_vm_has_scratch_64K(vma->vm) && + !iter->sg && IS_ALIGNED(vma->node.start + + vma->node.size, + I915_GTT_PAGE_SIZE_2M)))) { + vaddr = kmap_atomic_px(pd); + vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; + kunmap_atomic(vaddr); + page_size = I915_GTT_PAGE_SIZE_64K; + + /* + * We write all 4K page entries, even when using 64K + * pages. In order to verify that the HW isn't cheating + * by using the 4K PTE instead of the 64K PTE, we want + * to remove all the surplus entries. If the HW skipped + * the 64K PTE, it will read/write into the scratch page + * instead - which we detect as missing results during + * selftests. 
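The page-size selection in gen8_ppgtt_insert_huge() above boils down to a few alignment and length checks per chunk; a standalone sketch of that decision (plain C, not part of the patch, with the 2M/64K/4K constants written out):

/*
 * Standalone sketch (not kernel code) of the choice made per chunk in
 * gen8_ppgtt_insert_huge(): use a 2M PDE when address, DMA address and
 * remaining length allow it, otherwise consider the 64K hint for a page
 * table that starts at index 0, else fall back to 4K PTEs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000ull
#define SZ_64K	0x10000ull
#define SZ_2M	0x200000ull

static uint64_t pick_page_size(uint64_t start, uint64_t dma, uint64_t rem,
			       uint64_t sg_sizes)
{
	bool pt_start = !((start >> 12) & 0x1ff);	/* lvl-0 index == 0 */

	if ((sg_sizes & SZ_2M) && !(dma & (SZ_2M - 1)) &&
	    rem >= SZ_2M && !(start & (SZ_2M - 1)))
		return SZ_2M;

	if (pt_start && (sg_sizes & SZ_64K) && !(dma & (SZ_64K - 1)) &&
	    (!(rem & (SZ_64K - 1)) || rem >= SZ_2M))
		return SZ_64K;		/* candidate for the 64K hint */

	return SZ_4K;
}

int main(void)
{
	/* 2M-aligned chunk with >= 2M left -> 2M page */
	printf("%#llx\n", (unsigned long long)
	       pick_page_size(0x200000, 0x40200000, 0x400000,
			      SZ_2M | SZ_64K | SZ_4K));
	/* 64K-aligned remainder at the start of a page table -> 64K hint */
	printf("%#llx\n", (unsigned long long)
	       pick_page_size(0x400000, 0x40610000, 0x30000, SZ_64K | SZ_4K));
	return 0;
}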
+ */ + if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { + u16 i; + + encode = vma->vm->scratch[0].encode; + vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); + + for (i = 1; i < index; i += 16) + memset64(vaddr + i, encode, 15); + + kunmap_atomic(vaddr); + } + } + + vma->page_sizes.gtt |= page_size; + } while (iter->sg); +} + +static void gen8_ppgtt_insert(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); + struct sgt_dma iter = sgt_dma(vma); + + if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { + gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags); + } else { + u64 idx = vma->node.start >> GEN8_PTE_SHIFT; + + do { + struct i915_page_directory * const pdp = + gen8_pdp_for_page_index(vm, idx); + + idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx, + cache_level, flags); + } while (idx); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + } +} + +static int gen8_init_scratch(struct i915_address_space *vm) +{ + int ret; + int i; + + /* + * If everybody agrees to not to write into the scratch page, + * we can reuse it for all vm, keeping contexts and processes separate. + */ + if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { + struct i915_address_space *clone = vm->gt->vm; + + GEM_BUG_ON(!clone->has_read_only); + + vm->scratch_order = clone->scratch_order; + memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); + px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ + return 0; + } + + ret = setup_scratch_page(vm, __GFP_HIGHMEM); + if (ret) + return ret; + + vm->scratch[0].encode = + gen8_pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_LLC, vm->has_read_only); + + for (i = 1; i <= vm->top; i++) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) + goto free_scratch; + + fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); + vm->scratch[i].encode = + gen8_pde_encode(px_dma(&vm->scratch[i]), + I915_CACHE_LLC); + } + + return 0; + +free_scratch: + free_scratch(vm); + return -ENOMEM; +} + +static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) +{ + struct i915_address_space *vm = &ppgtt->vm; + struct i915_page_directory *pd = ppgtt->pd; + unsigned int idx; + + GEM_BUG_ON(vm->top != 2); + GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); + + for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { + struct i915_page_directory *pde; + + pde = alloc_pd(vm); + if (IS_ERR(pde)) + return PTR_ERR(pde); + + fill_px(pde, vm->scratch[1].encode); + set_pd_entry(pd, idx, pde); + atomic_inc(px_used(pde)); /* keep pinned */ + } + wmb(); + + return 0; +} + +static struct i915_page_directory * +gen8_alloc_top_pd(struct i915_address_space *vm) +{ + const unsigned int count = gen8_pd_top_count(vm); + struct i915_page_directory *pd; + + GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); + + pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } + + fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); + atomic_inc(px_used(pd)); /* mark as pinned */ + return pd; +} + +/* + * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers + * with a net effect resembling a 2-level page table in normal x86 terms. Each + * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address + * space. 
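The comment above compresses the layout arithmetic; spelled out, 512 entries per level and 4 KiB pages give 2 MiB per page table, 1 GiB per PDP entry, 4 GiB for the legacy 4-PDP case and a 48-bit (256 TiB) space with four levels. A compile-time check of that arithmetic (plain C, not part of the patch):

/* Standalone arithmetic check (not kernel code) of the layout above. */
#define SKETCH_ENTRIES	512ull
#define SKETCH_PAGE	4096ull

_Static_assert(SKETCH_ENTRIES * SKETCH_PAGE == 2ull << 20,
	       "one PD entry maps 2 MiB");
_Static_assert(SKETCH_ENTRIES * SKETCH_ENTRIES * SKETCH_PAGE == 1ull << 30,
	       "one PDP entry maps 1 GiB");
_Static_assert(4 * SKETCH_ENTRIES * SKETCH_ENTRIES * SKETCH_PAGE == 4ull << 30,
	       "4 PDPs map the legacy 4 GiB / 32b space");
_Static_assert(SKETCH_ENTRIES * SKETCH_ENTRIES * SKETCH_ENTRIES *
	       SKETCH_ENTRIES * SKETCH_PAGE == 1ull << 48,
	       "four levels map a 48-bit / 256 TiB space");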
+ * + */ +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ppgtt *ppgtt; + int err; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + ppgtt_init(ppgtt, gt); + ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; + + /* + * From bdw, there is hw support for read-only pages in the PPGTT. + * + * Gen11 has HSDES#:1807136187 unresolved. Disable ro support + * for now. + * + * Gen12 has inherited the same read-only fault issue from gen11. + */ + ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12); + + /* + * There are only few exceptions for gen >=6. chv and bxt. + * And we are not sure about the latter so play safe for now. + */ + if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915)) + ppgtt->vm.pt_kmap_wc = true; + + err = gen8_init_scratch(&ppgtt->vm); + if (err) + goto err_free; + + ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); + if (IS_ERR(ppgtt->pd)) { + err = PTR_ERR(ppgtt->pd); + goto err_free_scratch; + } + + if (!i915_vm_is_4lvl(&ppgtt->vm)) { + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) + goto err_free_pd; + } + + ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; + ppgtt->vm.insert_entries = gen8_ppgtt_insert; + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; + ppgtt->vm.clear_range = gen8_ppgtt_clear; + + if (intel_vgpu_active(gt->i915)) + gen8_ppgtt_notify_vgt(ppgtt, true); + + ppgtt->vm.cleanup = gen8_ppgtt_cleanup; + + return ppgtt; + +err_free_pd: + __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, + gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); +err_free_scratch: + free_scratch(&ppgtt->vm); +err_free: + kfree(ppgtt); + return ERR_PTR(err); +} diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h new file mode 100644 index 000000000000..76a08b9c1f5c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __GEN8_PPGTT_H__ +#define __GEN8_PPGTT_H__ + +struct intel_gt; + +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index fbaa9df6f436..23137b2a8689 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -43,30 +43,76 @@ intel_context_create(struct intel_engine_cs *engine) return ce; } -int __intel_context_do_pin(struct intel_context *ce) +int intel_context_alloc_state(struct intel_context *ce) { - int err; + int err = 0; if (mutex_lock_interruptible(&ce->pin_mutex)) return -EINTR; - if (likely(!atomic_read(&ce->pin_count))) { - intel_wakeref_t wakeref; + if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + err = ce->ops->alloc(ce); + if (unlikely(err)) + goto unlock; - if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { - err = ce->ops->alloc(ce); - if (unlikely(err)) - goto err; + set_bit(CONTEXT_ALLOC_BIT, &ce->flags); + } - __set_bit(CONTEXT_ALLOC_BIT, &ce->flags); +unlock: + mutex_unlock(&ce->pin_mutex); + return err; +} + +static int intel_context_active_acquire(struct intel_context *ce) +{ + int err; + + err = i915_active_acquire(&ce->active); + if (err) + return err; + + /* Preallocate tracking nodes */ + if (!intel_context_is_barrier(ce)) { + err = i915_active_acquire_preallocate_barrier(&ce->active, + ce->engine); + if (err) { + i915_active_release(&ce->active); + return err; } + } + + return 0; +} + +static void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes 
preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); +} - err = 0; - with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref) - err = ce->ops->pin(ce); +int __intel_context_do_pin(struct intel_context *ce) +{ + int err; + + if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { + err = intel_context_alloc_state(ce); if (err) + return err; + } + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return -EINTR; + + if (likely(!atomic_read(&ce->pin_count))) { + err = intel_context_active_acquire(ce); + if (unlikely(err)) goto err; + err = ce->ops->pin(ce); + if (unlikely(err)) + goto err_active; + CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n", ce->ring->head, ce->ring->tail); @@ -79,6 +125,8 @@ int __intel_context_do_pin(struct intel_context *ce) mutex_unlock(&ce->pin_mutex); return 0; +err_active: + intel_context_active_release(ce); err: mutex_unlock(&ce->pin_mutex); return err; @@ -86,22 +134,20 @@ err: void intel_context_unpin(struct intel_context *ce) { - if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) + if (!atomic_dec_and_test(&ce->pin_count)) return; - /* We may be called from inside intel_context_pin() to evict another */ - intel_context_get(ce); - mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); - - if (likely(atomic_dec_and_test(&ce->pin_count))) { - CE_TRACE(ce, "retire\n"); - - ce->ops->unpin(ce); + CE_TRACE(ce, "unpin\n"); + ce->ops->unpin(ce); - intel_context_active_release(ce); - } - - mutex_unlock(&ce->pin_mutex); + /* + * Once released, we may asynchronously drop the active reference. + * As that may be the only reference keeping the context alive, + * take an extra now so that it is not freed before we finish + * dereferencing it. + */ + intel_context_get(ce); + intel_context_active_release(ce); intel_context_put(ce); } @@ -114,6 +160,10 @@ static int __context_pin_state(struct i915_vma *vma) if (err) return err; + err = i915_active_acquire(&vma->active); + if (err) + goto err_unpin; + /* * And mark it as a globally pinned object to let the shrinker know * it cannot reclaim the object until we release it. 
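The unpin path above relies on a classic refcounting idiom: take a temporary reference before an operation that may drop the last one, then drop the temporary reference once the object is no longer dereferenced. A standalone sketch of the pattern (plain C11, not the i915 API):

/*
 * Standalone sketch (not the i915 API) of the pattern used by
 * intel_context_unpin() above: before triggering an operation that may
 * asynchronously drop the final reference, take a temporary reference so
 * the object cannot be freed while we are still using it.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;
};

static void object_get(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
}

static void object_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		printf("freeing object\n");
		free(obj);
	}
}

/* May drop what could be the caller's only reference. */
static void object_release_active(struct object *obj)
{
	object_put(obj);
}

static void object_unpin(struct object *obj)
{
	object_get(obj);		/* keep obj alive across the release */
	object_release_active(obj);
	object_put(obj);		/* drop our temporary reference */
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refcount, 1);
	object_unpin(obj);		/* frees obj exactly once */
	return 0;
}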
@@ -122,14 +172,44 @@ static int __context_pin_state(struct i915_vma *vma) vma->obj->mm.dirty = true; return 0; + +err_unpin: + i915_vma_unpin(vma); + return err; } static void __context_unpin_state(struct i915_vma *vma) { i915_vma_make_shrinkable(vma); + i915_active_release(&vma->active); __i915_vma_unpin(vma); } +static int __ring_active(struct intel_ring *ring) +{ + int err; + + err = i915_active_acquire(&ring->vma->active); + if (err) + return err; + + err = intel_ring_pin(ring); + if (err) + goto err_active; + + return 0; + +err_active: + i915_active_release(&ring->vma->active); + return err; +} + +static void __ring_retire(struct intel_ring *ring) +{ + intel_ring_unpin(ring); + i915_active_release(&ring->vma->active); +} + __i915_active_call static void __intel_context_retire(struct i915_active *active) { @@ -142,7 +222,7 @@ static void __intel_context_retire(struct i915_active *active) __context_unpin_state(ce->state); intel_timeline_unpin(ce->timeline); - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); intel_context_put(ce); } @@ -152,9 +232,11 @@ static int __intel_context_active(struct i915_active *active) struct intel_context *ce = container_of(active, typeof(*ce), active); int err; + CE_TRACE(ce, "active\n"); + intel_context_get(ce); - err = intel_ring_pin(ce->ring); + err = __ring_active(ce->ring); if (err) goto err_put; @@ -174,40 +256,12 @@ static int __intel_context_active(struct i915_active *active) err_timeline: intel_timeline_unpin(ce->timeline); err_ring: - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); err_put: intel_context_put(ce); return err; } -int intel_context_active_acquire(struct intel_context *ce) -{ - int err; - - err = i915_active_acquire(&ce->active); - if (err) - return err; - - /* Preallocate tracking nodes */ - if (!intel_context_is_barrier(ce)) { - err = i915_active_acquire_preallocate_barrier(&ce->active, - ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } - } - - return 0; -} - -void intel_context_active_release(struct intel_context *ce) -{ - /* Nodes preallocated in intel_context_active() */ - i915_active_acquire_barrier(&ce->active); - i915_active_release(&ce->active); -} - void intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 1d4a1b1357cf..30bd248827d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -19,7 +19,7 @@ #define CE_TRACE(ce, fmt, ...) 
do { \ const struct intel_context *ce__ = (ce); \ - ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \ + ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \ ce__->timeline->fence_context, \ ##__VA_ARGS__); \ } while (0) @@ -31,6 +31,8 @@ void intel_context_fini(struct intel_context *ce); struct intel_context * intel_context_create(struct intel_engine_cs *engine); +int intel_context_alloc_state(struct intel_context *ce); + void intel_context_free(struct intel_context *ce); /** @@ -76,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce) int __intel_context_do_pin(struct intel_context *ce); +static inline bool intel_context_pin_if_active(struct intel_context *ce) +{ + return atomic_inc_not_zero(&ce->pin_count); +} + static inline int intel_context_pin(struct intel_context *ce) { - if (likely(atomic_inc_not_zero(&ce->pin_count))) + if (likely(intel_context_pin_if_active(ce))) return 0; return __intel_context_do_pin(ce); @@ -116,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } -int intel_context_active_acquire(struct intel_context *ce); -void intel_context_active_release(struct intel_context *ce); - static inline struct intel_context *intel_context_get(struct intel_context *ce) { kref_get(&ce->ref); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 9527a659546c..ca1420fb8b53 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -17,6 +17,8 @@ #include "intel_engine_types.h" #include "intel_sseu.h" +#define CONTEXT_REDZONE POISON_INUSE + struct i915_gem_context; struct i915_vma; struct intel_context; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 79ecac5ac0ab..5df003061e44 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -202,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -void intel_engine_get_instdone(struct intel_engine_cs *engine, +void intel_engine_get_instdone(const struct intel_engine_cs *engine, struct intel_instdone *instdone); void intel_engine_init_execlists(struct intel_engine_cs *engine); @@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine, bool intel_engines_are_idle(struct intel_gt *gt); bool intel_engine_is_idle(struct intel_engine_cs *engine); -bool intel_engine_flush_submission(struct intel_engine_cs *engine); +void intel_engine_flush_submission(struct intel_engine_cs *engine); void intel_engines_reset_default_submission(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ddf9543b1261..f451ef376548 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -914,8 +914,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } static u32 -read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, - i915_reg_t reg) +read_subslice_reg(const struct intel_engine_cs *engine, + int slice, int subslice, i915_reg_t reg) { struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore; @@ -959,7 +959,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, 
} /* NB: please notice the memset */ -void intel_engine_get_instdone(struct intel_engine_cs *engine, +void intel_engine_get_instdone(const struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *i915 = engine->i915; @@ -1047,10 +1047,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return idle; } -bool intel_engine_flush_submission(struct intel_engine_cs *engine) +void intel_engine_flush_submission(struct intel_engine_cs *engine) { struct tasklet_struct *t = &engine->execlists.tasklet; - bool active = tasklet_is_locked(t); if (__tasklet_is_scheduled(t)) { local_bh_disable(); @@ -1061,13 +1060,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine) tasklet_unlock(t); } local_bh_enable(); - active = true; } /* Otherwise flush the tasklet if it was running on another cpu */ tasklet_unlock_wait(t); - - return active; } /** diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 742628e40201..6c6fd185457c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine) goto out_unlock; } - rq->flags |= I915_REQUEST_SENTINEL; + __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); idle_pulse(engine, rq); __i915_request_commit(rq); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 010620b78202..ea90ab3e396e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -20,6 +20,7 @@ static int __engine_unpark(struct intel_wakeref *wf) { struct intel_engine_cs *engine = container_of(wf, typeof(*engine), wakeref); + struct intel_context *ce; void *map; ENGINE_TRACE(engine, "\n"); @@ -34,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf) if (!IS_ERR_OR_NULL(map)) engine->pinned_default_state = map; + /* Discard stale context state from across idling */ + ce = engine->kernel_context; + if (ce) { + GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags)); + + /* First poison the image to verify we never fully trust it */ + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) { + struct drm_i915_gem_object *obj = ce->state->obj; + int type = i915_coherent_map_type(engine->i915); + + map = i915_gem_object_pin_map(obj, type); + if (!IS_ERR(map)) { + memset(map, CONTEXT_REDZONE, obj->base.size); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + } + } + + ce->ops->reset(ce); + } + if (engine->unpark) engine->unpark(engine); @@ -123,16 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) unsigned long flags; bool result = true; + /* GPU is pointing to the void, as good as in the kernel context. */ + if (intel_gt_is_wedged(engine->gt)) + return true; + GEM_BUG_ON(!intel_context_is_barrier(ce)); /* Already inside the kernel context, safe to power down. */ if (engine->wakeref_serial == engine->serial) return true; - /* GPU is pointing to the void, as good as in the kernel context. */ - if (intel_gt_is_wedged(engine->gt)) - return true; - /* * Note, we do this without taking the timeline->mutex. 
We cannot * as we may be called while retiring the kernel context and so diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 7f7150a733f4..9e7f12bef828 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "intel_engine.h" #include "intel_engine_user.h" +#include "intel_gt.h" struct intel_engine_cs * intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) @@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915) uabi_node); char old[sizeof(engine->name)]; + if (intel_gt_has_init_error(engine->gt)) + continue; /* ignore incomplete engines */ + GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); engine->uabi_class = uabi_classes[engine->class]; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c new file mode 100644 index 000000000000..79096722ce16 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -0,0 +1,1486 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/stop_machine.h> + +#include <asm/set_memory.h> +#include <asm/smp.h> + +#include "intel_gt.h" +#include "i915_drv.h" +#include "i915_scatterlist.h" +#include "i915_vgpu.h" + +#include "intel_gtt.h" + +static int +i915_get_ggtt_vma_pages(struct i915_vma *vma); + +static void i915_ggtt_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, + u64 *end) +{ + if (i915_node_color_differs(node, color)) + *start += I915_GTT_PAGE_SIZE; + + /* + * Also leave a space between the unallocated reserved node after the + * GTT and any objects within the GTT, i.e. we use the color adjustment + * to insert a guard page to prevent prefetches crossing over the + * GTT boundary. + */ + node = list_next_entry(node, node_list); + if (node->color != color) + *end -= I915_GTT_PAGE_SIZE; +} + +static int ggtt_init_hw(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); + + ggtt->vm.is_ggtt = true; + + /* Only VLV supports read-only GGTT mappings */ + ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); + + if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) + ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; + + if (ggtt->mappable_end) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt->vm.cleanup(&ggtt->vm); + return -EIO; + } + + ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, + ggtt->mappable_end); + } + + i915_ggtt_init_fences(ggtt); + + return 0; +} + +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @i915: i915 device + */ +int i915_ggtt_init_hw(struct drm_i915_private *i915) +{ + int ret; + + stash_init(&i915->mm.wc_stash); + + /* + * Note that we use page colouring to enforce a guard page at the + * end of the address space. This is required as the CS may prefetch + * beyond the end of the batch buffer, across the page boundary, + * and beyond the end of the GTT if we do not provide a guard. + */ + ret = ggtt_init_hw(&i915->ggtt); + if (ret) + return ret; + + return 0; +} + +/* + * Certain Gen5 chipsets require require idling the GPU before + * unmapping anything from the GTT when VT-d is enabled. + */ +static bool needs_idle_maps(struct drm_i915_private *i915) +{ + /* + * Query intel_iommu to see if we need the workaround. Presumably that + * was loaded first. 
+ */ + return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active(); +} + +static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + + /* + * Don't bother messing with faults pre GEN6 as we have little + * documentation supporting that it's a good idea. + */ + if (INTEL_GEN(i915) < 6) + return; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + + ggtt->invalidate(ggtt); +} + +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_suspend_mappings(&i915->ggtt); +} + +void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + + spin_lock_irq(&uncore->lock); + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); + spin_unlock_irq(&uncore->lock); +} + +static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + + /* + * Note that as an uncached mmio write, this will flush the + * WCB of the writes into the GGTT before it triggers the invalidate. + */ + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); +} + +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + struct drm_i915_private *i915 = ggtt->vm.i915; + + gen8_ggtt_invalidate(ggtt); + + if (INTEL_GEN(i915) >= 12) + intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, + GEN12_GUC_TLB_INV_CR_INVALIDATE); + else + intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); +} + +static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + intel_gtt_chipset_flush(); +} + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void gen8_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 unused) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); + + ggtt->invalidate(ggtt); +} + +static void gen8_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen8_pte_t __iomem *gtt_entries; + const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); + dma_addr_t addr; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; + gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, sgt_iter, vma->pages) + gen8_set_pte(gtt_entries++, pte_encode | addr); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. + */ + ggtt->invalidate(ggtt); +} + +static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + iowrite32(vm->pte_encode(addr, level, flags), pte); + + ggtt->invalidate(ggtt); +} + +/* + * Binds an object into the global gtt with the specified cache level. 
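The insert_page and insert_entries callbacks above follow the same discipline: write every PTE for the range into the mapped GSM first and only then issue a single TLB invalidate. A standalone sketch of that ordering (plain C, not kernel code; the array size and flag value are illustrative):

/*
 * Standalone sketch (not kernel code) of the pattern used by the
 * insert_entries callbacks above: write all PTEs for the range first,
 * then issue one invalidate once every update is in place.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

static uint64_t gsm[1024];			/* stand-in for the mapped GTT */

static void sketch_invalidate(void)
{
	printf("invalidate TLBs once, after all PTE writes\n");
}

static void insert_entries(uint64_t gtt_offset, const uint64_t *dma_addrs,
			   unsigned int count, uint64_t pte_flags)
{
	uint64_t *pte = &gsm[gtt_offset / SKETCH_PAGE_SIZE];
	unsigned int i;

	for (i = 0; i < count; i++)
		pte[i] = dma_addrs[i] | pte_flags;	/* one write per page */

	sketch_invalidate();			/* not once per PTE */
}

int main(void)
{
	const uint64_t dma[] = { 0x100000, 0x101000, 0x205000 };

	insert_entries(3 * SKETCH_PAGE_SIZE, dma, 3, 0x1 /* e.g. a present bit */);
	return 0;
}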
+ * The object will be accessible to the GPU via commands whose operands + * reference offsets within the global GTT as well as accessible by the GPU + * through the GMADR mapped BAR (i915->mm.gtt->gtt). + */ +static void gen6_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; + unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; + struct sgt_iter iter; + dma_addr_t addr; + + for_each_sgt_daddr(addr, iter, vma->pages) + iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. + */ + ggtt->invalidate(ggtt); +} + +static void nop_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0].encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); +} + +static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) +{ + /* + * Make sure the internal GAM fifo has been cleared of all GTT + * writes before exiting stop_machine(). This guarantees that + * any aperture accesses waiting to start in another process + * cannot back up behind the GTT writes causing a hang. + * The register can be any arbitrary GAM register.
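The __BKL wrappers below all marshal their parameters into a small struct so the real work can run behind a callback taking void * under stop_machine(). A standalone sketch of that marshalling pattern (plain C; a direct call stands in for stop_machine()):

/*
 * Standalone sketch (not kernel code) of the argument-marshalling pattern
 * used by the bxt_vtd_*__BKL wrappers below: pack the parameters into a
 * struct, hand it to a callback taking void *, and run the callback under
 * whatever serialisation primitive is required.
 */
#include <stdint.h>
#include <stdio.h>

struct insert_page_args {
	uint64_t addr;
	uint64_t offset;
	unsigned int level;
};

static int insert_page_cb(void *data)
{
	const struct insert_page_args *args = data;

	printf("insert page: addr=%#llx offset=%#llx level=%u\n",
	       (unsigned long long)args->addr,
	       (unsigned long long)args->offset, args->level);
	return 0;
}

/* Stand-in for stop_machine(): run the callback fully serialised. */
static int run_serialised(int (*fn)(void *), void *data)
{
	return fn(data);
}

int main(void)
{
	struct insert_page_args args = { 0x100000, 0x4000, 0 };

	return run_serialised(insert_page_cb, &args);
}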
+ */ + intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); +} + +struct insert_page { + struct i915_address_space *vm; + dma_addr_t addr; + u64 offset; + enum i915_cache_level level; +}; + +static int bxt_vtd_ggtt_insert_page__cb(void *_arg) +{ + struct insert_page *arg = _arg; + + gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 unused) +{ + struct insert_page arg = { vm, addr, offset, level }; + + stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); +} + +struct insert_entries { + struct i915_address_space *vm; + struct i915_vma *vma; + enum i915_cache_level level; + u32 flags; +}; + +static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) +{ + struct insert_entries *arg = _arg; + + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct insert_entries arg = { vm, vma, level, flags }; + + stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); +} + +struct clear_range { + struct i915_address_space *vm; + u64 start; + u64 length; +}; + +static int bxt_vtd_ggtt_clear_range__cb(void *_arg) +{ + struct clear_range *arg = _arg; + + gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, + u64 start, + u64 length) +{ + struct clear_range arg = { vm, start, length }; + + stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); +} + +static void gen6_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + gen6_pte_t scratch_pte, __iomem *gtt_base = + (gen6_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = vm->scratch[0].encode; + for (i = 0; i < num_entries; i++) + iowrite32(scratch_pte, &gtt_base[i]); +} + +static void i915_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); +} + +static void i915_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, + flags); +} + +static void i915_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); +} + +static int ggtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + u32 pte_flags; + + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + pte_flags = 0; + if (i915_gem_object_is_readonly(obj)) + pte_flags |= PTE_READ_ONLY; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + + /* + * Without aliasing PPGTT there's no difference between + * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally + * upgrade to both bound if we bind either to avoid double-binding. + */ + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); + + return 0; +} + +static void ggtt_unbind_vma(struct i915_vma *vma) +{ + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); +} + +static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) +{ + u64 size; + int ret; + + if (!USES_GUC(ggtt->vm.i915)) + return 0; + + GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); + size = ggtt->vm.total - GUC_GGTT_TOP; + + ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, + GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, + PIN_NOEVICT); + if (ret) + DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); + + return ret; +} + +static void ggtt_release_guc_top(struct i915_ggtt *ggtt) +{ + if (drm_mm_node_allocated(&ggtt->uc_fw)) + drm_mm_remove_node(&ggtt->uc_fw); +} + +static void cleanup_init_ggtt(struct i915_ggtt *ggtt) +{ + ggtt_release_guc_top(ggtt); + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); + mutex_destroy(&ggtt->error_mutex); +} + +static int init_ggtt(struct i915_ggtt *ggtt) +{ + /* + * Let GEM Manage all of the aperture. + * + * However, leave one page at the end still bound to the scratch page. + * There are a number of places where the hardware apparently prefetches + * past the end of the object, and we've seen multiple hangs with the + * GPU head pointer stuck in a batchbuffer bound at the last page of the + * aperture. One page should be enough to keep any prefetching inside + * of the aperture. + */ + unsigned long hole_start, hole_end; + struct drm_mm_node *entry; + int ret; + + /* + * GuC requires all resources that we're sharing with it to be placed in + * non-WOPCM memory. If GuC is not present or not in use we still need a + * small bias as ring wraparound at offset 0 sometimes hangs. No idea + * why. + */ + ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, + intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); + + ret = intel_vgt_balloon(ggtt); + if (ret) + return ret; + + mutex_init(&ggtt->error_mutex); + if (ggtt->mappable_end) { + /* Reserve a mappable slot for our lockless error capture */ + ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, + &ggtt->error_capture, + PAGE_SIZE, 0, + I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + if (ret) + return ret; + } + + /* + * The upper portion of the GuC address space has a sizeable hole + * (several MB) that is inaccessible by GuC. Reserve this range within + * GGTT as it can comfortably hold GuC/HuC firmware images. 
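init_ggtt() above ends up with three carve-outs: a pin bias at the bottom for objects shared with the GuC, the reserved hole above the GuC-visible limit, and a scratch guard page at the very top. A standalone sketch with purely illustrative numbers (plain C; none of these are the driver's real values):

/*
 * Standalone sketch (not kernel code) of the carve-outs init_ggtt() above
 * sets up.  Every number here is an illustrative assumption.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE 4096ull

int main(void)
{
	uint64_t total = 4ull << 30;		  /* assumed 4 GiB GGTT */
	uint64_t guc_top = total - (32ull << 20); /* assumed GuC-visible limit */
	uint64_t wopcm = 2ull << 20;		  /* assumed WOPCM size */
	uint64_t pin_bias = wopcm > SKETCH_PAGE ? wopcm : SKETCH_PAGE;

	printf("pin bias for GuC-shared objects: %#llx\n",
	       (unsigned long long)pin_bias);
	printf("reserved above the GuC-visible limit: %llu MiB\n",
	       (unsigned long long)((total - guc_top) >> 20));
	printf("guard page kept at scratch: %#llx\n",
	       (unsigned long long)(total - SKETCH_PAGE));
	return 0;
}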
+ */ + ret = ggtt_reserve_guc_top(ggtt); + if (ret) + goto err; + + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); + ggtt->vm.clear_range(&ggtt->vm, hole_start, + hole_end - hole_start); + } + + /* And finally clear the reserved guard page */ + ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); + + return 0; + +err: + cleanup_init_ggtt(ggtt); + return ret; +} + +static int aliasing_gtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + u32 pte_flags; + int ret; + + /* Currently applicable only to VLV */ + pte_flags = 0; + if (i915_gem_object_is_readonly(vma->obj)) + pte_flags |= PTE_READ_ONLY; + + if (flags & I915_VMA_LOCAL_BIND) { + struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; + + if (flags & I915_VMA_ALLOC) { + ret = alias->vm.allocate_va_range(&alias->vm, + vma->node.start, + vma->size); + if (ret) + return ret; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); + } + + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, + __i915_vma_flags(vma))); + alias->vm.insert_entries(&alias->vm, vma, + cache_level, pte_flags); + } + + if (flags & I915_VMA_GLOBAL_BIND) + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + return 0; +} + +static void aliasing_gtt_unbind_vma(struct i915_vma *vma) +{ + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { + struct i915_address_space *vm = vma->vm; + + vm->clear_range(vm, vma->node.start, vma->size); + } + + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { + struct i915_address_space *vm = + &i915_vm_to_ggtt(vma->vm)->alias->vm; + + vm->clear_range(vm, vma->node.start, vma->size); + } +} + +static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) +{ + struct i915_ppgtt *ppgtt; + int err; + + ppgtt = i915_ppgtt_create(ggtt->vm.gt); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); + + if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { + err = -ENODEV; + goto err_ppgtt; + } + + /* + * Note we only pre-allocate as far as the end of the global + * GTT. On 48b / 4-level page-tables, the difference is very, + * very significant! We have to preallocate as GVT/vgpu does + * not like the page directory disappearing. 
+ */ + err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); + if (err) + goto err_ppgtt; + + ggtt->alias = ppgtt; + ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; + + GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); + ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; + + GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); + ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; + + return 0; + +err_ppgtt: + i915_vm_put(&ppgtt->vm); + return err; +} + +static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) +{ + struct i915_ppgtt *ppgtt; + + ppgtt = fetch_and_zero(&ggtt->alias); + if (!ppgtt) + return; + + i915_vm_put(&ppgtt->vm); + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; +} + +int i915_init_ggtt(struct drm_i915_private *i915) +{ + int ret; + + ret = init_ggtt(&i915->ggtt); + if (ret) + return ret; + + if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { + ret = init_aliasing_ppgtt(&i915->ggtt); + if (ret) + cleanup_init_ggtt(&i915->ggtt); + } + + return 0; +} + +static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) +{ + struct i915_vma *vma, *vn; + + atomic_set(&ggtt->vm.open, 0); + + rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ + flush_workqueue(ggtt->vm.i915->wq); + + mutex_lock(&ggtt->vm.mutex); + + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) + WARN_ON(__i915_vma_unbind(vma)); + + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); + mutex_destroy(&ggtt->error_mutex); + + ggtt_release_guc_top(ggtt); + intel_vgt_deballoon(ggtt); + + ggtt->vm.cleanup(&ggtt->vm); + + mutex_unlock(&ggtt->vm.mutex); + i915_address_space_fini(&ggtt->vm); + + arch_phys_wc_del(ggtt->mtrr); + + if (ggtt->iomap.size) + io_mapping_fini(&ggtt->iomap); +} + +/** + * i915_ggtt_driver_release - Clean up GGTT hardware initialization + * @i915: i915 device + */ +void i915_ggtt_driver_release(struct drm_i915_private *i915) +{ + struct pagevec *pvec; + + fini_aliasing_ppgtt(&i915->ggtt); + + ggtt_cleanup_hw(&i915->ggtt); + + pvec = &i915->mm.wc_stash.pvec; + if (pvec->nr) { + set_pages_array_wb(pvec->pages, pvec->nr); + __pagevec_release(pvec); + } +} + +static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) +{ + snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; + snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; + return snb_gmch_ctl << 20; +} + +static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; + if (bdw_gmch_ctl) + bdw_gmch_ctl = 1 << bdw_gmch_ctl; + +#ifdef CONFIG_X86_32 + /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ + if (bdw_gmch_ctl > 4) + bdw_gmch_ctl = 4; +#endif + + return bdw_gmch_ctl << 20; +} + +static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) +{ + gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; + gmch_ctrl &= SNB_GMCH_GGMS_MASK; + + if (gmch_ctrl) + return 1 << (20 + gmch_ctrl); + + return 0; +} + +static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + phys_addr_t phys_addr; + int ret; + + /* For Modern GENs the PTEs and register space are split in the BAR */ + phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; + + /* + * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. 
For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. + */ + if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) + ggtt->gsm = ioremap_nocache(phys_addr, size); + else + ggtt->gsm = ioremap_wc(phys_addr, size); + if (!ggtt->gsm) { + DRM_ERROR("Failed to map the ggtt page table\n"); + return -ENOMEM; + } + + ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); + if (ret) { + DRM_ERROR("Scratch setup failed\n"); + /* iounmap will also get called at remove, but meh */ + iounmap(ggtt->gsm); + return ret; + } + + ggtt->vm.scratch[0].encode = + ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), + I915_CACHE_NONE, 0); + + return 0; +} + +int ggtt_set_pages(struct i915_vma *vma) +{ + int ret; + + GEM_BUG_ON(vma->pages); + + ret = i915_get_ggtt_vma_pages(vma); + if (ret) + return ret; + + vma->page_sizes = vma->obj->mm.page_sizes; + + return 0; +} + +static void gen6_gmch_remove(struct i915_address_space *vm) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + iounmap(ggtt->gsm); + cleanup_scratch_page(vm); +} + +static struct resource pci_resource(struct pci_dev *pdev, int bar) +{ + return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); +} + +static int gen8_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + unsigned int size; + u16 snb_gmch_ctl; + int err; + + /* TODO: We're not aware of mappable constraints on gen8 yet */ + if (!IS_DGFX(i915)) { + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); + if (err) + DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + if (IS_CHERRYVIEW(i915)) + size = chv_get_total_gtt_size(snb_gmch_ctl); + else + size = gen8_get_total_gtt_size(snb_gmch_ctl); + + ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + ggtt->vm.cleanup = gen6_gmch_remove; + ggtt->vm.insert_page = gen8_ggtt_insert_page; + ggtt->vm.clear_range = nop_clear_range; + if (intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen8_ggtt_clear_range; + + ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + + /* Serialize GTT updates with aperture access on BXT if VT-d is on. 
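The total computed in gen8_gmch_probe() above follows directly from the PTE geometry: with 8-byte PTEs and 4 KiB pages, each byte of PTE space maps 512 bytes of GGTT. A standalone worked example (plain C; the GGMS field value of 3 is only an illustration):

/*
 * Standalone arithmetic sketch (not kernel code): turn a decoded GSM size
 * into the GGTT size the way gen8_gmch_probe() above does.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int ggms = 3;				/* example field value */
	uint64_t gsm_size = (1ull << ggms) << 20;	/* as in gen8_get_total_gtt_size() */
	uint64_t gtt_total = gsm_size / sizeof(uint64_t) * 4096;

	printf("GSM %llu MiB -> GGTT %llu GiB\n",
	       (unsigned long long)(gsm_size >> 20),
	       (unsigned long long)(gtt_total >> 30));	/* 8 MiB -> 4 GiB */
	return 0;
}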
*/ + if (intel_ggtt_update_needs_vtd_wa(i915) || + IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) { + ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; + ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; + if (ggtt->vm.clear_range != nop_clear_range) + ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; + } + + ggtt->invalidate = gen8_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + ggtt->vm.pte_encode = gen8_pte_encode; + + setup_private_pat(ggtt->vm.gt->uncore); + + return ggtt_probe_common(ggtt, size); +} + +static u64 snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + pte |= GEN7_PTE_CACHE_L3_LLC; + break; + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (!(flags & PTE_READ_ONLY)) + pte |= BYT_PTE_WRITEABLE; + + if (level != I915_CACHE_NONE) + pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; + + return pte; +} + +static u64 hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (level != I915_CACHE_NONE) + pte |= HSW_WB_LLC_AGE3; + + return pte; +} + +static u64 iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_NONE: + break; + case I915_CACHE_WT: + pte |= HSW_WT_ELLC_LLC_AGE3; + break; + default: + pte |= HSW_WB_ELLC_LLC_AGE3; + break; + } + + return pte; +} + +static int gen6_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + unsigned int size; + u16 snb_gmch_ctl; + int err; + + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + + /* + * 64/512MB is the current min/max we actually know of, but this is + * just a coarse sanity check. 
+ */ + if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { + DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); + return -ENXIO; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + if (err) + DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + + size = gen6_get_total_gtt_size(snb_gmch_ctl); + ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; + + ggtt->vm.clear_range = nop_clear_range; + if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen6_ggtt_clear_range; + ggtt->vm.insert_page = gen6_ggtt_insert_page; + ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.cleanup = gen6_gmch_remove; + + ggtt->invalidate = gen6_ggtt_invalidate; + + if (HAS_EDRAM(i915)) + ggtt->vm.pte_encode = iris_pte_encode; + else if (IS_HASWELL(i915)) + ggtt->vm.pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(i915)) + ggtt->vm.pte_encode = byt_pte_encode; + else if (INTEL_GEN(i915) >= 7) + ggtt->vm.pte_encode = ivb_pte_encode; + else + ggtt->vm.pte_encode = snb_pte_encode; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + return ggtt_probe_common(ggtt, size); +} + +static void i915_gmch_remove(struct i915_address_space *vm) +{ + intel_gmch_remove(); +} + +static int i915_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + phys_addr_t gmadr_base; + int ret; + + ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + return -EIO; + } + + intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); + + ggtt->gmadr = + (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); + + ggtt->do_idle_maps = needs_idle_maps(i915); + ggtt->vm.insert_page = i915_ggtt_insert_page; + ggtt->vm.insert_entries = i915_ggtt_insert_entries; + ggtt->vm.clear_range = i915_ggtt_clear_range; + ggtt->vm.cleanup = i915_gmch_remove; + + ggtt->invalidate = gmch_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + if (unlikely(ggtt->do_idle_maps)) + dev_notice(i915->drm.dev, + "Applying Ironlake quirks for intel_iommu\n"); + + return 0; +} + +static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + int ret; + + ggtt->vm.gt = gt; + ggtt->vm.i915 = i915; + ggtt->vm.dma = &i915->drm.pdev->dev; + + if (INTEL_GEN(i915) <= 5) + ret = i915_gmch_probe(ggtt); + else if (INTEL_GEN(i915) < 8) + ret = gen6_gmch_probe(ggtt); + else + ret = gen8_gmch_probe(ggtt); + if (ret) + return ret; + + if ((ggtt->vm.total - 1) >> 32) { + DRM_ERROR("We never expected a Global GTT with more than 32bits" + " of address space! Found %lldM!\n", + ggtt->vm.total >> 20); + ggtt->vm.total = 1ULL << 32; + ggtt->mappable_end = + min_t(u64, ggtt->mappable_end, ggtt->vm.total); + } + + if (ggtt->mappable_end > ggtt->vm.total) { + DRM_ERROR("mappable aperture extends past end of GGTT," + " aperture=%pa, total=%llx\n", + &ggtt->mappable_end, ggtt->vm.total); + ggtt->mappable_end = ggtt->vm.total; + } + + /* GMADR is the PCI mmio aperture into the global GTT. 
*/ + DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("DSM size = %lluM\n", + (u64)resource_size(&intel_graphics_stolen_res) >> 20); + + return 0; +} + +/** + * i915_ggtt_probe_hw - Probe GGTT hardware location + * @i915: i915 device + */ +int i915_ggtt_probe_hw(struct drm_i915_private *i915) +{ + int ret; + + ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); + if (ret) + return ret; + + if (intel_vtd_active()) + dev_info(i915->drm.dev, "VT-d active for gfx access\n"); + + return 0; +} + +int i915_ggtt_enable_hw(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) < 6 && !intel_enable_gtt()) + return -EIO; + + return 0; +} + +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) +{ + GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); + + ggtt->invalidate = guc_ggtt_invalidate; + + ggtt->invalidate(ggtt); +} + +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) +{ + /* XXX Temporary pardon for error unload */ + if (ggtt->invalidate == gen8_ggtt_invalidate) + return; + + /* We should only be called after i915_ggtt_enable_guc() */ + GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); + + ggtt->invalidate = gen8_ggtt_invalidate; + + ggtt->invalidate(ggtt); +} + +static void ggtt_restore_mappings(struct i915_ggtt *ggtt) +{ + struct i915_vma *vma; + bool flush = false; + int open; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + mutex_lock(&ggtt->vm.mutex); + + /* First fill our portion of the GTT with scratch pages */ + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + + /* Skip rewriting PTE on VMA unbind. */ + open = atomic_xchg(&ggtt->vm.open, 0); + + /* clflush objects bound into the GGTT and rebind them. */ + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; + + if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + continue; + + clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); + WARN_ON(i915_vma_bind(vma, + obj ? obj->cache_level : 0, + PIN_GLOBAL, NULL)); + if (obj) { /* only used during resume => exclusive access */ + flush |= fetch_and_zero(&obj->write_domain); + obj->read_domains |= I915_GEM_DOMAIN_GTT; + } + } + + atomic_set(&ggtt->vm.open, open); + ggtt->invalidate(ggtt); + + mutex_unlock(&ggtt->vm.mutex); + + if (flush) + wbinvd_on_all_cpus(); +} + +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + + ggtt_restore_mappings(ggtt); + + if (INTEL_GEN(i915) >= 8) + setup_private_pat(ggtt->vm.gt->uncore); +} + +static struct scatterlist * +rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, + unsigned int width, unsigned int height, + unsigned int stride, + struct sg_table *st, struct scatterlist *sg) +{ + unsigned int column, row; + unsigned int src_idx; + + for (column = 0; column < width; column++) { + src_idx = stride * (height - 1) + column + offset; + for (row = 0; row < height; row++) { + st->nents++; + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. 
+ */ + sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = + i915_gem_object_get_dma_address(obj, src_idx); + sg_dma_len(sg) = I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + src_idx -= stride; + } + } + + return sg; +} + +static noinline struct sg_table * +intel_rotate_pages(struct intel_rotation_info *rot_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_rotation_info_size(rot_info); + struct sg_table *st; + struct scatterlist *sg; + int ret = -ENOMEM; + int i; + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { + sg = rotate_pages(obj, rot_info->plane[i].offset, + rot_info->plane[i].width, rot_info->plane[i].height, + rot_info->plane[i].stride, st, sg); + } + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static struct scatterlist * +remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, + unsigned int width, unsigned int height, + unsigned int stride, + struct sg_table *st, struct scatterlist *sg) +{ + unsigned int row; + + for (row = 0; row < height; row++) { + unsigned int left = width * I915_GTT_PAGE_SIZE; + + while (left) { + dma_addr_t addr; + unsigned int length; + + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + + addr = i915_gem_object_get_dma_address_len(obj, offset, &length); + + length = min(left, length); + + st->nents++; + + sg_set_page(sg, NULL, length, 0); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = length; + sg = sg_next(sg); + + offset += length / I915_GTT_PAGE_SIZE; + left -= length; + } + + offset += stride - width; + } + + return sg; +} + +static noinline struct sg_table * +intel_remap_pages(struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_remapped_info_size(rem_info); + struct sg_table *st; + struct scatterlist *sg; + int ret = -ENOMEM; + int i; + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + sg = remap_pages(obj, rem_info->plane[i].offset, + rem_info->plane[i].width, rem_info->plane[i].height, + rem_info->plane[i].stride, st, sg); + } + + i915_sg_trim(st); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", + obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static noinline struct sg_table * +intel_partial_pages(const struct i915_ggtt_view *view, + struct drm_i915_gem_object *obj) +{ + struct sg_table *st; + struct scatterlist *sg, *iter; + unsigned int count = view->partial.size; + unsigned int offset; + int ret = -ENOMEM; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, count, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); + GEM_BUG_ON(!iter); + + sg = st->sgl; + st->nents = 0; + do { + unsigned int len; + + len = min(iter->length - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) { + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. */ + + return st; + } + + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); + +err_sg_alloc: + kfree(st); +err_st_alloc: + return ERR_PTR(ret); +} + +static int +i915_get_ggtt_vma_pages(struct i915_vma *vma) +{ + int ret; + + /* + * The vma->pages are only valid within the lifespan of the borrowed + * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so + * must be the vma->pages. A simple rule is that vma->pages must only + * be accessed when the obj->mm.pages are pinned. + */ + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); + + switch (vma->ggtt_view.type) { + default: + GEM_BUG_ON(vma->ggtt_view.type); + /* fall through */ + case I915_GGTT_VIEW_NORMAL: + vma->pages = vma->obj->mm.pages; + return 0; + + case I915_GGTT_VIEW_ROTATED: + vma->pages = + intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); + break; + + case I915_GGTT_VIEW_REMAPPED: + vma->pages = + intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); + break; + + case I915_GGTT_VIEW_PARTIAL: + vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); + break; + } + + ret = 0; + if (IS_ERR(vma->pages)) { + ret = PTR_ERR(vma->pages); + vma->pages = NULL; + DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); + } + return ret; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index ec84b5e62fef..da2b6e2ae692 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -38,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) { gt->ggtt = ggtt; - - intel_gt_sanitize(gt, false); } static void init_unused_ring(struct intel_gt *gt, u32 base) @@ -77,10 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; int ret; - ret = intel_gt_terminally_wedged(gt); - if (ret) - return ret; - gt->last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ @@ -372,7 +366,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt) static struct i915_address_space *kernel_vm(struct intel_gt *gt) { if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) - return &i915_ppgtt_create(gt->i915)->vm; + return &i915_ppgtt_create(gt)->vm; else return i915_vm_get(&gt->ggtt->vm); } @@ -410,14 +404,13 @@ static int __engines_record_defaults(struct intel_gt *gt) struct intel_context *ce; struct
i915_request *rq; + /* We must be able to switch to something! */ + GEM_BUG_ON(!engine->kernel_context); + err = intel_renderstate_init(&so, engine); if (err) goto out; - /* We must be able to switch to something! */ - GEM_BUG_ON(!engine->kernel_context); - engine->serial++; /* force the kernel context switch */ - ce = intel_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 2355cf129e9c..1dac441cb8f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -58,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, return i915_ggtt_offset(gt->scratch) + field; } -static inline bool intel_gt_is_wedged(struct intel_gt *gt) +static inline bool intel_gt_is_wedged(const struct intel_gt *gt) { return __intel_reset_failed(&gt->reset); } +static inline bool intel_gt_has_init_error(const struct intel_gt *gt) +{ + return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags); +} + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 45b68a17da4d..d1c2f034296a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -126,17 +126,7 @@ static bool reset_engines(struct intel_gt *gt) return __intel_gt_reset(gt, ALL_ENGINES) == 0; } -/** - * intel_gt_sanitize: called after the GPU has lost power - * @gt: the i915 GT container - * @force: ignore a failed reset and sanitize engine state anyway - * - * Anytime we reset the GPU, either with an explicit GPU reset or through a - * PCI power cycle, the GPU loses state and we must reset our state tracking - * to match. Note that calling intel_gt_sanitize() if the GPU has not - * been reset results in much confusion!
- */ -void intel_gt_sanitize(struct intel_gt *gt, bool force) +static void gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -189,6 +179,10 @@ int intel_gt_resume(struct intel_gt *gt) enum intel_engine_id id; int err; + err = intel_gt_has_init_error(gt); + if (err) + return err; + GT_TRACE(gt, "\n"); /* @@ -201,30 +195,26 @@ int intel_gt_resume(struct intel_gt *gt) intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); intel_rc6_sanitize(&gt->rc6); + gt_sanitize(gt, true); + if (intel_gt_is_wedged(gt)) { + err = -EIO; + goto out_fw; + } /* Only when the HW is re-initialised, can we replay the requests */ err = intel_gt_init_hw(gt); if (err) { dev_err(gt->i915->drm.dev, "Failed to initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(gt); - goto err_fw; + goto err_wedged; } intel_rps_enable(&gt->rps); intel_llc_enable(&gt->llc); for_each_engine(engine, gt, id) { - struct intel_context *ce; - intel_engine_pm_get(engine); - ce = engine->kernel_context; - if (ce) { - GEM_BUG_ON(!intel_context_is_pinned(ce)); - ce->ops->reset(ce); - } - engine->serial++; /* kernel context lost */ err = engine->resume(engine); @@ -233,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt) dev_err(gt->i915->drm.dev, "Failed to restart %s (%d)\n", engine->name, err); - break; + goto err_wedged; } } @@ -243,11 +233,14 @@ int intel_gt_resume(struct intel_gt *gt) user_forcewake(gt, false); -err_fw: +out_fw: intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); - return err; + +err_wedged: + intel_gt_set_wedged(gt); + goto out_fw; } static void wait_for_suspend(struct intel_gt *gt) @@ -315,7 +308,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) intel_llc_disable(&gt->llc); } - intel_gt_sanitize(gt, false); + gt_sanitize(gt, false); GT_TRACE(gt, "\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 4a9e48c12bd4..60f0e2fbe55c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -51,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_pm_init(struct intel_gt *gt); void intel_gt_pm_fini(struct intel_gt *gt); -void intel_gt_sanitize(struct intel_gt *gt, bool force); - void intel_gt_suspend_prepare(struct intel_gt *gt); void intel_gt_suspend_late(struct intel_gt *gt); int intel_gt_resume(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index b4f04614230e..7ef1d37970f6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -14,13 +14,16 @@ #include "intel_gt_requests.h" #include "intel_timeline.h" -static void retire_requests(struct intel_timeline *tl) +static bool retire_requests(struct intel_timeline *tl) { struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &tl->requests, link) if (!i915_request_retire(rq)) - break; + return false; + + /* And check nothing new was submitted */ + return !i915_active_fence_isset(&tl->last_request); } static bool flush_submission(struct intel_gt *gt) @@ -29,9 +32,13 @@ static bool flush_submission(struct intel_gt *gt) enum intel_engine_id id; bool active = false; + if (!intel_gt_pm_is_awake(gt)) + return false; + for_each_engine(engine, gt, id) { - active |= intel_engine_flush_submission(engine); + intel_engine_flush_submission(engine); active |= flush_work(&engine->retire_work); + active |= flush_work(&engine->wakeref.work); } return active; @@ -120,7
+127,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) timeout = -timeout, interruptible = false; flush_submission(gt); /* kick the ksoftirqd tasklets */ - spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) { @@ -145,7 +151,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) } } - retire_requests(tl); + if (!retire_requests(tl) || flush_submission(gt)) + active_count++; spin_lock(&timelines->lock); @@ -153,8 +160,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) list_safe_reset_next(tl, tn, link); if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); - else - active_count += i915_active_fence_isset(&tl->last_request); mutex_unlock(&tl->mutex); @@ -169,9 +174,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); - if (flush_submission(gt)) - active_count++; - return active_count ? timeout : 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c new file mode 100644 index 000000000000..16acdc5d6734 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/slab.h> /* fault-inject.h is not standalone! */ + +#include <linux/fault-inject.h> + +#include "i915_trace.h" +#include "intel_gt.h" +#include "intel_gtt.h" + +void stash_init(struct pagestash *stash) +{ + pagevec_init(&stash->pvec); + spin_lock_init(&stash->lock); +} + +static struct page *stash_pop_page(struct pagestash *stash) +{ + struct page *page = NULL; + + spin_lock(&stash->lock); + if (likely(stash->pvec.nr)) + page = stash->pvec.pages[--stash->pvec.nr]; + spin_unlock(&stash->lock); + + return page; +} + +static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) +{ + unsigned int nr; + + spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); + + nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); + memcpy(stash->pvec.pages + stash->pvec.nr, + pvec->pages + pvec->nr - nr, + sizeof(pvec->pages[0]) * nr); + stash->pvec.nr += nr; + + spin_unlock(&stash->lock); + + pvec->nr -= nr; +} + +static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) +{ + struct pagevec stack; + struct page *page; + + if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) + i915_gem_shrink_all(vm->i915); + + page = stash_pop_page(&vm->free_pages); + if (page) + return page; + + if (!vm->pt_kmap_wc) + return alloc_page(gfp); + + /* Look in our global stash of WC pages... */ + page = stash_pop_page(&vm->i915->mm.wc_stash); + if (page) + return page; + + /* + * Otherwise batch allocate pages to amortize cost of set_pages_wc. + * + * We have to be careful as page allocation may trigger the shrinker + * (via direct reclaim) which will fill up the WC stash underneath us. + * So we add our WB pages into a temporary pvec on the stack and merge + * them into the WC stash after all the allocations are complete. 
+ */ + pagevec_init(&stack); + do { + struct page *page; + + page = alloc_page(gfp); + if (unlikely(!page)) + break; + + stack.pages[stack.nr++] = page; + } while (pagevec_space(&stack)); + + if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { + page = stack.pages[--stack.nr]; + + /* Merge spare WC pages to the global stash */ + if (stack.nr) + stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); + + /* Push any surplus WC pages onto the local VM stash */ + if (stack.nr) + stash_push_pagevec(&vm->free_pages, &stack); + } + + /* Return unwanted leftovers */ + if (unlikely(stack.nr)) { + WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); + __pagevec_release(&stack); + } + + return page; +} + +static void vm_free_pages_release(struct i915_address_space *vm, + bool immediate) +{ + struct pagevec *pvec = &vm->free_pages.pvec; + struct pagevec stack; + + lockdep_assert_held(&vm->free_pages.lock); + GEM_BUG_ON(!pagevec_count(pvec)); + + if (vm->pt_kmap_wc) { + /* + * When we use WC, first fill up the global stash and then + * only if full immediately free the overflow. + */ + stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); + + /* + * As we have made some room in the VM's free_pages, + * we can wait for it to fill again. Unless we are + * inside i915_address_space_fini() and must + * immediately release the pages! + */ + if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) + return; + + /* + * We have to drop the lock to allow ourselves to sleep, + * so take a copy of the pvec and clear the stash for + * others to use it as we sleep. + */ + stack = *pvec; + pagevec_reinit(pvec); + spin_unlock(&vm->free_pages.lock); + + pvec = &stack; + set_pages_array_wb(pvec->pages, pvec->nr); + + spin_lock(&vm->free_pages.lock); + } + + __pagevec_release(pvec); +} + +static void vm_free_page(struct i915_address_space *vm, struct page *page) +{ + /* + * On !llc, we need to change the pages back to WB. We only do so + * in bulk, so we rarely need to change the page attributes here, + * but doing so requires a stop_machine() from deep inside arch/x86/mm. + * To make detection of the possible sleep more likely, use an + * unconditional might_sleep() for everybody. 
+ */ + might_sleep(); + spin_lock(&vm->free_pages.lock); + while (!pagevec_space(&vm->free_pages.pvec)) + vm_free_pages_release(vm, false); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); + pagevec_add(&vm->free_pages.pvec, page); + spin_unlock(&vm->free_pages.lock); +} + +void __i915_vm_close(struct i915_address_space *vm) +{ + struct i915_vma *vma, *vn; + + mutex_lock(&vm->mutex); + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; + + /* Keep the obj (and hence the vma) alive as _we_ destroy it */ + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + __i915_vma_put(vma); + + i915_gem_object_put(obj); + } + GEM_BUG_ON(!list_empty(&vm->bound_list)); + mutex_unlock(&vm->mutex); +} + +void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); +} + +static void __i915_vm_release(struct work_struct *work) +{ + struct i915_address_space *vm = + container_of(work, struct i915_address_space, rcu.work); + + vm->cleanup(vm); + i915_address_space_fini(vm); + + kfree(vm); +} + +void i915_vm_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, struct i915_address_space, ref); + + GEM_BUG_ON(i915_is_ggtt(vm)); + trace_i915_ppgtt_release(vm); + + queue_rcu_work(vm->i915->wq, &vm->rcu); +} + +void i915_address_space_init(struct i915_address_space *vm, int subclass) +{ + kref_init(&vm->ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); + atomic_set(&vm->open, 1); + + /* + * The vm->mutex must be reclaim safe (for use in the shrinker). + * Do a dummy acquire now under fs_reclaim so that any allocation + * attempt holding the lock is immediately reported by lockdep. 
+ */ + mutex_init(&vm->mutex); + lockdep_set_subclass(&vm->mutex, subclass); + i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); + + GEM_BUG_ON(!vm->total); + drm_mm_init(&vm->mm, 0, vm->total); + vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; + + stash_init(&vm->free_pages); + + INIT_LIST_HEAD(&vm->bound_list); +} + +void clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + + if (vma->pages != vma->obj->mm.pages) { + sg_free_table(vma->pages); + kfree(vma->pages); + } + vma->pages = NULL; + + memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); +} + +static int __setup_page_dma(struct i915_address_space *vm, + struct i915_page_dma *p, + gfp_t gfp) +{ + p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); + if (unlikely(!p->page)) + return -ENOMEM; + + p->daddr = dma_map_page_attrs(vm->dma, + p->page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); + if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { + vm_free_page(vm, p->page); + return -ENOMEM; + } + + return 0; +} + +int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +{ + return __setup_page_dma(vm, p, __GFP_HIGHMEM); +} + +void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +{ + dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + vm_free_page(vm, p->page); +} + +void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) +{ + kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); +} + +int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) +{ + unsigned long size; + + /* + * In order to utilize 64K pages for an object with a size < 2M, we will + * need to support a 64K scratch page, given that every 16th entry for a + * page-table operating in 64K mode must point to a properly aligned 64K + * region, including any PTEs which happen to point to scratch. + * + * This is only relevant for the 48b PPGTT where we support + * huge-gtt-pages, see also i915_vma_insert(). However, as we share the + * scratch (read-only) between all vm, we create one 64k scratch page + * for all. 
+ */ + size = I915_GTT_PAGE_SIZE_4K; + if (i915_vm_is_4lvl(vm) && + HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { + size = I915_GTT_PAGE_SIZE_64K; + gfp |= __GFP_NOWARN; + } + gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; + + do { + unsigned int order = get_order(size); + struct page *page; + dma_addr_t addr; + + page = alloc_pages(gfp, order); + if (unlikely(!page)) + goto skip; + + addr = dma_map_page_attrs(vm->dma, + page, 0, size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); + if (unlikely(dma_mapping_error(vm->dma, addr))) + goto free_page; + + if (unlikely(!IS_ALIGNED(addr, size))) + goto unmap_page; + + vm->scratch[0].base.page = page; + vm->scratch[0].base.daddr = addr; + vm->scratch_order = order; + return 0; + +unmap_page: + dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); +free_page: + __free_pages(page, order); +skip: + if (size == I915_GTT_PAGE_SIZE_4K) + return -ENOMEM; + + size = I915_GTT_PAGE_SIZE_4K; + gfp &= ~__GFP_NOWARN; + } while (1); +} + +void cleanup_scratch_page(struct i915_address_space *vm) +{ + struct i915_page_dma *p = px_base(&vm->scratch[0]); + unsigned int order = vm->scratch_order; + + dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, + PCI_DMA_BIDIRECTIONAL); + __free_pages(p->page, order); +} + +void free_scratch(struct i915_address_space *vm) +{ + int i; + + if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ + return; + + for (i = 1; i <= vm->top; i++) { + if (!px_dma(&vm->scratch[i])) + break; + cleanup_page_dma(vm, px_base(&vm->scratch[i])); + } + + cleanup_scratch_page(vm); +} + +void gtt_write_workarounds(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + + /* + * This function is for gtt related workarounds. This function is + * called on driver load and after a GPU reset, so you can place + * workarounds here even if they get overwritten by GPU reset. + */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ + if (IS_BROADWELL(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_GEN9_LP(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); + else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + + /* + * To support 64K PTEs we need to first enable the use of the + * Intermediate-Page-Size(IPS) bit of the PDE field via some magical + * mmio, otherwise the page-walker will simply ignore the IPS bit. This + * shouldn't be needed after GEN10. + * + * 64K pages were first introduced from BDW+, although technically they + * only *work* from gen9+. For pre-BDW we instead have the option for + * 32K pages, but we don't currently have any support for it in our + * driver. + */ + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && + INTEL_GEN(i915) <= 10) + intel_uncore_rmw(uncore, + GEN8_GAMW_ECO_DEV_RW_IA, + 0, + GAMW_ECO_ENABLE_64K_IPS_FIELD); + + if (IS_GEN_RANGE(i915, 8, 11)) { + bool can_use_gtt_cache = true; + + /* + * According to the BSpec if we use 2M/1G pages then we also + * need to disable the GTT cache. At least on BDW we can see + * visual corruption when using 2M pages, and not disabling the + * GTT cache. 
+ */ + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M)) + can_use_gtt_cache = false; + + /* WaGttCachingOffByDefault */ + intel_uncore_write(uncore, + HSW_GTT_CACHE_EN, + can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0); + WARN_ON_ONCE(can_use_gtt_cache && + intel_uncore_read(uncore, + HSW_GTT_CACHE_EN) == 0); + } +} + +u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; + + switch (level) { + case I915_CACHE_NONE: + pte |= PPAT_UNCACHED; + break; + case I915_CACHE_WT: + pte |= PPAT_DISPLAY_ELLC; + break; + default: + pte |= PPAT_CACHED; + break; + } + + return pte; +} + +static void tgl_setup_private_ppat(struct intel_uncore *uncore) +{ + /* TGL doesn't support LLC or AGE settings */ + intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); + intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); +} + +static void cnl_setup_private_ppat(struct intel_uncore *uncore) +{ + intel_uncore_write(uncore, + GEN10_PAT_INDEX(0), + GEN8_PPAT_WB | GEN8_PPAT_LLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(1), + GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(2), + GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(3), + GEN8_PPAT_UC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(4), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(5), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(6), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(7), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); +} + +/* + * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability + * bits. When using advanced contexts each context stores its own PAT, but + * writing this data shouldn't be harmful even in those cases. + */ +static void bdw_setup_private_ppat(struct intel_uncore *uncore) +{ + u64 pat; + + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); +} + +static void chv_setup_private_ppat(struct intel_uncore *uncore) +{ + u64 pat; + + /* + * Map WB on BDW to snooped on CHV. + * + * Only the snoop bit has meaning for CHV, the rest is + * ignored. 
+ * + * The hardware will never snoop for certain types of accesses: + * - CPU GTT (GMADR->GGTT->no snoop->memory) + * - PPGTT page tables + * - some other special cycles + * + * As with BDW, we also need to consider the following for GT accesses: + * "For GGTT, there is NO pat_sel[2:0] from the entry, + * so RTL will always use the value corresponding to + * pat_sel = 000". + * Which means we must set the snoop bit in PAT entry 0 + * in order to keep the global status page working. + */ + + pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, 0) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); +} + +void setup_private_pat(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; + + GEM_BUG_ON(INTEL_GEN(i915) < 8); + + if (INTEL_GEN(i915) >= 12) + tgl_setup_private_ppat(uncore); + else if (INTEL_GEN(i915) >= 10) + cnl_setup_private_ppat(uncore); + else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) + chv_setup_private_ppat(uncore); + else + bdw_setup_private_ppat(uncore); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/mock_gtt.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h new file mode 100644 index 000000000000..7da7681c20b1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + * + * Please try to maintain the following order within this file unless it makes + * sense to do otherwise. From top to bottom: + * 1. typedefs + * 2. #defines, and macros + * 3. structure definitions + * 4. function prototypes + * + * Within each section, please try to order by generation in ascending order, + * from top to bottom (ie. gen6 on the top, gen8 on the bottom). + */ + +#ifndef __INTEL_GTT_H__ +#define __INTEL_GTT_H__ + +#include <linux/io-mapping.h> +#include <linux/kref.h> +#include <linux/mm.h> +#include <linux/pagevec.h> +#include <linux/scatterlist.h> +#include <linux/workqueue.h> + +#include <drm/drm_mm.h> + +#include "gt/intel_reset.h" +#include "i915_gem_fence_reg.h" +#include "i915_selftest.h" +#include "i915_vma_types.h" + +#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) + +#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) +#define DBG(...) trace_printk(__VA_ARGS__) +#else +#define DBG(...) 
+#endif + +#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */ + +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) + +#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K +#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M + +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + +#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE + +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 32 +/* 32 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 6 + +typedef u32 gen6_pte_t; +typedef u64 gen8_pte_t; + +#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) + +#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len))) +#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) +#define I915_PDES 512 +#define I915_PDE_MASK (I915_PDES - 1) + +/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ +#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) +#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define GEN6_PTE_CACHE_LLC (2 << 1) +#define GEN6_PTE_UNCACHED (1 << 1) +#define GEN6_PTE_VALID REG_BIT(0) + +#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) +#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) +#define GEN6_PD_ALIGN (PAGE_SIZE * 16) +#define GEN6_PDE_SHIFT 22 +#define GEN6_PDE_VALID REG_BIT(0) +#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) + +#define GEN7_PTE_CACHE_L3_LLC (3 << 1) + +#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2) +#define BYT_PTE_WRITEABLE REG_BIT(1) + +/* + * Cacheability Control is a 4-bit value. The low three bits are stored in bits + * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. + */ +#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ + (((bits) & 0x8) << (11 - 3))) +#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) +#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) +#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) +#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) +#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) +#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define HSW_PTE_UNCACHED (0) +#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) +#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) + +/* + * GEN8 32b style address is defined as a 3 level page table: + * 31:30 | 29:21 | 20:12 | 11:0 + * PDPE | PDE | PTE | offset + * The difference as compared to normal x86 3 level page table is the PDPEs are + * programmed via register. 
+ * + * GEN8 48b style address is defined as a 4 level page table: + * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 + * PML4E | PDPE | PDE | PTE | offset + */ +#define GEN8_3LVL_PDPES 4 + +#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) +#define PPAT_CACHED_PDE 0 /* WB LLC */ +#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */ +#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */ + +#define CHV_PPAT_SNOOP REG_BIT(6) +#define GEN8_PPAT_AGE(x) ((x)<<4) +#define GEN8_PPAT_LLCeLLC (3<<2) +#define GEN8_PPAT_LLCELLC (2<<2) +#define GEN8_PPAT_LLC (1<<2) +#define GEN8_PPAT_WB (3<<0) +#define GEN8_PPAT_WT (2<<0) +#define GEN8_PPAT_WC (1<<0) +#define GEN8_PPAT_UC (0<<0) +#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) +#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) + +#define GEN8_PDE_IPS_64K BIT(11) +#define GEN8_PDE_PS_2M BIT(7) + +#define for_each_sgt_daddr(__dp, __iter, __sgt) \ + __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) + +struct i915_page_dma { + struct page *page; + union { + dma_addr_t daddr; + + /* + * For gen6/gen7 only. This is the offset in the GGTT + * where the page directory entries for PPGTT begin + */ + u32 ggtt_offset; + }; +}; + +struct i915_page_scratch { + struct i915_page_dma base; + u64 encode; +}; + +struct i915_page_table { + struct i915_page_dma base; + atomic_t used; +}; + +struct i915_page_directory { + struct i915_page_table pt; + spinlock_t lock; + void *entry[512]; +}; + +#define __px_choose_expr(x, type, expr, other) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), type) || \ + __builtin_types_compatible_p(typeof(x), const type), \ + ({ type __x = (type)(x); expr; }), \ + other) + +#define px_base(px) \ + __px_choose_expr(px, struct i915_page_dma *, __x, \ + __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ + __px_choose_expr(px, struct i915_page_table *, &__x->base, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ + (void)0)))) +#define px_dma(px) (px_base(px)->daddr) + +#define px_pt(px) \ + __px_choose_expr(px, struct i915_page_table *, __x, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ + (void)0)) +#define px_used(px) (&px_pt(px)->used) + +enum i915_cache_level; + +struct drm_i915_file_private; +struct drm_i915_gem_object; +struct i915_vma; +struct intel_gt; + +struct i915_vma_ops { + /* Map an object into an address space with the given cache flags. */ + int (*bind_vma)(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); + /* + * Unmap an object from an address space. This usually consists of + * setting the valid PTE entries to a reserved scratch page. + */ + void (*unbind_vma)(struct i915_vma *vma); + + int (*set_pages)(struct i915_vma *vma); + void (*clear_pages)(struct i915_vma *vma); +}; + +struct pagestash { + spinlock_t lock; + struct pagevec pvec; +}; + +void stash_init(struct pagestash *stash); + +struct i915_address_space { + struct kref ref; + struct rcu_work rcu; + + struct drm_mm mm; + struct intel_gt *gt; + struct drm_i915_private *i915; + struct device *dma; + /* + * Every address space belongs to a struct file - except for the global + * GTT that is owned by the driver (and so @file is set to NULL). In + * principle, no information should leak from one context to another + * (or between files/processes etc) unless explicitly shared by the + * owner. Tracking the owner is important in order to free up per-file + * objects along with the file, to aide resource tracking, and to + * assign blame. 
+ */ + struct drm_i915_file_private *file; + u64 total; /* size addr space maps (ex. 2GB for ggtt) */ + u64 reserved; /* size addr space reserved */ + + unsigned int bind_async_flags; + + /* + * Each active user context has its own address space (in full-ppgtt). + * Since the vm may be shared between multiple contexts, we count how + * many contexts keep us "open". Once open hits zero, we are closed + * and do not allow any new attachments, and proceed to shutdown our + * vma and page directories. + */ + atomic_t open; + + struct mutex mutex; /* protects vma and our lists */ +#define VM_CLASS_GGTT 0 +#define VM_CLASS_PPGTT 1 + + struct i915_page_scratch scratch[4]; + unsigned int scratch_order; + unsigned int top; + + /** + * List of vma currently bound. + */ + struct list_head bound_list; + + struct pagestash free_pages; + + /* Global GTT */ + bool is_ggtt:1; + + /* Some systems require uncached updates of the page directories */ + bool pt_kmap_wc:1; + + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; + + u64 (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level, + u32 flags); /* Create a valid PTE */ +#define PTE_READ_ONLY BIT(0) + + int (*allocate_va_range)(struct i915_address_space *vm, + u64 start, u64 length); + void (*clear_range)(struct i915_address_space *vm, + u64 start, u64 length); + void (*insert_page)(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level cache_level, + u32 flags); + void (*insert_entries)(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); + void (*cleanup)(struct i915_address_space *vm); + + struct i915_vma_ops vma_ops; + + I915_SELFTEST_DECLARE(struct fault_attr fault_attr); + I915_SELFTEST_DECLARE(bool scrub_64K); +}; + +/* + * The Graphics Translation Table is the way in which GEN hardware translates a + * Graphics Virtual Address into a Physical Address. In addition to the normal + * collateral associated with any va->pa translations GEN hardware also has a + * portion of the GTT which can be mapped by the CPU and remain both coherent + * and correct (in cases like swizzling). That region is referred to as GMADR in + * the spec. + */ +struct i915_ggtt { + struct i915_address_space vm; + + struct io_mapping iomap; /* Mapping to our CPU mappable region */ + struct resource gmadr; /* GMADR resource */ + resource_size_t mappable_end; /* End offset that we can CPU map */ + + /** "Graphics Stolen Memory" holds the global PTEs */ + void __iomem *gsm; + void (*invalidate)(struct i915_ggtt *ggtt); + + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_ppgtt *alias; + + bool do_idle_maps; + + int mtrr; + + /** Bit 6 swizzling required for X tiling */ + u32 bit_6_swizzle_x; + /** Bit 6 swizzling required for Y tiling */ + u32 bit_6_swizzle_y; + + u32 pin_bias; + + unsigned int num_fences; + struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; + struct list_head fence_list; + + /** + * List of all objects in gtt_space, currently mmaped by userspace. + * All objects within this list must also be on bound_list. 
+ */ + struct list_head userfault_list; + + /* Manual runtime pm autosuspend delay for user GGTT mmaps */ + struct intel_wakeref_auto userfault_wakeref; + + struct mutex error_mutex; + struct drm_mm_node error_capture; + struct drm_mm_node uc_fw; +}; + +struct i915_ppgtt { + struct i915_address_space vm; + + struct i915_page_directory *pd; +}; + +#define i915_is_ggtt(vm) ((vm)->is_ggtt) + +static inline bool +i915_vm_is_4lvl(const struct i915_address_space *vm) +{ + return (vm->total - 1) >> 32; +} + +static inline bool +i915_vm_has_scratch_64K(struct i915_address_space *vm) +{ + return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); +} + +static inline bool +i915_vm_has_cache_coloring(struct i915_address_space *vm) +{ + return i915_is_ggtt(vm) && vm->mm.color_adjust; +} + +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_ggtt, vm)); + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, vm); +} + +static inline struct i915_ppgtt * +i915_vm_to_ppgtt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); + GEM_BUG_ON(i915_is_ggtt(vm)); + return container_of(vm, struct i915_ppgtt, vm); +} + +static inline struct i915_address_space * +i915_vm_get(struct i915_address_space *vm) +{ + kref_get(&vm->ref); + return vm; +} + +void i915_vm_release(struct kref *kref); + +static inline void i915_vm_put(struct i915_address_space *vm) +{ + kref_put(&vm->ref, i915_vm_release); +} + +static inline struct i915_address_space * +i915_vm_open(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + atomic_inc(&vm->open); + return i915_vm_get(vm); +} + +static inline bool +i915_vm_tryopen(struct i915_address_space *vm) +{ + if (atomic_add_unless(&vm->open, 1, 0)) + return i915_vm_get(vm); + + return false; +} + +void __i915_vm_close(struct i915_address_space *vm); + +static inline void +i915_vm_close(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + if (atomic_dec_and_test(&vm->open)) + __i915_vm_close(vm); + + i915_vm_put(vm); +} + +void i915_address_space_init(struct i915_address_space *vm, int subclass); +void i915_address_space_fini(struct i915_address_space *vm); + +static inline u32 i915_pte_index(u64 address, unsigned int pde_shift) +{ + const u32 mask = NUM_PTE(pde_shift) - 1; + + return (address >> PAGE_SHIFT) & mask; +} + +/* + * Helper to counts the number of PTEs within the given length. This count + * does not cross a page table boundary, so the max value would be + * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. 
+ */ +static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift) +{ + const u64 mask = ~((1ULL << pde_shift) - 1); + u64 end; + + GEM_BUG_ON(length == 0); + GEM_BUG_ON(offset_in_page(addr | length)); + + end = addr + length; + + if ((addr & mask) != (end & mask)) + return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); + + return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); +} + +static inline u32 i915_pde_index(u64 addr, u32 shift) +{ + return (addr >> shift) & I915_PDE_MASK; +} + +static inline struct i915_page_table * +i915_pt_entry(const struct i915_page_directory * const pd, + const unsigned short n) +{ + return pd->entry[n]; +} + +static inline struct i915_page_directory * +i915_pd_entry(const struct i915_page_directory * const pdp, + const unsigned short n) +{ + return pdp->entry[n]; +} + +static inline dma_addr_t +i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) +{ + struct i915_page_dma *pt = ppgtt->pd->entry[n]; + + return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); +} + +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt); + +int i915_ggtt_probe_hw(struct drm_i915_private *i915); +int i915_ggtt_init_hw(struct drm_i915_private *i915); +int i915_ggtt_enable_hw(struct drm_i915_private *i915); +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); +int i915_init_ggtt(struct drm_i915_private *i915); +void i915_ggtt_driver_release(struct drm_i915_private *i915); + +static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) +{ + return ggtt->mappable_end > 0; +} + +int i915_ppgtt_init_hw(struct intel_gt *gt); + +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); + +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915); +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915); + +u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags); + +int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); +void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); + +#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) + +void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count); + +#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) +#define fill32_px(px, v) do { \ + u64 v__ = lower_32_bits(v); \ + fill_px((px), v__ << 32 | v__); \ +} while (0) + +int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp); +void cleanup_scratch_page(struct i915_address_space *vm); +void free_scratch(struct i915_address_space *vm); + +struct i915_page_table *alloc_pt(struct i915_address_space *vm); +struct i915_page_directory *alloc_pd(struct i915_address_space *vm); +struct i915_page_directory *__alloc_pd(size_t sz); + +void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd); + +#define free_px(vm, px) free_pd(vm, px_base(px)) + +void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)); + +#define set_pd_entry(pd, idx, to) \ + __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) + +void +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + const struct i915_page_scratch * const scratch); + +bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_table * const pt, + 
const struct i915_page_scratch * const scratch); +void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); + +int ggtt_set_pages(struct i915_vma *vma); +int ppgtt_set_pages(struct i915_vma *vma); +void clear_pages(struct i915_vma *vma); + +void gtt_write_workarounds(struct intel_gt *gt); + +void setup_private_pat(struct intel_uncore *uncore); + +static inline struct sgt_dma { + struct scatterlist *sg; + dma_addr_t dma, max; +} sgt_dma(struct i915_vma *vma) { + struct scatterlist *sg = vma->pages->sgl; + dma_addr_t addr = sg_dma_address(sg); + + return (struct sgt_dma){ sg, addr, addr + sg->length }; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 4fb70a7716e3..9e430590fb3a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -488,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) return desc; } -static u32 *set_offsets(u32 *regs, +static inline unsigned int dword_in_page(void *addr) +{ + return offset_in_page(addr) / sizeof(u32); +} + +static void set_offsets(u32 *regs, const u8 *data, - const struct intel_engine_cs *engine) + const struct intel_engine_cs *engine, + bool clear) #define NOP(x) (BIT(7) | (x)) -#define LRI(count, flags) ((flags) << 6 | (count)) +#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6))) #define POSTED BIT(0) #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) #define REG16(x) \ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ (((x) >> 2) & 0x7f) -#define END() 0 +#define END(x) 0, (x) { const u32 base = engine->mmio_base; @@ -506,7 +512,10 @@ static u32 *set_offsets(u32 *regs, u8 count, flags; if (*data & BIT(7)) { /* skip */ - regs += *data++ & ~BIT(7); + count = *data++ & ~BIT(7); + if (clear) + memset32(regs, MI_NOOP, count); + regs += count; continue; } @@ -532,12 +541,25 @@ static u32 *set_offsets(u32 *regs, offset |= v & ~BIT(7); } while (v & BIT(7)); - *regs = base + (offset << 2); + regs[0] = base + (offset << 2); + if (clear) + regs[1] = 0; regs += 2; } while (--count); } - return regs; + if (clear) { + u8 count = *++data; + + /* Clear past the tail for HW access */ + GEM_BUG_ON(dword_in_page(regs) > count); + memset32(regs, MI_NOOP, count - dword_in_page(regs)); + + /* Close the batch; used mainly by live_lrc_layout() */ + *regs = MI_BATCH_BUFFER_END; + if (INTEL_GEN(engine->i915) >= 10) + *regs |= BIT(0); + } } static const u8 gen8_xcs_offsets[] = { @@ -572,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = { REG16(0x200), REG(0x028), - END(), + END(80) }; static const u8 gen9_xcs_offsets[] = { @@ -656,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = { REG16(0x67c), REG(0x068), - END(), + END(176) }; static const u8 gen12_xcs_offsets[] = { @@ -688,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = { REG16(0x274), REG16(0x270), - END(), + END(80) }; static const u8 gen8_rcs_offsets[] = { @@ -725,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) +}; + +static const u8 gen9_rcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x34), + REG(0x30), + REG(0x38), + REG(0x3c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, 0), + REG(0xc8), + + NOP(13), + 
LRI(44, POSTED), + REG(0x28), + REG(0x9c), + REG(0xc0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x68), + + END(176) }; static const u8 gen11_rcs_offsets[] = { @@ -766,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) }; static const u8 gen12_rcs_offsets[] = { @@ -807,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) }; #undef END @@ -832,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) return gen12_rcs_offsets; else if (INTEL_GEN(engine->i915) >= 11) return gen11_rcs_offsets; + else if (INTEL_GEN(engine->i915) >= 9) + return gen9_rcs_offsets; else return gen8_rcs_offsets; } else { @@ -1108,7 +1216,7 @@ __execlists_schedule_in(struct i915_request *rq) /* We don't need a strict matching tag, just different values */ ce->lrc_desc &= ~GENMASK_ULL(47, 37); ce->lrc_desc |= - (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) << + (u64)(++engine->context_tag % NUM_CONTEXT_TAG) << GEN11_SW_CTX_ID_SHIFT; BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); } @@ -1243,10 +1351,6 @@ static u64 execlists_update_context(struct i915_request *rq) */ wmb(); - /* Wa_1607138340:tgl */ - if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) - desc |= CTX_DESC_FORCE_RESTORE; - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1430,8 +1534,8 @@ static bool can_merge_rq(const struct i915_request *prev, if (i915_request_completed(next)) return true; - if (unlikely((prev->flags ^ next->flags) & - (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL))) + if (unlikely((prev->fence.flags ^ next->fence.flags) & + (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL))) return false; if (!can_merge_ctx(prev->context, next->context)) @@ -1443,7 +1547,7 @@ static bool can_merge_rq(const struct i915_request *prev, static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { - set_offsets(regs, reg_offsets(engine), engine); + set_offsets(regs, reg_offsets(engine), engine, false); } static bool virtual_matches(const struct virtual_engine *ve, @@ -1590,7 +1694,7 @@ active_timeslice(const struct intel_engine_cs *engine) { const struct i915_request *rq = *engine->execlists.active; - if (i915_request_completed(rq)) + if (!rq || i915_request_completed(rq)) return 0; if (engine->execlists.switch_priority_hint < effective_prio(rq)) @@ -1636,6 +1740,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine) active_preempt_timeout(engine)); } +static inline void clear_ports(struct i915_request **ports, int count) +{ + memset_p((void **)ports, NULL, count); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1996,10 +2105,9 @@ done: goto skip_submit; } + clear_ports(port + 1, last_port - port); - memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); - 
set_preempt_timeout(engine); } else { skip_submit: @@ -2014,13 +2122,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists) for (port = execlists->pending; *port; port++) execlists_schedule_out(*port); - memset(execlists->pending, 0, sizeof(execlists->pending)); + clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); /* Mark the end of active before we overwrite *active */ for (port = xchg(&execlists->active, execlists->pending); *port; port++) execlists_schedule_out(*port); - WRITE_ONCE(execlists->active, - memset(execlists->inflight, 0, sizeof(execlists->inflight))); + clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); + + WRITE_ONCE(execlists->active, execlists->inflight); } static inline void @@ -2176,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine) /* Point active to the new ELSP; prevent overwriting */ WRITE_ONCE(execlists->active, execlists->pending); - set_timeslice(engine); if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); @@ -2217,6 +2325,7 @@ static void process_csb(struct intel_engine_cs *engine) } while (head != tail); execlists->csb_head = head; + set_timeslice(engine); /* * Gen11 has proven to fail wrt global observation point between @@ -2399,7 +2508,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine) vaddr += engine->context_size; - memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); + memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); } static void @@ -2410,7 +2519,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) vaddr += engine->context_size; - if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) + if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) dev_err_once(engine->i915->drm.dev, "%s context redzone overwritten!\n", engine->name); @@ -2453,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce, struct intel_engine_cs *engine) { void *vaddr; - int ret; GEM_BUG_ON(!ce->state); - - ret = intel_context_active_acquire(ce); - if (ret) - goto err; GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); vaddr = i915_gem_object_pin_map(ce->state->obj, i915_coherent_map_type(engine->i915) | I915_MAP_OVERRIDE); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto unpin_active; - } + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); - ce->lrc_desc = lrc_descriptor(ce, engine); + ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; __execlists_update_reg_state(ce, engine); return 0; - -unpin_active: - intel_context_active_release(ce); -err: - return ret; } static int execlists_context_pin(struct intel_context *ce) @@ -2494,6 +2591,9 @@ static int execlists_context_alloc(struct intel_context *ce) static void execlists_context_reset(struct intel_context *ce) { + CE_TRACE(ce, "reset\n"); + GEM_BUG_ON(!intel_context_is_pinned(ce)); + /* * Because we emit WA_TAIL_DWORDS there may be a disparity * between our bookkeeping in ce->ring->head and ce->ring->tail and @@ -2510,8 +2610,14 @@ static void execlists_context_reset(struct intel_context *ce) * So to avoid that we reset the context images upon resume. For * simplicity, we just zero everything out. 
*/ - intel_ring_reset(ce->ring, 0); + intel_ring_reset(ce->ring, ce->ring->emit); + + /* Scrub away the garbage */ + execlists_init_reg_state(ce->lrc_reg_state, + ce, ce->engine, ce->ring, true); __execlists_update_reg_state(ce, ce->engine); + + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; } static const struct intel_context_ops execlists_context_ops = { @@ -2925,6 +3031,8 @@ static void enable_execlists(struct intel_engine_cs *engine) RING_HWS_PGA, i915_ggtt_offset(engine->status_page.vma)); ENGINE_POSTING_READ(engine, RING_HWS_PGA); + + engine->context_tag = 0; } static bool unexpected_starting_state(struct intel_engine_cs *engine) @@ -3030,10 +3138,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) &execlists->csb_status[reset_value]); } -static void __execlists_reset_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine) +static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) { - u32 *regs = ce->lrc_reg_state; int x; x = lrc_ring_mi_mode(engine); @@ -3043,6 +3149,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce, } } +static void __execlists_reset_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + __reset_stop_ring(regs, engine); +} + static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -3795,7 +3909,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ - engine->release = execlists_release; engine->resume = execlists_resume; engine->cops = &execlists_context_ops; @@ -3910,6 +4023,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) reset_csb_pointers(engine); + /* Finally, take ownership and responsibility for cleanup! */ + engine->release = execlists_release; + return 0; } @@ -3949,18 +4065,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) static void init_common_reg_state(u32 * const regs, const struct intel_engine_cs *engine, - const struct intel_ring *ring) + const struct intel_ring *ring, + bool inhibit) { - regs[CTX_CONTEXT_CONTROL] = - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + u32 ctl; + + ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + if (inhibit) + ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; if (INTEL_GEN(engine->i915) < 11) - regs[CTX_CONTEXT_CONTROL] |= - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE); + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + regs[CTX_CONTEXT_CONTROL] = ctl; regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; - regs[CTX_BB_STATE] = RING_BB_PPGTT; } static void init_wa_bb_reg_state(u32 * const regs, @@ -4016,7 +4135,7 @@ static void execlists_init_reg_state(u32 *regs, const struct intel_context *ce, const struct intel_engine_cs *engine, const struct intel_ring *ring, - bool close) + bool inhibit) { /* * A context is actually a big batch buffer with several @@ -4028,21 +4147,17 @@ static void execlists_init_reg_state(u32 *regs, * * Must keep consistent with virtual_update_register_offsets(). 
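/*
 * Sketch of the masked-register convention behind _MASKED_BIT_ENABLE() and
 * _MASKED_BIT_DISABLE() used in init_common_reg_state() above: the high 16
 * bits of the written value select which bits take effect, the low 16 bits
 * carry the new values, so independent writes can be OR'ed together.
 * Illustrative only; the register and bit numbers are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define MASKED_FIELD(mask, value)  (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(bit)     MASKED_FIELD((bit), (bit))
#define MASKED_BIT_DISABLE(bit)    MASKED_FIELD((bit), 0)

/* What the hardware conceptually does with such a write. */
static uint16_t apply_masked_write(uint16_t reg, uint32_t wr)
{
        uint16_t mask = wr >> 16, value = wr & 0xffff;

        return (reg & ~mask) | (value & mask);
}

int main(void)
{
        uint16_t reg = 0;

        reg = apply_masked_write(reg, MASKED_BIT_ENABLE(0x0008));
        reg = apply_masked_write(reg, MASKED_BIT_DISABLE(0x0001));
        printf("reg = %#06x\n", (unsigned int)reg);    /* only bit 3 ends up set */
        return 0;
}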
*/ - u32 *bbe = set_offsets(regs, reg_offsets(engine), engine); - - if (close) { /* Close the batch; used mainly by live_lrc_layout() */ - *bbe = MI_BATCH_BUFFER_END; - if (INTEL_GEN(engine->i915) >= 10) - *bbe |= BIT(0); - } + set_offsets(regs, reg_offsets(engine), engine, inhibit); - init_common_reg_state(regs, engine, ring); + init_common_reg_state(regs, engine, ring, inhibit); init_ppgtt_reg_state(regs, vm_alias(ce->vm)); init_wa_bb_reg_state(regs, engine, INTEL_GEN(engine->i915) >= 12 ? GEN12_CTX_BB_PER_CTX_PTR : CTX_BB_PER_CTX_PTR); + + __reset_stop_ring(regs, engine); } static int @@ -4053,7 +4168,6 @@ populate_lr_context(struct intel_context *ce, { bool inhibit = true; void *vaddr; - u32 *regs; int ret; vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); @@ -4083,11 +4197,8 @@ populate_lr_context(struct intel_context *ce, /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ - regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ce, engine, ring, inhibit); - if (inhibit) - regs[CTX_CONTEXT_CONTROL] |= - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, + ce, engine, ring, inhibit); ret = 0; err_unpin_ctx: @@ -4481,9 +4592,11 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; + ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; /* * The decision on whether to submit a request using semaphores diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 893249ea48d4..eeef90b55c64 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -127,7 +127,7 @@ struct drm_i915_mocs_table { LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \ L3_3_WB) -static const struct drm_i915_mocs_entry skylake_mocs_table[] = { +static const struct drm_i915_mocs_entry skl_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), @@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ L3_1_UC) -static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { +static const struct drm_i915_mocs_entry tgl_mocs_table[] = { /* Base - Error (Reserved for Non-Use) */ MOCS_ENTRY(0, 0x0, 0x0), /* Base - Reserved */ @@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { L3_3_WB), }; -static const struct drm_i915_mocs_entry icelake_mocs_table[] = { +static const struct drm_i915_mocs_entry icl_mocs_table[] = { /* Base - Uncached (Deprecated) */ MOCS_ENTRY(I915_MOCS_UNCACHED, LE_1_UC | LE_TC_1_LLC, @@ -284,17 +284,17 @@ static bool get_mocs_settings(const struct drm_i915_private *i915, struct drm_i915_mocs_table *table) { if (INTEL_GEN(i915) >= 12) { - table->size = ARRAY_SIZE(tigerlake_mocs_table); - table->table = tigerlake_mocs_table; + table->size = ARRAY_SIZE(tgl_mocs_table); + table->table = tgl_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; } else if (IS_GEN(i915, 11)) { - table->size = ARRAY_SIZE(icelake_mocs_table); - table->table = icelake_mocs_table; + table->size = ARRAY_SIZE(icl_mocs_table); + table->table = icl_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; } else if 
(IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { - table->size = ARRAY_SIZE(skylake_mocs_table); + table->size = ARRAY_SIZE(skl_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; - table->table = skylake_mocs_table; + table->table = skl_mocs_table; } else if (IS_GEN9_LP(i915)) { table->size = ARRAY_SIZE(broxton_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c new file mode 100644 index 000000000000..f86f7e68ce5e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/slab.h> + +#include "i915_trace.h" +#include "intel_gtt.h" +#include "gen6_ppgtt.h" +#include "gen8_ppgtt.h" + +struct i915_page_table *alloc_pt(struct i915_address_space *vm) +{ + struct i915_page_table *pt; + + pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); + if (unlikely(!pt)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, &pt->base))) { + kfree(pt); + return ERR_PTR(-ENOMEM); + } + + atomic_set(&pt->used, 0); + return pt; +} + +struct i915_page_directory *__alloc_pd(size_t sz) +{ + struct i915_page_directory *pd; + + pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); + if (unlikely(!pd)) + return NULL; + + spin_lock_init(&pd->lock); + return pd; +} + +struct i915_page_directory *alloc_pd(struct i915_address_space *vm) +{ + struct i915_page_directory *pd; + + pd = __alloc_pd(sizeof(*pd)); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } + + return pd; +} + +void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) +{ + cleanup_page_dma(vm, pd); + kfree(pd); +} + +static inline void +write_dma_entry(struct i915_page_dma * const pdma, + const unsigned short idx, + const u64 encoded_entry) +{ + u64 * const vaddr = kmap_atomic(pdma->page); + + vaddr[idx] = encoded_entry; + kunmap_atomic(vaddr); +} + +void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) +{ + /* Each thread pre-pins the pd, and we may have a thread per pde. 
*/ + GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); + + atomic_inc(px_used(pd)); + pd->entry[idx] = to; + write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); +} + +void +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + const struct i915_page_scratch * const scratch) +{ + GEM_BUG_ON(atomic_read(px_used(pd)) == 0); + + write_dma_entry(px_base(pd), idx, scratch->encode); + pd->entry[idx] = NULL; + atomic_dec(px_used(pd)); +} + +bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_table * const pt, + const struct i915_page_scratch * const scratch) +{ + bool free = false; + + if (atomic_add_unless(&pt->used, -1, 1)) + return false; + + spin_lock(&pd->lock); + if (atomic_dec_and_test(&pt->used)) { + clear_pd_entry(pd, idx, scratch); + free = true; + } + spin_unlock(&pd->lock); + + return free; +} + +int i915_ppgtt_init_hw(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + gtt_write_workarounds(gt); + + if (IS_GEN(i915, 6)) + gen6_ppgtt_enable(gt); + else if (IS_GEN(i915, 7)) + gen7_ppgtt_enable(gt); + + return 0; +} + +static struct i915_ppgtt * +__ppgtt_create(struct intel_gt *gt) +{ + if (INTEL_GEN(gt->i915) < 8) + return gen6_ppgtt_create(gt); + else + return gen8_ppgtt_create(gt); +} + +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ppgtt *ppgtt; + + ppgtt = __ppgtt_create(gt); + if (IS_ERR(ppgtt)) + return ppgtt; + + trace_i915_ppgtt_create(&ppgtt->vm); + + return ppgtt; +} + +static int ppgtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + u32 pte_flags; + int err; + + if (flags & I915_VMA_ALLOC) { + err = vma->vm->allocate_va_range(vma->vm, + vma->node.start, vma->size); + if (err) + return err; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); + } + + /* Applicable to VLV, and gen8+ */ + pte_flags = 0; + if (i915_gem_object_is_readonly(vma->obj)) + pte_flags |= PTE_READ_ONLY; + + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + wmb(); + + return 0; +} + +static void ppgtt_unbind_vma(struct i915_vma *vma) +{ + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); +} + +int ppgtt_set_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->pages); + + vma->pages = vma->obj->mm.pages; + + vma->page_sizes = vma->obj->mm.page_sizes; + + return 0; +} + +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + ppgtt->vm.gt = gt; + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); + + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->vm.vma_ops.clear_pages = clear_pages; +} diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 1c51296646e0..beee0cf89bce 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -147,11 +147,7 @@ static void mark_innocent(struct i915_request *rq) void __i915_request_reset(struct i915_request *rq, bool guilty) { - GEM_TRACE("%s rq=%llx:%lld, guilty? 
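/*
 * Sketch of the two-phase release used by release_pd_entry() above: the
 * common case drops a reference without taking the lock (atomic_add_unless()
 * refuses to touch the final reference), and only a potential last user takes
 * the lock and rechecks before tearing the entry down.  Stand-alone C11
 * approximation with pthread/stdatomic stand-ins, not the driver's code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
        atomic_int used;
        pthread_mutex_t lock;           /* stands in for pd->lock */
        void *slot;                     /* stands in for pd->entry[idx] */
};

/* Returns true when the caller just freed the entry. */
bool release_entry(struct entry *e)
{
        int v = atomic_load(&e->used);
        bool freed = false;

        /* Fast path: drop a reference as long as we are not the last user. */
        while (v > 1) {
                if (atomic_compare_exchange_weak(&e->used, &v, v - 1))
                        return false;
        }

        /* Slow path: we may be the last user, recheck under the lock. */
        pthread_mutex_lock(&e->lock);
        if (atomic_fetch_sub(&e->used, 1) == 1) {
                e->slot = NULL;         /* clear_pd_entry() equivalent */
                freed = true;
        }
        pthread_mutex_unlock(&e->lock);
        return freed;
}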
%s\n", - rq->engine->name, - rq->fence.context, - rq->fence.seqno, - yesno(guilty)); + RQ_TRACE(rq, "guilty? %s\n", yesno(guilty)); GEM_BUG_ON(i915_request_completed(rq)); @@ -251,9 +247,8 @@ out: return ret; } -static int ironlake_do_reset(struct intel_gt *gt, - intel_engine_mask_t engine_mask, - unsigned int retry) +static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, + unsigned int retry) { struct intel_uncore *uncore = gt->uncore; int ret; @@ -597,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt) else if (INTEL_GEN(i915) >= 6) return gen6_reset_engines; else if (INTEL_GEN(i915) >= 5) - return ironlake_do_reset; + return ilk_do_reset; else if (IS_G4X(i915)) return g4x_do_reset; else if (IS_G33(i915) || IS_PINEVIEW(i915)) @@ -625,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { - GEM_TRACE("engine_mask=%x\n", engine_mask); + GT_TRACE(gt, "engine_mask=%x\n", engine_mask); preempt_disable(); ret = reset(gt, engine_mask, retry); preempt_enable(); @@ -785,8 +780,7 @@ static void nop_submit_request(struct i915_request *request) struct intel_engine_cs *engine = request->engine; unsigned long flags; - GEM_TRACE("%s fence %llx:%lld -> -EIO\n", - engine->name, request->fence.context, request->fence.seqno); + RQ_TRACE(request, "-EIO\n"); dma_fence_set_error(&request->fence, -EIO); spin_lock_irqsave(&engine->active.lock, flags); @@ -813,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) intel_engine_dump(engine, &p, "%s\n", engine->name); } - GEM_TRACE("start\n"); + GT_TRACE(gt, "start\n"); /* * First, stop submission to hw, but do not yet complete requests by @@ -844,7 +838,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) reset_finish(gt, awake); - GEM_TRACE("end\n"); + GT_TRACE(gt, "end\n"); } void intel_gt_set_wedged(struct intel_gt *gt) @@ -870,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) return false; - GEM_TRACE("start\n"); + GT_TRACE(gt, "start\n"); /* * Before unwedging, make sure that all pending operations @@ -932,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) */ intel_engines_reset_default_submission(gt); - GEM_TRACE("end\n"); + GT_TRACE(gt, "end\n"); smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, >->reset.flags); @@ -1007,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t awake; int ret; - GEM_TRACE("flags=%lx\n", gt->reset.flags); + GT_TRACE(gt, "flags=%lx\n", gt->reset.flags); might_sleep(); GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, >->reset.flags)); @@ -1236,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt, engine_mask &= INTEL_INFO(gt->i915)->engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(gt->i915, engine_mask, msg); + i915_capture_error_state(gt->i915); intel_gt_clear_error_registers(gt, engine_mask); } @@ -1329,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) if (!intel_gt_is_wedged(gt)) return 0; - /* Reset still in progress? Maybe we will recover? */ - if (!test_bit(I915_RESET_BACKOFF, >->reset.flags)) + if (intel_gt_has_init_error(gt)) return -EIO; + /* Reset still in progress? Maybe we will recover? 
*/ if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, &gt->reset.flags))) return -EINTR; @@ -1354,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt) init_waitqueue_head(&gt->reset.queue); mutex_init(&gt->reset.mutex); init_srcu_struct(&gt->reset.backoff_srcu); + + /* no GPU until we are ready! */ + __set_bit(I915_WEDGED, &gt->reset.flags); } void intel_gt_fini_reset(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 81f872f9ef03..bc44fe8e5ffa 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -33,6 +33,7 @@ #include "gem/i915_gem_context.h" +#include "gen6_ppgtt.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_context.h" @@ -1328,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce) static int ring_context_pin(struct intel_context *ce) { - int err; - - err = intel_context_active_acquire(ce); - if (err) - return err; - - err = __context_pin_ppgtt(ce); - if (err) - goto err_active; - - return 0; - -err_active: - intel_context_active_release(ce); - return err; + return __context_pin_ppgtt(ce); } static void ring_context_reset(struct intel_context *ce) { - intel_ring_reset(ce->ring, 0); + intel_ring_reset(ce->ring, ce->ring->emit); } static const struct intel_context_ops ring_context_ops = { @@ -1394,7 +1381,7 @@ static int load_pd_dir(struct i915_request *rq, intel_ring_advance(rq, cs); - return 0; + return rq->engine->emit_flush(rq, EMIT_FLUSH); } static inline int mi_set_context(struct i915_request *rq, u32 flags) @@ -1408,14 +1395,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) int len; u32 *cs; - flags |= MI_MM_SPACE_GTT; - if (IS_HASWELL(i915)) - /* These flags are for resource streamer on HSW+ */ - flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; - else - /* We need to save the extended state for powersaving modes */ - flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; - len = 4; if (IS_GEN(i915, 7)) len += 2 + (num_engines ?
4 * num_engines + 6 : 0); @@ -1592,7 +1571,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) if (ret) return ret; - return rq->engine->emit_flush(rq, EMIT_FLUSH); + return rq->engine->emit_flush(rq, EMIT_INVALIDATE); } static int switch_context(struct i915_request *rq) @@ -1607,15 +1586,21 @@ static int switch_context(struct i915_request *rq) return ret; if (ce->state) { - u32 hw_flags; + u32 flags; GEM_BUG_ON(rq->engine->id != RCS0); - hw_flags = 0; - if (!test_bit(CONTEXT_VALID_BIT, &ce->flags)) - hw_flags = MI_RESTORE_INHIBIT; + /* For resource streamer on HSW+ and power context elsewhere */ + BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); + BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN); + + flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT; + if (test_bit(CONTEXT_VALID_BIT, &ce->flags)) + flags |= MI_RESTORE_EXT_STATE_EN; + else + flags |= MI_RESTORE_INHIBIT; - ret = mi_set_context(rq, hw_flags); + ret = mi_set_context(rq, flags); if (ret) return ret; } @@ -1842,8 +1827,6 @@ static void setup_common(struct intel_engine_cs *engine) setup_irq(engine); - engine->release = ring_release; - engine->resume = xcs_resume; engine->reset.prepare = reset_prepare; engine->reset.rewind = reset_rewind; @@ -2009,6 +1992,9 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); + /* Finally, take ownership and responsibility for cleanup! */ + engine->release = ring_release; + return 0; err_ring: diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index f232036c3c7a..d2a3d935d186 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq) spin_lock_irqsave(&rq->lock, flags); if (!i915_request_has_waitboost(rq) && !dma_fence_is_signaled_locked(&rq->fence)) { - rq->flags |= I915_REQUEST_WAITBOOST; + set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); if (!atomic_fetch_inc(&rps->num_waiters) && READ_ONCE(rps->cur_freq) < rps->boost_freq) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index ee5dc4fbdeb9..87716529cd2f 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -348,7 +348,6 @@ void intel_timeline_enter(struct intel_timeline *tl) * use atomic to manipulate tl->active_count. */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!atomic_read(&tl->pin_count)); if (atomic_add_unless(&tl->active_count, 1, 0)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 195ccf7db272..4e292d4bf7b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -254,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, /* WaDisableDopClockGating:bdw * - * Also see the related UCGTCL1 write in broadwell_init_clock_gating() + * Also see the related UCGTCL1 write in bdw_init_clock_gating() * to disable EUTC clock gating. 
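/*
 * Sketch of why the BUILD_BUG_ON()s added to switch_context() above allow a
 * single flags word to serve both the HSW resource-streamer path and the
 * extended-state path: the differently-named bits are asserted equal at
 * compile time, so one value can be handed to either consumer.  The bit
 * values below are placeholders, not the real MI_SET_CONTEXT encoding.
 */
#include <assert.h>

#define MI_SAVE_EXT_STATE_EN_SKETCH     (1u << 3)       /* hypothetical */
#define HSW_MI_RS_SAVE_STATE_EN_SKETCH  (1u << 3)       /* hypothetical alias */

_Static_assert(MI_SAVE_EXT_STATE_EN_SKETCH == HSW_MI_RS_SAVE_STATE_EN_SKETCH,
               "both names must refer to the same bit");

unsigned int build_switch_flags(int context_valid)
{
        unsigned int flags = MI_SAVE_EXT_STATE_EN_SKETCH;

        if (context_valid)
                flags |= 1u << 4;       /* e.g. a restore-enable bit */
        else
                flags |= 1u << 5;       /* e.g. a restore-inhibit bit */
        return flags;
}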
*/ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 4e1eafa94be9..a560b7eee2cd 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -149,7 +149,11 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pin(struct intel_context *ce) { - return intel_context_active_acquire(ce); + return 0; +} + +static void mock_context_reset(struct intel_context *ce) +{ } static const struct intel_context_ops mock_context_ops = { @@ -161,6 +165,7 @@ static const struct intel_context_ops mock_context_ops = { .enter = intel_context_enter_engine, .exit = intel_context_exit_engine, + .reset = mock_context_reset, .destroy = mock_context_destroy, }; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 5dbda2a74272..3e5e6c86e843 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -1312,7 +1312,7 @@ static int igt_reset_evict_ppgtt(void *arg) if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL) return 0; - ppgtt = i915_ppgtt_create(gt->i915); + ppgtt = i915_ppgtt_create(gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1498,7 +1498,7 @@ static int igt_handle_error(void *arg) struct intel_engine_cs *engine = gt->engine[RCS0]; struct hang h; struct i915_request *rq; - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; int err; /* Check that we can issue a global GPU and engine reset */ diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 9ec9833c9c7b..15cda024e3e4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -527,13 +527,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine) return rq; } -static void wait_for_submit(struct intel_engine_cs *engine, - struct i915_request *rq) +static int wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) { + timeout += jiffies; do { cond_resched(); intel_engine_flush_submission(engine); - } while (!i915_request_is_active(rq)); + if (i915_request_is_active(rq)) + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIME; } static long timeslice_threshold(const struct intel_engine_cs *engine) @@ -601,7 +607,12 @@ static int live_timeslice_queue(void *arg) goto err_heartbeat; } engine->schedule(rq, &attr); - wait_for_submit(engine, rq); + err = wait_for_submit(engine, rq, HZ / 2); + if (err) { + pr_err("%s: Timed out trying to submit semaphores\n", + engine->name); + goto err_rq; + } /* ELSP[1]: nop request */ nop = nop_request(engine); @@ -609,8 +620,13 @@ static int live_timeslice_queue(void *arg) err = PTR_ERR(nop); goto err_rq; } - wait_for_submit(engine, nop); + err = wait_for_submit(engine, nop, HZ / 2); i915_request_put(nop); + if (err) { + pr_err("%s: Timed out trying to submit nop\n", + engine->name); + goto err_rq; + } GEM_BUG_ON(i915_request_completed(rq)); GEM_BUG_ON(execlists_active(&engine->execlists) != rq); @@ -1137,7 +1153,7 @@ static int live_nopreempt(void *arg) } /* Low priority client, but unpreemptable! 
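/*
 * Sketch of the bounded wait that wait_for_submit() gains above: poll a
 * condition but give up after a deadline instead of spinning forever.  The
 * kernel version uses jiffies and time_before(); this stand-alone analogue
 * uses clock_gettime() and returns -ETIME on timeout, as the selftest does.
 */
#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static bool past(const struct timespec *deadline)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > deadline->tv_sec ||
               (now.tv_sec == deadline->tv_sec && now.tv_nsec >= deadline->tv_nsec);
}

/* Returns 0 once cond() is true, -ETIME if the timeout expires first. */
int wait_for_condition(bool (*cond)(void *), void *data, unsigned int timeout_ms)
{
        struct timespec deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        do {
                if (cond(data))
                        return 0;
                /* the driver flushes submission and cond_resched()s here */
                nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        } while (!past(&deadline));

        return cond(data) ? 0 : -ETIME;
}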
*/ - rq_a->flags |= I915_REQUEST_NOPREEMPT; + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); i915_request_add(rq_a); if (!igt_wait_for_spinner(&a.spin, rq_a)) { @@ -3362,7 +3378,7 @@ static int live_lrc_layout(void *arg) struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; - u32 *mem; + u32 *lrc; int err; /* @@ -3370,13 +3386,13 @@ static int live_lrc_layout(void *arg) * match the layout saved by HW. */ - mem = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!mem) + lrc = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!lrc) return -ENOMEM; err = 0; for_each_engine(engine, gt, id) { - u32 *hw, *lrc; + u32 *hw; int dw; if (!engine->default_state) @@ -3390,8 +3406,7 @@ static int live_lrc_layout(void *arg) } hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); - lrc = memset(mem, 0, PAGE_SIZE); - execlists_init_reg_state(lrc, + execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE), engine->kernel_context, engine, engine->kernel_context->ring, @@ -3406,6 +3421,13 @@ static int live_lrc_layout(void *arg) continue; } + if (lrc[dw] == 0) { + pr_debug("%s: skipped instruction %x at dword %d\n", + engine->name, lri, dw); + dw++; + continue; + } + if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { pr_err("%s: Expected LRI command at dword %d, found %08x\n", engine->name, dw, lri); @@ -3454,7 +3476,7 @@ static int live_lrc_layout(void *arg) break; } - kfree(mem); + kfree(lrc); return err; } diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile deleted file mode 100644 index bec94d434cb6..000000000000 --- a/drivers/gpu/drm/i915/gt/uc/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/../.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3ffc6267f96e..64934a876a50 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -12,6 +12,9 @@ #include "i915_drv.h" +static const struct intel_uc_ops uc_ops_off; +static const struct intel_uc_ops uc_ops_on; + /* Reset GuC providing us with fresh state for both GuC and HuC. 
*/ static int __intel_uc_reset_hw(struct intel_uc *uc) @@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc) intel_huc_init_early(&uc->huc); __confirm_options(uc); + + if (intel_uc_uses_guc(uc)) + uc->ops = &uc_ops_on; + else + uc->ops = &uc_ops_off; } void intel_uc_driver_late_release(struct intel_uc *uc) @@ -245,12 +253,11 @@ static void guc_disable_communication(struct intel_guc *guc) DRM_INFO("GuC communication disabled\n"); } -void intel_uc_fetch_firmwares(struct intel_uc *uc) +static void __uc_fetch_firmwares(struct intel_uc *uc) { int err; - if (!intel_uc_uses_guc(uc)) - return; + GEM_BUG_ON(!intel_uc_uses_guc(uc)); err = intel_uc_fw_fetch(&uc->guc.fw); if (err) @@ -260,20 +267,19 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc) intel_uc_fw_fetch(&uc->huc.fw); } -void intel_uc_cleanup_firmwares(struct intel_uc *uc) +static void __uc_cleanup_firmwares(struct intel_uc *uc) { intel_uc_fw_cleanup_fetch(&uc->huc.fw); intel_uc_fw_cleanup_fetch(&uc->guc.fw); } -void intel_uc_init(struct intel_uc *uc) +static void __uc_init(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; int ret; - if (!intel_uc_uses_guc(uc)) - return; + GEM_BUG_ON(!intel_uc_uses_guc(uc)); /* XXX: GuC submission is unavailable for now */ GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); @@ -288,7 +294,7 @@ void intel_uc_init(struct intel_uc *uc) intel_huc_init(huc); } -void intel_uc_fini(struct intel_uc *uc) +static void __uc_fini(struct intel_uc *uc) { intel_huc_fini(&uc->huc); intel_guc_fini(&uc->guc); @@ -309,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc) return __intel_uc_reset_hw(uc); } -void intel_uc_sanitize(struct intel_uc *uc) -{ - if (!intel_uc_supports_guc(uc)) - return; - - __uc_sanitize(uc); -} - /* Initialize and verify the uC regs related to uC positioning in WOPCM */ static int uc_init_wopcm(struct intel_uc *uc) { @@ -380,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc) (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID); } -int intel_uc_init_hw(struct intel_uc *uc) +static int __uc_check_hw(struct intel_uc *uc) { - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; - struct intel_guc *guc = &uc->guc; - struct intel_huc *huc = &uc->huc; - int ret, attempts; - if (!intel_uc_supports_guc(uc)) return 0; @@ -395,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc) * before on this system after reboot, otherwise we risk GPU hangs. * To check if GuC was loaded before we look at WOPCM registers. */ - if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc)) - return 0; + if (uc_is_wopcm_locked(uc)) + return -EIO; + + return 0; +} + +static int __uc_init_hw(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret, attempts; + + GEM_BUG_ON(!intel_uc_supports_guc(uc)); + GEM_BUG_ON(!intel_uc_uses_guc(uc)); if (!intel_uc_fw_is_available(&guc->fw)) { - ret = uc_is_wopcm_locked(uc) || + ret = __uc_check_hw(uc) || intel_uc_fw_is_overridden(&guc->fw) || intel_uc_supports_guc_submission(uc) ? 
intel_uc_fw_status_to_error(guc->fw.status) : 0; @@ -495,7 +501,7 @@ err_out: return -EIO; } -void intel_uc_fini_hw(struct intel_uc *uc) +static void __uc_fini_hw(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; @@ -595,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc) */ return __uc_resume(uc, true); } + +static const struct intel_uc_ops uc_ops_off = { + .init_hw = __uc_check_hw, +}; + +static const struct intel_uc_ops uc_ops_on = { + .sanitize = __uc_sanitize, + + .init_fw = __uc_fetch_firmwares, + .fini_fw = __uc_cleanup_firmwares, + + .init = __uc_init, + .fini = __uc_fini, + + .init_hw = __uc_init_hw, + .fini_hw = __uc_fini_hw, +}; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 527995c21196..49c913524686 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -10,7 +10,20 @@ #include "intel_huc.h" #include "i915_params.h" +struct intel_uc; + +struct intel_uc_ops { + int (*sanitize)(struct intel_uc *uc); + void (*init_fw)(struct intel_uc *uc); + void (*fini_fw)(struct intel_uc *uc); + void (*init)(struct intel_uc *uc); + void (*fini)(struct intel_uc *uc); + int (*init_hw)(struct intel_uc *uc); + void (*fini_hw)(struct intel_uc *uc); +}; + struct intel_uc { + struct intel_uc_ops const *ops; struct intel_guc guc; struct intel_huc huc; @@ -21,13 +34,6 @@ struct intel_uc { void intel_uc_init_early(struct intel_uc *uc); void intel_uc_driver_late_release(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); -void intel_uc_fetch_firmwares(struct intel_uc *uc); -void intel_uc_cleanup_firmwares(struct intel_uc *uc); -void intel_uc_sanitize(struct intel_uc *uc); -void intel_uc_init(struct intel_uc *uc); -int intel_uc_init_hw(struct intel_uc *uc); -void intel_uc_fini_hw(struct intel_uc *uc); -void intel_uc_fini(struct intel_uc *uc); void intel_uc_reset_prepare(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc); void intel_uc_runtime_suspend(struct intel_uc *uc); @@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc) return intel_huc_is_enabled(&uc->huc); } +#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \ +static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \ +{ \ + if (uc->ops->_OPS) \ + return uc->ops->_OPS(uc); \ + return _RET; \ +} +intel_uc_ops_function(sanitize, sanitize, int, 0); +intel_uc_ops_function(fetch_firmwares, init_fw, void, ); +intel_uc_ops_function(cleanup_firmwares, fini_fw, void, ); +intel_uc_ops_function(init, init, void, ); +intel_uc_ops_function(fini, fini, void, ); +intel_uc_ops_function(init_hw, init_hw, int, 0); +intel_uc_ops_function(fini_hw, fini_hw, void, ); +#undef intel_uc_ops_function + #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bb9fe6bf5275..21af822a79e0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2675,7 +2675,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) return 0; } -static int init_broadwell_mmio_info(struct intel_gvt *gvt) +static int init_bdw_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; int ret; @@ -3364,20 +3364,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; if (IS_BROADWELL(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + ret = init_bdw_mmio_info(gvt); if (ret) goto err; } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + 
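/*
 * Sketch of the ops-table pattern introduced for intel_uc above: the on/off
 * decision is made once at init time by selecting a vtable, and the public
 * entry points become generated wrappers that tolerate unimplemented hooks.
 * All names below are stand-ins for illustration, not the driver's symbols.
 */
#include <stdio.h>

struct uc;

struct uc_ops {
        int  (*init_hw)(struct uc *uc);
        void (*fini_hw)(struct uc *uc);
};

struct uc {
        const struct uc_ops *ops;
};

/* Wrapper generator in the spirit of intel_uc_ops_function(). */
#define UC_OPS_FUNCTION(name, op, type, ret)                    \
static type uc_##name(struct uc *uc)                            \
{                                                               \
        if (uc->ops->op)                                        \
                return uc->ops->op(uc);                         \
        return ret;                                             \
}

static int hw_init_real(struct uc *uc) { puts("loading firmware"); return 0; }

static const struct uc_ops ops_on  = { .init_hw = hw_init_real };
static const struct uc_ops ops_off = { 0 };     /* every hook falls back */

UC_OPS_FUNCTION(init_hw, init_hw, int, 0)

int main(void)
{
        struct uc guc_on  = { .ops = &ops_on };
        struct uc guc_off = { .ops = &ops_off };

        printf("on: %d, off: %d\n", uc_init_hw(&guc_on), uc_init_hw(&guc_off));
        return 0;
}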
ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); if (ret) goto err; } else if (IS_BROXTON(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b3299f88e24e..685d1e04a5ff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1224,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) enum intel_engine_id i; int ret; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index cfe09964622b..f3da5c06f331 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -605,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { intel_engine_mask_t tmp, mask = engine->mask; + struct llist_node *pos = NULL, *next; struct intel_gt *gt = engine->gt; - struct llist_node *pos, *next; int err; GEM_BUG_ON(i915_active_is_idle(ref)); - GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); + + /* Wait until the previous preallocation is completed */ + while (!llist_empty(&ref->preallocated_barriers)) + cond_resched(); /* * Preallocate a node for each physical engine supporting the target @@ -653,16 +656,24 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); GEM_BUG_ON(barrier_to_engine(node) != engine); - llist_add(barrier_to_ll(node), &ref->preallocated_barriers); + next = barrier_to_ll(node); + next->next = pos; + if (!pos) + pos = next; intel_engine_pm_get(engine); } + GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); + llist_add_batch(next, pos, &ref->preallocated_barriers); + return 0; unwind: - llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { + while (pos) { struct active_node *node = barrier_from_ll(pos); + pos = pos->next; + atomic_dec(&ref->count); intel_engine_pm_put(barrier_to_engine(node)); diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index e9d4200ce3bc..66883af64ca1 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -262,8 +262,10 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects) { struct i915_buddy_block *block, *on; - list_for_each_entry_safe(block, on, objects, link) + list_for_each_entry_safe(block, on, objects, link) { i915_buddy_free(mm, block); + cond_resched(); + } INIT_LIST_HEAD(objects); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d28468eaed57..d5a9b8a964c2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m, for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - intel_context_lock_pinned(ce); - if (intel_context_is_pinned(ce)) { + if (intel_context_pin_if_active(ce)) { rcu_read_lock(); if (ce->state) per_file_stats(0, ce->state->obj, &kstats); per_file_stats(0, ce->ring->vma->obj, &kstats); rcu_read_unlock(); + intel_context_unpin(ce); } - intel_context_unlock_pinned(ce); } i915_gem_context_unlock_engines(ctx); @@ -367,12 +366,16 @@ static void print_context_stats(struct seq_file *m, static int 
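/*
 * Sketch of the barrier preallocation change above: nodes are chained locally
 * first (no shared state is touched), then the whole chain is published with
 * a single atomic operation, which is what llist_add_batch() does for
 * &ref->preallocated_barriers.  Stand-alone C11 approximation of a lock-free
 * stack; the kernel's llist is the real mechanism.
 */
#include <stdatomic.h>
#include <stddef.h>

struct node {
        struct node *next;
};

/* Push the already-linked chain [first..last] onto a lock-free stack head. */
static void add_batch(struct node *first, struct node *last,
                      _Atomic(struct node *) *head)
{
        struct node *old = atomic_load(head);

        do {
                last->next = old;
        } while (!atomic_compare_exchange_weak(head, &old, first));
}

void preallocate(struct node *nodes, size_t count, _Atomic(struct node *) *head)
{
        struct node *first = NULL, *last = NULL;
        size_t i;

        /* Build the chain privately; nothing is visible to other threads yet. */
        for (i = 0; i < count; i++) {
                nodes[i].next = first;
                first = &nodes[i];
                if (!last)
                        last = &nodes[i];
        }

        if (first)
                add_batch(first, last, head);
}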
i915_gem_object_info(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_memory_region *mr; + enum intel_region_id id; seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", i915->mm.shrink_count, atomic_read(&i915->mm.free_count), i915->mm.shrink_memory); - + for_each_memory_region(mr, i915, id) + seq_printf(m, "%s: total:%pa, available:%pa bytes\n", + mr->name, &mr->total, &mr->avail); seq_putc(m, '\n'); print_context_stats(m, i915); @@ -682,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) static ssize_t gpu_state_read(struct file *file, char __user *ubuf, size_t count, loff_t *pos) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; ssize_t ret; void *buf; @@ -695,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf, if (!buf) return -ENOMEM; - ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count); + ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); if (ret <= 0) goto out; @@ -711,19 +714,19 @@ out: static int gpu_state_release(struct inode *inode, struct file *file) { - i915_gpu_state_put(file->private_data); + i915_gpu_coredump_put(file->private_data); return 0; } static int i915_gpu_info_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; - struct i915_gpu_state *gpu; + struct i915_gpu_coredump *gpu; intel_wakeref_t wakeref; gpu = NULL; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - gpu = i915_capture_gpu_state(i915); + gpu = i915_gpu_coredump(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -745,7 +748,7 @@ i915_error_state_write(struct file *filp, size_t cnt, loff_t *ppos) { - struct i915_gpu_state *error = filp->private_data; + struct i915_gpu_coredump *error = filp->private_data; if (!error) return 0; @@ -758,7 +761,7 @@ i915_error_state_write(struct file *filp, static int i915_error_state_open(struct inode *inode, struct file *file) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; error = i915_first_error_state(inode->i_private); if (IS_ERR(error)) @@ -1001,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return ret; } -static int ironlake_drpc_info(struct seq_file *m) +static int ilk_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_uncore *uncore = &i915->uncore; @@ -1209,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) else if (INTEL_GEN(dev_priv) >= 6) err = gen6_drpc_info(m); else - err = ironlake_drpc_info(m); + err = ilk_drpc_info(m); } return err; @@ -1509,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - intel_context_lock_pinned(ce); - if (intel_context_is_pinned(ce)) { + if (intel_context_pin_if_active(ce)) { seq_printf(m, "%s: ", ce->engine->name); if (ce->state) describe_obj(m, ce->state->obj); describe_ctx_ring(m, ce->ring); seq_putc(m, '\n'); + intel_context_unpin(ce); } - intel_context_unlock_pinned(ce); } i915_gem_context_unlock_engines(ctx); @@ -1977,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) struct drm_connector *connector = m->private; struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); int ret; if (!CAN_PSR(dev_priv)) { @@ 
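/*
 * Sketch of the "pin only if already active" pattern that replaces the
 * lock-and-check sequence in the debugfs code above: a temporary pin is taken
 * only when someone else already holds one, so an idle context is never
 * forced into the pinned state just to be inspected.  Stand-alone analogue of
 * atomic_inc_not_zero(); names are stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
        atomic_int pin_count;
};

bool pin_if_active(struct ctx *ce)
{
        int v = atomic_load(&ce->pin_count);

        while (v > 0) {
                if (atomic_compare_exchange_weak(&ce->pin_count, &v, v + 1))
                        return true;    /* piggy-back on an existing pin */
        }
        return false;                   /* idle: skip it, nothing to unpin */
}

void unpin(struct ctx *ce)
{
        atomic_fetch_sub(&ce->pin_count, 1);
}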
-2389,7 +2391,7 @@ static void intel_dp_info(struct seq_file *m, struct intel_connector *intel_connector) { struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); @@ -2409,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m, { struct intel_encoder *intel_encoder = intel_connector->encoder; struct intel_dp_mst_encoder *intel_mst = - enc_to_mst(&intel_encoder->base); + enc_to_mst(intel_encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, @@ -2422,7 +2424,7 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_connector *intel_connector) { struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); if (intel_connector->hdcp.shim) { @@ -3012,11 +3014,11 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - intel_encoder = intel_attached_encoder(connector); + intel_encoder = intel_attached_encoder(to_intel_connector(connector)); if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) continue; - intel_dig_port = enc_to_dig_port(&intel_encoder->base); + intel_dig_port = enc_to_dig_port(intel_encoder); if (!intel_dig_port->dp.can_mst) continue; @@ -3066,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file, continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); status = kstrtoint(input_buffer, 10, &val); if (status < 0) break; @@ -3075,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file, * testing code, only accept an actual value of 1 here */ if (val == 1) - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; else - intel_dp->compliance.test_active = 0; + intel_dp->compliance.test_active = false; } } drm_connector_list_iter_end(&conn_iter); @@ -3110,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_active) seq_puts(m, "1"); else @@ -3160,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_type == DP_TEST_LINK_EDID_READ) seq_printf(m, "%lx", @@ -3204,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); seq_printf(m, "%02lx", intel_dp->compliance.test_type); } else seq_puts(m, "0"); @@ -3815,8 +3817,8 @@ static void 
gen9_sseu_device_status(struct drm_i915_private *dev_priv, #undef SS_MAX } -static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, - struct sseu_dev_info *sseu) +static void bdw_sseu_device_status(struct drm_i915_private *dev_priv, + struct sseu_dev_info *sseu) { const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); @@ -3901,7 +3903,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) if (IS_CHERRYVIEW(dev_priv)) cherryview_sseu_device_status(dev_priv, &sseu); else if (IS_BROADWELL(dev_priv)) - broadwell_sseu_device_status(dev_priv, &sseu); + bdw_sseu_device_status(dev_priv, &sseu); else if (IS_GEN(dev_priv, 9)) gen9_sseu_device_status(dev_priv, &sseu); else if (INTEL_GEN(dev_priv) >= 10) @@ -4142,14 +4144,14 @@ static int i915_drrs_ctl_set(void *data, u64 val) drm_connector_mask(connector))) continue; - encoder = intel_attached_encoder(connector); + encoder = intel_attached_encoder(to_intel_connector(connector)); if (encoder->type != INTEL_OUTPUT_EDP) continue; DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n", val ? "en" : "dis", val); - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (val) intel_edp_drrs_enable(intel_dp, crtc_state); @@ -4353,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); u8 buf[16]; ssize_t err; int i; @@ -4388,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); if (connector->status != connector_status_connected) return -ENODEV; @@ -4466,7 +4468,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) } else if (ret) { break; } - intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); + intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", yesno(crtc_state->dsc.compression_enable)); @@ -4493,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file, int ret; struct drm_connector *connector = ((struct seq_file *)file->private_data)->private; - struct intel_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (len == 0) return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 59525094d0e3..f7385abdd74b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -469,6 +469,12 @@ static void vlv_free_s0ix_state(struct drm_i915_private *i915) i915->vlv_s0ix_state = NULL; } +static void sanitize_gpu(struct drm_i915_private *i915) +{ + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) + __intel_gt_reset(&i915->gt, ALL_ENGINES); +} + /** * i915_driver_early_probe - setup state not requiring device access * @dev_priv: device private @@ -602,6 +608,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret) goto err_uncore; + /* As early as 
possible, scrub existing GPU state before clobbering */ + sanitize_gpu(dev_priv); + return 0; err_uncore: @@ -1817,7 +1826,7 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_gt_sanitize(&dev_priv->gt, true); + sanitize_gpu(dev_priv); ret = i915_ggtt_enable_hw(dev_priv); if (ret) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d05a968227f7..077af22b8340 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -46,6 +46,7 @@ #include <linux/dma-resv.h> #include <linux/shmem_fs.h> #include <linux/stackdepot.h> +#include <linux/xarray.h> #include <drm/intel-gtt.h> #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ @@ -110,8 +111,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20191223" -#define DRIVER_TIMESTAMP 1577120893 +#define DRIVER_DATE "20200114" +#define DRIVER_TIMESTAMP 1579001978 struct drm_i915_gem_object; @@ -201,8 +202,7 @@ struct drm_i915_file_private { struct list_head request_list; } mm; - struct idr context_idr; - struct mutex context_idr_lock; /* guards context_idr */ + struct xarray context_xa; struct idr vm_idr; struct mutex vm_idr_lock; /* guards vm_idr */ @@ -505,6 +505,7 @@ struct i915_psr { bool dc3co_enabled; u32 dc3co_exit_delay; struct delayed_work idle_work; + bool initially_probed; }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) @@ -1252,6 +1253,16 @@ struct drm_i915_private { struct llist_head free_list; struct work_struct free_work; } contexts; + + /* + * We replace the local file with a global mappings as the + * backing storage for the mmap is on the device and not + * on the struct file, and we do not want to prolong the + * lifetime of the local fd. To minimise the number of + * anonymous inodes we create, we use a global singleton to + * share the global mapping. 
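/*
 * Kernel-style sketch (not user-space buildable) of the IDR to XArray swap
 * above: per-file context lookup becomes a plain xa_load(), which is safe
 * under RCU, and the separate mutex that guarded the IDR goes away.  Only
 * documented XArray calls are used; the structure and field names here are
 * stand-ins, not the driver's.
 */
#include <linux/xarray.h>

struct file_private {
        struct xarray context_xa;       /* replaces context_idr + its mutex */
};

static void file_private_init(struct file_private *fpriv)
{
        xa_init_flags(&fpriv->context_xa, XA_FLAGS_ALLOC);
}

static int context_insert(struct file_private *fpriv, u32 id, void *ctx)
{
        /* xa_insert() fails with -EBUSY if the id is already in use. */
        return xa_insert(&fpriv->context_xa, id, ctx, GFP_KERNEL);
}

static void *context_lookup(struct file_private *fpriv, u32 id)
{
        return xa_load(&fpriv->context_xa, id);
}

static void context_remove(struct file_private *fpriv, u32 id)
{
        xa_erase(&fpriv->context_xa, id);
}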
+ */ + struct file *mmap_singleton; } gem; u8 pch_ssc_use; @@ -1657,8 +1668,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) /* WaRsDisableCoarsePowerGating:skl,cnl */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - IS_GEN_RANGE(dev_priv, 9, 10) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_CANNONLAKE(dev_priv) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ @@ -1861,7 +1874,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) } static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, - struct intel_engine_cs *engine) + const struct intel_engine_cs *engine) { return atomic_read(&error->reset_engine_count[engine->uabi_class]); } @@ -1889,7 +1902,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) static inline struct i915_gem_context * __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) { - return idr_find(&file_priv->context_idr, id); + return xa_load(&file_priv->context_xa, id); } static inline struct i915_gem_context * @@ -2015,6 +2028,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase); static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9ddcf17230e6..94f993e4c12f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -45,6 +45,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" +#include "gem/i915_gem_region.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -200,7 +201,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, static int i915_gem_create(struct drm_file *file, - struct drm_i915_private *dev_priv, + struct intel_memory_region *mr, u64 *size_p, u32 *handle_p) { @@ -209,12 +210,16 @@ i915_gem_create(struct drm_file *file, u64 size; int ret; - size = round_up(*size_p, PAGE_SIZE); + GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); + size = round_up(*size_p, mr->min_page_size); if (size == 0) return -EINVAL; + /* For most of the ABI (e.g. 
mmap) we think in system pages */ + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + /* Allocate the new object */ - obj = i915_gem_object_create_shmem(dev_priv, size); + obj = i915_gem_object_create_region(mr, size, 0); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -234,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { + enum intel_memory_type mem_type; int cpp = DIV_ROUND_UP(args->bpp, 8); u32 format; @@ -260,7 +266,14 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->pitch, 4096); args->size = args->pitch * args->height; - return i915_gem_create(file, to_i915(dev), + + mem_type = INTEL_MEMORY_SYSTEM; + if (HAS_LMEM(to_i915(dev))) + mem_type = INTEL_MEMORY_LOCAL; + + return i915_gem_create(file, + intel_memory_region_by_type(to_i915(dev), + mem_type), &args->size, &args->handle); } @@ -274,12 +287,14 @@ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create *args = data; - i915_gem_flush_free_objects(dev_priv); + i915_gem_flush_free_objects(i915); - return i915_gem_create(file, dev_priv, + return i915_gem_create(file, + intel_memory_region_by_type(i915, + INTEL_MEMORY_SYSTEM), &args->size, &args->handle); } @@ -1172,6 +1187,8 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { + i915_gem_driver_release__contexts(dev_priv); + intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); @@ -1179,8 +1196,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); - i915_gem_driver_release__contexts(dev_priv); - i915_gem_drain_freed_objects(dev_priv); WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 71efccfde122..d9c34a23cd67 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -412,6 +412,9 @@ int i915_vma_pin_fence(struct i915_vma *vma) { int err; + if (!vma->fence && !i915_gem_object_is_tiled(vma->obj)) + return 0; + /* * Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1efe58ad0ce9..e039eb56900f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1,26 +1,7 @@ +// SPDX-License-Identifier: MIT /* * Copyright © 2010 Daniel Vetter - * Copyright © 2011-2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
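/*
 * Sketch of the size handling in the reworked i915_gem_create() above: the
 * requested size is rounded up to the memory region's minimum page size
 * (asserted to be a power of two) and must remain a multiple of the CPU page
 * size used by the rest of the ABI.  Stand-alone arithmetic only; the 64K
 * minimum page size is an assumed example value.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_PAGE_SIZE 4096u

static bool is_pow2(uint64_t n)
{
        return n && !(n & (n - 1));
}

static uint64_t round_up_pow2(uint64_t v, uint64_t align)
{
        return (v + align - 1) & ~(align - 1);
}

int main(void)
{
        uint64_t min_page_size = 65536;         /* e.g. a local-memory region */
        uint64_t request = 10000;
        uint64_t size;

        if (!is_pow2(min_page_size))
                return 1;

        size = round_up_pow2(request, min_page_size);
        printf("%llu -> %llu, multiple of CPU page: %s\n",
               (unsigned long long)request, (unsigned long long)size,
               size % CPU_PAGE_SIZE == 0 ? "yes" : "no");
        return 0;
}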
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * + * Copyright © 2020 Intel Corporation */ #include <linux/slab.h> /* fault-inject.h is not standalone! */ @@ -45,2116 +26,6 @@ #include "i915_trace.h" #include "i915_vgpu.h" -#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) - -#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) -#define DBG(...) trace_printk(__VA_ARGS__) -#else -#define DBG(...) -#endif - -#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */ - -/** - * DOC: Global GTT views - * - * Background and previous state - * - * Historically objects could exists (be bound) in global GTT space only as - * singular instances with a view representing all of the object's backing pages - * in a linear fashion. This view will be called a normal view. - * - * To support multiple views of the same object, where the number of mapped - * pages is not equal to the backing store, or where the layout of the pages - * is not linear, concept of a GGTT view was added. - * - * One example of an alternative view is a stereo display driven by a single - * image. In this case we would have a framebuffer looking like this - * (2x2 pages): - * - * 12 - * 34 - * - * Above would represent a normal GGTT view as normally mapped for GPU or CPU - * rendering. In contrast, fed to the display engine would be an alternative - * view which could look something like this: - * - * 1212 - * 3434 - * - * In this example both the size and layout of pages in the alternative view is - * different from the normal view. - * - * Implementation and usage - * - * GGTT views are implemented using VMAs and are distinguished via enum - * i915_ggtt_view_type and struct i915_ggtt_view. - * - * A new flavour of core GEM functions which work with GGTT bound objects were - * added with the _ggtt_ infix, and sometimes with _view postfix to avoid - * renaming in large amounts of code. They take the struct i915_ggtt_view - * parameter encapsulating all metadata required to implement a view. - * - * As a helper for callers which are only interested in the normal view, - * globally const i915_ggtt_view_normal singleton instance exists. All old core - * GEM API functions, the ones not taking the view parameter, are operating on, - * or with the normal GGTT view. - * - * Code wanting to add or use a new GGTT view needs to: - * - * 1. Add a new enum with a suitable name. - * 2. Extend the metadata in the i915_ggtt_view structure if required. - * 3. Add support to i915_get_vma_pages(). - * - * New views are required to build a scatter-gather table from within the - * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and - * exists for the lifetime of an VMA. - * - * Core API is designed to have copy semantics which means that passed in - * struct i915_ggtt_view does not need to be persistent (left around after - * calling the core API functions). 
- * - */ - -#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) - -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma); - -static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - - spin_lock_irq(&uncore->lock); - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); - spin_unlock_irq(&uncore->lock); -} - -static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - - /* - * Note that as an uncached mmio write, this will flush the - * WCB of the writes into the GGTT before it triggers the invalidate. - */ - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); -} - -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - struct drm_i915_private *i915 = ggtt->vm.i915; - - gen8_ggtt_invalidate(ggtt); - - if (INTEL_GEN(i915) >= 12) - intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, - GEN12_GUC_TLB_INV_CR_INVALIDATE); - else - intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); -} - -static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - intel_gtt_chipset_flush(); -} - -static int ppgtt_bind_vma(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - u32 pte_flags; - int err; - - if (flags & I915_VMA_ALLOC) { - err = vma->vm->allocate_va_range(vma->vm, - vma->node.start, vma->size); - if (err) - return err; - - set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); - } - - /* Applicable to VLV, and gen8+ */ - pte_flags = 0; - if (i915_gem_object_is_readonly(vma->obj)) - pte_flags |= PTE_READ_ONLY; - - GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - wmb(); - - return 0; -} - -static void ppgtt_unbind_vma(struct i915_vma *vma) -{ - if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); -} - -static int ppgtt_set_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(vma->pages); - - vma->pages = vma->obj->mm.pages; - - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - -static void clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - if (vma->pages != vma->obj->mm.pages) { - sg_free_table(vma->pages); - kfree(vma->pages); - } - vma->pages = NULL; - - memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); -} - -static u64 gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; - - if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; - - switch (level) { - case I915_CACHE_NONE: - pte |= PPAT_UNCACHED; - break; - case I915_CACHE_WT: - pte |= PPAT_DISPLAY_ELLC; - break; - default: - pte |= PPAT_CACHED; - break; - } - - return pte; -} - -static u64 gen8_pde_encode(const dma_addr_t addr, - const enum i915_cache_level level) -{ - u64 pde = _PAGE_PRESENT | _PAGE_RW; - pde |= addr; - if (level != I915_CACHE_NONE) - pde |= PPAT_CACHED_PDE; - else - pde |= PPAT_UNCACHED; - return pde; -} - -static u64 snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_L3_LLC: - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - 
MISSING_CASE(level); - } - - return pte; -} - -static u64 ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_L3_LLC: - pte |= GEN7_PTE_CACHE_L3_LLC; - break; - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - MISSING_CASE(level); - } - - return pte; -} - -static u64 byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - if (!(flags & PTE_READ_ONLY)) - pte |= BYT_PTE_WRITEABLE; - - if (level != I915_CACHE_NONE) - pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; - - return pte; -} - -static u64 hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= HSW_PTE_ADDR_ENCODE(addr); - - if (level != I915_CACHE_NONE) - pte |= HSW_WB_LLC_AGE3; - - return pte; -} - -static u64 iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= HSW_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_NONE: - break; - case I915_CACHE_WT: - pte |= HSW_WT_ELLC_LLC_AGE3; - break; - default: - pte |= HSW_WB_ELLC_LLC_AGE3; - break; - } - - return pte; -} - -static void stash_init(struct pagestash *stash) -{ - pagevec_init(&stash->pvec); - spin_lock_init(&stash->lock); -} - -static struct page *stash_pop_page(struct pagestash *stash) -{ - struct page *page = NULL; - - spin_lock(&stash->lock); - if (likely(stash->pvec.nr)) - page = stash->pvec.pages[--stash->pvec.nr]; - spin_unlock(&stash->lock); - - return page; -} - -static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) -{ - unsigned int nr; - - spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); - - nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); - memcpy(stash->pvec.pages + stash->pvec.nr, - pvec->pages + pvec->nr - nr, - sizeof(pvec->pages[0]) * nr); - stash->pvec.nr += nr; - - spin_unlock(&stash->lock); - - pvec->nr -= nr; -} - -static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) -{ - struct pagevec stack; - struct page *page; - - if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) - i915_gem_shrink_all(vm->i915); - - page = stash_pop_page(&vm->free_pages); - if (page) - return page; - - if (!vm->pt_kmap_wc) - return alloc_page(gfp); - - /* Look in our global stash of WC pages... */ - page = stash_pop_page(&vm->i915->mm.wc_stash); - if (page) - return page; - - /* - * Otherwise batch allocate pages to amortize cost of set_pages_wc. - * - * We have to be careful as page allocation may trigger the shrinker - * (via direct reclaim) which will fill up the WC stash underneath us. - * So we add our WB pages into a temporary pvec on the stack and merge - * them into the WC stash after all the allocations are complete. 
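
As a rough userspace analogue of the pattern the comment above describes (fill a private batch, convert the whole batch with one expensive call, and only then publish the spares to a shared stash), consider the sketch below; it is illustrative only, make_wc() merely stands in for set_pages_array_wc() and the stash is a mutex-protected array rather than the driver's pagestash:

#include <pthread.h>
#include <stdlib.h>

#define BATCH 15

static pthread_mutex_t stash_lock = PTHREAD_MUTEX_INITIALIZER;
static void *stash[64];
static int stash_nr;

/* stand-in for the expensive per-batch attribute change */
static void make_wc(void **pages, int nr) { (void)pages; (void)nr; }

static void *alloc_wc_page(void)
{
    void *batch[BATCH];
    int nr = 0;
    void *page;

    /* fast path: reuse a page someone already converted */
    pthread_mutex_lock(&stash_lock);
    page = stash_nr ? stash[--stash_nr] : NULL;
    pthread_mutex_unlock(&stash_lock);
    if (page)
        return page;

    /* slow path: allocate a private batch, convert it in one go */
    while (nr < BATCH && (batch[nr] = malloc(4096)))
        nr++;
    if (!nr)
        return NULL;
    make_wc(batch, nr);

    page = batch[--nr];

    /* publish the spares only after the conversion is complete */
    pthread_mutex_lock(&stash_lock);
    while (nr && stash_nr < 64)
        stash[stash_nr++] = batch[--nr];
    pthread_mutex_unlock(&stash_lock);

    /* anything that did not fit would be converted back and freed */
    while (nr)
        free(batch[--nr]);
    return page;
}

int main(void) { free(alloc_wc_page()); return 0; }
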
- */ - pagevec_init(&stack); - do { - struct page *page; - - page = alloc_page(gfp); - if (unlikely(!page)) - break; - - stack.pages[stack.nr++] = page; - } while (pagevec_space(&stack)); - - if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { - page = stack.pages[--stack.nr]; - - /* Merge spare WC pages to the global stash */ - if (stack.nr) - stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); - - /* Push any surplus WC pages onto the local VM stash */ - if (stack.nr) - stash_push_pagevec(&vm->free_pages, &stack); - } - - /* Return unwanted leftovers */ - if (unlikely(stack.nr)) { - WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); - __pagevec_release(&stack); - } - - return page; -} - -static void vm_free_pages_release(struct i915_address_space *vm, - bool immediate) -{ - struct pagevec *pvec = &vm->free_pages.pvec; - struct pagevec stack; - - lockdep_assert_held(&vm->free_pages.lock); - GEM_BUG_ON(!pagevec_count(pvec)); - - if (vm->pt_kmap_wc) { - /* - * When we use WC, first fill up the global stash and then - * only if full immediately free the overflow. - */ - stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); - - /* - * As we have made some room in the VM's free_pages, - * we can wait for it to fill again. Unless we are - * inside i915_address_space_fini() and must - * immediately release the pages! - */ - if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) - return; - - /* - * We have to drop the lock to allow ourselves to sleep, - * so take a copy of the pvec and clear the stash for - * others to use it as we sleep. - */ - stack = *pvec; - pagevec_reinit(pvec); - spin_unlock(&vm->free_pages.lock); - - pvec = &stack; - set_pages_array_wb(pvec->pages, pvec->nr); - - spin_lock(&vm->free_pages.lock); - } - - __pagevec_release(pvec); -} - -static void vm_free_page(struct i915_address_space *vm, struct page *page) -{ - /* - * On !llc, we need to change the pages back to WB. We only do so - * in bulk, so we rarely need to change the page attributes here, - * but doing so requires a stop_machine() from deep inside arch/x86/mm. - * To make detection of the possible sleep more likely, use an - * unconditional might_sleep() for everybody. 
- */ - might_sleep(); - spin_lock(&vm->free_pages.lock); - while (!pagevec_space(&vm->free_pages.pvec)) - vm_free_pages_release(vm, false); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); - pagevec_add(&vm->free_pages.pvec, page); - spin_unlock(&vm->free_pages.lock); -} - -static void i915_address_space_fini(struct i915_address_space *vm) -{ - spin_lock(&vm->free_pages.lock); - if (pagevec_count(&vm->free_pages.pvec)) - vm_free_pages_release(vm, true); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); - spin_unlock(&vm->free_pages.lock); - - drm_mm_takedown(&vm->mm); - - mutex_destroy(&vm->mutex); -} - -void __i915_vm_close(struct i915_address_space *vm) -{ - struct i915_vma *vma, *vn; - - mutex_lock(&vm->mutex); - list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { - struct drm_i915_gem_object *obj = vma->obj; - - /* Keep the obj (and hence the vma) alive as _we_ destroy it */ - if (!kref_get_unless_zero(&obj->base.refcount)) - continue; - - atomic_and(~I915_VMA_PIN_MASK, &vma->flags); - WARN_ON(__i915_vma_unbind(vma)); - __i915_vma_put(vma); - - i915_gem_object_put(obj); - } - GEM_BUG_ON(!list_empty(&vm->bound_list)); - mutex_unlock(&vm->mutex); -} - -static void __i915_vm_release(struct work_struct *work) -{ - struct i915_address_space *vm = - container_of(work, struct i915_address_space, rcu.work); - - vm->cleanup(vm); - i915_address_space_fini(vm); - - kfree(vm); -} - -void i915_vm_release(struct kref *kref) -{ - struct i915_address_space *vm = - container_of(kref, struct i915_address_space, ref); - - GEM_BUG_ON(i915_is_ggtt(vm)); - trace_i915_ppgtt_release(vm); - - queue_rcu_work(vm->i915->wq, &vm->rcu); -} - -static void i915_address_space_init(struct i915_address_space *vm, int subclass) -{ - kref_init(&vm->ref); - INIT_RCU_WORK(&vm->rcu, __i915_vm_release); - atomic_set(&vm->open, 1); - - /* - * The vm->mutex must be reclaim safe (for use in the shrinker). - * Do a dummy acquire now under fs_reclaim so that any allocation - * attempt holding the lock is immediately reported by lockdep. 
- */ - mutex_init(&vm->mutex); - lockdep_set_subclass(&vm->mutex, subclass); - i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); - - GEM_BUG_ON(!vm->total); - drm_mm_init(&vm->mm, 0, vm->total); - vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; - - stash_init(&vm->free_pages); - - INIT_LIST_HEAD(&vm->bound_list); -} - -static int __setup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p, - gfp_t gfp) -{ - p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); - if (unlikely(!p->page)) - return -ENOMEM; - - p->daddr = dma_map_page_attrs(vm->dma, - p->page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { - vm_free_page(vm, p->page); - return -ENOMEM; - } - - return 0; -} - -static int setup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p) -{ - return __setup_page_dma(vm, p, __GFP_HIGHMEM); -} - -static void cleanup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p) -{ - dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - vm_free_page(vm, p->page); -} - -#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) - -static void -fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) -{ - kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); -} - -#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) -#define fill32_px(px, v) do { \ - u64 v__ = lower_32_bits(v); \ - fill_px((px), v__ << 32 | v__); \ -} while (0) - -static int -setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) -{ - unsigned long size; - - /* - * In order to utilize 64K pages for an object with a size < 2M, we will - * need to support a 64K scratch page, given that every 16th entry for a - * page-table operating in 64K mode must point to a properly aligned 64K - * region, including any PTEs which happen to point to scratch. - * - * This is only relevant for the 48b PPGTT where we support - * huge-gtt-pages, see also i915_vma_insert(). However, as we share the - * scratch (read-only) between all vm, we create one 64k scratch page - * for all. 
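
The "every 16th entry" rule above follows directly from the page sizes: a 64K page spans sixteen 4K PTE slots, so a table operating in 64K mode only ever consumes the 16-aligned slots, and whatever those slots point at, scratch included, must be 64K aligned. A standalone sketch of that arithmetic (constants re-derived for illustration rather than taken from the i915 headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K   0x1000ull
#define SZ_64K  0x10000ull
#define PTES_PER_TABLE 512    /* a 4K table of 8-byte entries covers 2M */

int main(void)
{
    /* a 64K page spans 16 consecutive 4K slots ... */
    assert(SZ_64K / SZ_4K == 16);

    /* ... so in 64K mode only every 16th slot is ever looked at */
    for (unsigned int slot = 0; slot < PTES_PER_TABLE; slot++)
        if (slot % 16 == 0)
            printf("64K entry at slot %u -> table offset 0x%llx\n",
                   slot, (unsigned long long)(slot * SZ_4K));

    /* any address stored there, scratch included, must be 64K aligned */
    uint64_t scratch = 0xabcd0000ull;    /* placeholder address */
    assert((scratch & (SZ_64K - 1)) == 0);
    return 0;
}
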
- */ - size = I915_GTT_PAGE_SIZE_4K; - if (i915_vm_is_4lvl(vm) && - HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { - size = I915_GTT_PAGE_SIZE_64K; - gfp |= __GFP_NOWARN; - } - gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; - - do { - unsigned int order = get_order(size); - struct page *page; - dma_addr_t addr; - - page = alloc_pages(gfp, order); - if (unlikely(!page)) - goto skip; - - addr = dma_map_page_attrs(vm->dma, - page, 0, size, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, addr))) - goto free_page; - - if (unlikely(!IS_ALIGNED(addr, size))) - goto unmap_page; - - vm->scratch[0].base.page = page; - vm->scratch[0].base.daddr = addr; - vm->scratch_order = order; - return 0; - -unmap_page: - dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); -free_page: - __free_pages(page, order); -skip: - if (size == I915_GTT_PAGE_SIZE_4K) - return -ENOMEM; - - size = I915_GTT_PAGE_SIZE_4K; - gfp &= ~__GFP_NOWARN; - } while (1); -} - -static void cleanup_scratch_page(struct i915_address_space *vm) -{ - struct i915_page_dma *p = px_base(&vm->scratch[0]); - unsigned int order = vm->scratch_order; - - dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, - PCI_DMA_BIDIRECTIONAL); - __free_pages(p->page, order); -} - -static void free_scratch(struct i915_address_space *vm) -{ - int i; - - if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ - return; - - for (i = 1; i <= vm->top; i++) { - if (!px_dma(&vm->scratch[i])) - break; - cleanup_page_dma(vm, px_base(&vm->scratch[i])); - } - - cleanup_scratch_page(vm); -} - -static struct i915_page_table *alloc_pt(struct i915_address_space *vm) -{ - struct i915_page_table *pt; - - pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); - if (unlikely(!pt)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, &pt->base))) { - kfree(pt); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&pt->used, 0); - return pt; -} - -static struct i915_page_directory *__alloc_pd(size_t sz) -{ - struct i915_page_directory *pd; - - pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); - if (unlikely(!pd)) - return NULL; - - spin_lock_init(&pd->lock); - return pd; -} - -static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) -{ - struct i915_page_directory *pd; - - pd = __alloc_pd(sizeof(*pd)); - if (unlikely(!pd)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); - return ERR_PTR(-ENOMEM); - } - - return pd; -} - -static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) -{ - cleanup_page_dma(vm, pd); - kfree(pd); -} - -#define free_px(vm, px) free_pd(vm, px_base(px)) - -static inline void -write_dma_entry(struct i915_page_dma * const pdma, - const unsigned short idx, - const u64 encoded_entry) -{ - u64 * const vaddr = kmap_atomic(pdma->page); - - vaddr[idx] = encoded_entry; - kunmap_atomic(vaddr); -} - -static inline void -__set_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - struct i915_page_dma * const to, - u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) -{ - /* Each thread pre-pins the pd, and we may have a thread per pde. 
*/ - GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); - - atomic_inc(px_used(pd)); - pd->entry[idx] = to; - write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); -} - -#define set_pd_entry(pd, idx, to) \ - __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) - -static inline void -clear_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - const struct i915_page_scratch * const scratch) -{ - GEM_BUG_ON(atomic_read(px_used(pd)) == 0); - - write_dma_entry(px_base(pd), idx, scratch->encode); - pd->entry[idx] = NULL; - atomic_dec(px_used(pd)); -} - -static bool -release_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - struct i915_page_table * const pt, - const struct i915_page_scratch * const scratch) -{ - bool free = false; - - if (atomic_add_unless(&pt->used, -1, 1)) - return false; - - spin_lock(&pd->lock); - if (atomic_dec_and_test(&pt->used)) { - clear_pd_entry(pd, idx, scratch); - free = true; - } - spin_unlock(&pd->lock); - - return free; -} - -static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) -{ - struct drm_i915_private *dev_priv = ppgtt->vm.i915; - enum vgt_g2v_type msg; - int i; - - if (create) - atomic_inc(px_used(ppgtt->pd)); /* never remove */ - else - atomic_dec(px_used(ppgtt->pd)); - - mutex_lock(&dev_priv->vgpu.lock); - - if (i915_vm_is_4lvl(&ppgtt->vm)) { - const u64 daddr = px_dma(ppgtt->pd); - - I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); - - msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); - } else { - for (i = 0; i < GEN8_3LVL_PDPES; i++) { - const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); - - I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); - } - - msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); - } - - /* g2v_notify atomically (via hv trap) consumes the message packet. 
*/ - I915_WRITE(vgtif_reg(g2v_notify), msg); - - mutex_unlock(&dev_priv->vgpu.lock); -} - -/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ -#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ -#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) -#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) -#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) -#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) -#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) -#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) - -static inline unsigned int -gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) -{ - const int shift = gen8_pd_shift(lvl); - const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - - GEM_BUG_ON(start >= end); - end += ~mask >> gen8_pd_shift(1); - - *idx = i915_pde_index(start, shift); - if ((start ^ end) & mask) - return GEN8_PDES - *idx; - else - return i915_pde_index(end, shift) - *idx; -} - -static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) -{ - const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - - GEM_BUG_ON(start >= end); - return (start ^ end) & mask && (start & ~mask) == 0; -} - -static inline unsigned int gen8_pt_count(u64 start, u64 end) -{ - GEM_BUG_ON(start >= end); - if ((start ^ end) >> gen8_pd_shift(1)) - return GEN8_PDES - (start & (GEN8_PDES - 1)); - else - return end - start; -} - -static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm) -{ - unsigned int shift = __gen8_pte_shift(vm->top); - return (vm->total + (1ull << shift) - 1) >> shift; -} - -static inline struct i915_page_directory * -gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) -{ - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); - - if (vm->top == 2) - return ppgtt->pd; - else - return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); -} - -static inline struct i915_page_directory * -gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) -{ - return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT); -} - -static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, - struct i915_page_directory *pd, - int count, int lvl) -{ - if (lvl) { - void **pde = pd->entry; - - do { - if (!*pde) - continue; - - __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); - } while (pde++, --count); - } - - free_px(vm, pd); -} - -static void gen8_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - - if (intel_vgpu_active(vm->i915)) - gen8_ppgtt_notify_vgt(ppgtt, false); - - __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); - free_scratch(vm); -} - -static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, - struct i915_page_directory * const pd, - u64 start, const u64 end, int lvl) -{ - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; - unsigned int idx, len; - - GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); - - len = gen8_pd_range(start, end, lvl--, &idx); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", - __func__, vm, lvl + 1, start, end, - idx, len, atomic_read(px_used(pd))); - GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); - - do { - struct i915_page_table *pt = pd->entry[idx]; - - if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && - gen8_pd_contains(start, end, lvl)) { - DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", - __func__, vm, lvl + 1, idx, start, 
end); - clear_pd_entry(pd, idx, scratch); - __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); - start += (u64)I915_PDES << gen8_pd_shift(lvl); - continue; - } - - if (lvl) { - start = __gen8_ppgtt_clear(vm, as_pd(pt), - start, end, lvl); - } else { - unsigned int count; - u64 *vaddr; - - count = gen8_pt_count(start, end); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n", - __func__, vm, lvl, start, end, - gen8_pd_index(start, 0), count, - atomic_read(&pt->used)); - GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); - - vaddr = kmap_atomic_px(pt); - memset64(vaddr + gen8_pd_index(start, 0), - vm->scratch[0].encode, - count); - kunmap_atomic(vaddr); - - atomic_sub(count, &pt->used); - start += count; - } - - if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); - } while (idx++, --len); - - return start; -} - -static void gen8_ppgtt_clear(struct i915_address_space *vm, - u64 start, u64 length) -{ - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(range_overflows(start, length, vm->total)); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - GEM_BUG_ON(length == 0); - - __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, - start, start + length, vm->top); -} - -static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, - struct i915_page_directory * const pd, - u64 * const start, const u64 end, int lvl) -{ - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; - struct i915_page_table *alloc = NULL; - unsigned int idx, len; - int ret = 0; - - GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); - - len = gen8_pd_range(*start, end, lvl--, &idx); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", - __func__, vm, lvl + 1, *start, end, - idx, len, atomic_read(px_used(pd))); - GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); - - spin_lock(&pd->lock); - GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! 
*/ - do { - struct i915_page_table *pt = pd->entry[idx]; - - if (!pt) { - spin_unlock(&pd->lock); - - DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", - __func__, vm, lvl + 1, idx); - - pt = fetch_and_zero(&alloc); - if (lvl) { - if (!pt) { - pt = &alloc_pd(vm)->pt; - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - fill_px(pt, vm->scratch[lvl].encode); - } else { - if (!pt) { - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - if (intel_vgpu_active(vm->i915) || - gen8_pt_count(*start, end) < I915_PDES) - fill_px(pt, vm->scratch[lvl].encode); - } - - spin_lock(&pd->lock); - if (likely(!pd->entry[idx])) - set_pd_entry(pd, idx, pt); - else - alloc = pt, pt = pd->entry[idx]; - } - - if (lvl) { - atomic_inc(&pt->used); - spin_unlock(&pd->lock); - - ret = __gen8_ppgtt_alloc(vm, as_pd(pt), - start, end, lvl); - if (unlikely(ret)) { - if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); - goto out; - } - - spin_lock(&pd->lock); - atomic_dec(&pt->used); - GEM_BUG_ON(!atomic_read(&pt->used)); - } else { - unsigned int count = gen8_pt_count(*start, end); - - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n", - __func__, vm, lvl, *start, end, - gen8_pd_index(*start, 0), count, - atomic_read(&pt->used)); - - atomic_add(count, &pt->used); - /* All other pdes may be simultaneously removed */ - GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES); - *start += count; - } - } while (idx++, --len); - spin_unlock(&pd->lock); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc(struct i915_address_space *vm, - u64 start, u64 length) -{ - u64 from; - int err; - - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(range_overflows(start, length, vm->total)); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - GEM_BUG_ON(length == 0); - from = start; - - err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, - &start, start + length, vm->top); - if (unlikely(err && from != start)) - __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, - from, start, vm->top); - - return err; -} - -static inline struct sgt_dma { - struct scatterlist *sg; - dma_addr_t dma, max; -} sgt_dma(struct i915_vma *vma) { - struct scatterlist *sg = vma->pages->sgl; - dma_addr_t addr = sg_dma_address(sg); - return (struct sgt_dma) { sg, addr, addr + sg->length }; -} - -static __always_inline u64 -gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, - struct i915_page_directory *pdp, - struct sgt_dma *iter, - u64 idx, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); - gen8_pte_t *vaddr; - - pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); - do { - vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; - - iter->dma += I915_GTT_PAGE_SIZE; - if (iter->dma >= iter->max) { - iter->sg = __sg_next(iter->sg); - if (!iter->sg) { - idx = 0; - break; - } - - iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + iter->sg->length; - } - - if (gen8_pd_index(++idx, 0) == 0) { - if (gen8_pd_index(idx, 1) == 0) { - /* Limited by sg length for 3lvl */ - if (gen8_pd_index(idx, 2) == 0) - break; - - pd = pdp->entry[gen8_pd_index(idx, 2)]; - } - - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); - } - } while (1); - 
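
The idx arithmetic in this walker is easier to follow with the shifts written out: a 4K-granular index (the GPU address shifted down by 12) is split into 9-bit indices, one per page-directory level. The sketch below is a standalone re-derivation for illustration only; pd_index() and PD_SHIFT() are made-up stand-ins for the gen8_pd_index()/gen8_pd_shift() helpers:

#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT    12               /* 4K pages */
#define PDES         512              /* entries per level */
#define PD_SHIFT(lvl) ((lvl) * 9)     /* ilog2(512) == 9 */

/* index into level 'lvl' for a page-granular index (addr >> 12) */
static unsigned int pd_index(uint64_t idx, int lvl)
{
    return (idx >> PD_SHIFT(lvl)) & (PDES - 1);
}

int main(void)
{
    uint64_t addr = 0x0000123456789000ull;    /* some 48-bit GPU address */
    uint64_t idx = addr >> PTE_SHIFT;

    /* 4-level walk, top directory down to the page table */
    printf("lvl3 %u lvl2 %u lvl1 %u lvl0 %u\n",
           pd_index(idx, 3), pd_index(idx, 2),
           pd_index(idx, 1), pd_index(idx, 0));
    return 0;
}
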
kunmap_atomic(vaddr); - - return idx; -} - -static void gen8_ppgtt_insert_huge(struct i915_vma *vma, - struct sgt_dma *iter, - enum i915_cache_level cache_level, - u32 flags) -{ - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); - u64 start = vma->node.start; - dma_addr_t rem = iter->sg->length; - - GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm)); - - do { - struct i915_page_directory * const pdp = - gen8_pdp_for_page_address(vma->vm, start); - struct i915_page_directory * const pd = - i915_pd_entry(pdp, __gen8_pte_index(start, 2)); - gen8_pte_t encode = pte_encode; - unsigned int maybe_64K = -1; - unsigned int page_size; - gen8_pte_t *vaddr; - u16 index; - - if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && - IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && - rem >= I915_GTT_PAGE_SIZE_2M && - !__gen8_pte_index(start, 0)) { - index = __gen8_pte_index(start, 1); - encode |= GEN8_PDE_PS_2M; - page_size = I915_GTT_PAGE_SIZE_2M; - - vaddr = kmap_atomic_px(pd); - } else { - struct i915_page_table *pt = - i915_pt_entry(pd, __gen8_pte_index(start, 1)); - - index = __gen8_pte_index(start, 0); - page_size = I915_GTT_PAGE_SIZE; - - if (!index && - vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && - IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && - (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) - maybe_64K = __gen8_pte_index(start, 1); - - vaddr = kmap_atomic_px(pt); - } - - do { - GEM_BUG_ON(iter->sg->length < page_size); - vaddr[index++] = encode | iter->dma; - - start += page_size; - iter->dma += page_size; - rem -= page_size; - if (iter->dma >= iter->max) { - iter->sg = __sg_next(iter->sg); - if (!iter->sg) - break; - - rem = iter->sg->length; - iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + rem; - - if (maybe_64K != -1 && index < I915_PDES && - !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && - (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) - maybe_64K = -1; - - if (unlikely(!IS_ALIGNED(iter->dma, page_size))) - break; - } - } while (rem >= page_size && index < I915_PDES); - - kunmap_atomic(vaddr); - - /* - * Is it safe to mark the 2M block as 64K? -- Either we have - * filled whole page-table with 64K entries, or filled part of - * it and have reached the end of the sg table and we have - * enough padding. - */ - if (maybe_64K != -1 && - (index == I915_PDES || - (i915_vm_has_scratch_64K(vma->vm) && - !iter->sg && IS_ALIGNED(vma->node.start + - vma->node.size, - I915_GTT_PAGE_SIZE_2M)))) { - vaddr = kmap_atomic_px(pd); - vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; - kunmap_atomic(vaddr); - page_size = I915_GTT_PAGE_SIZE_64K; - - /* - * We write all 4K page entries, even when using 64K - * pages. In order to verify that the HW isn't cheating - * by using the 4K PTE instead of the 64K PTE, we want - * to remove all the surplus entries. If the HW skipped - * the 64K PTE, it will read/write into the scratch page - * instead - which we detect as missing results during - * selftests. 
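
A standalone model of the scrub may help here: every slot that is not 16-aligned is pointed back at scratch, so hardware that wrongly consumed the individual 4K entries of a 64K group reads scratch and shows up as missing results in the selftest. The table size, index and SCRATCH value below are placeholders, not the driver's encodings:

#include <stdint.h>
#include <stdio.h>

#define PTES    512
#define SCRATCH 0xdeadull    /* placeholder scratch encoding */

int main(void)
{
    uint64_t pt[PTES];
    unsigned int index = 64;    /* entries actually written, i.e. 4 x 64K */
    unsigned int i;

    /* all 4K slots are filled first, even in 64K mode */
    for (i = 0; i < index; i++)
        pt[i] = 0x1000ull * i;

    /* then every slot that is not 16-aligned is pointed back at scratch */
    for (i = 1; i < index; i += 16)
        for (unsigned int j = 0; j < 15; j++)
            pt[i + j] = SCRATCH;

    for (i = 0; i < index; i++)
        printf("%3u: %s\n", i, pt[i] == SCRATCH ? "scratch" : "64K entry");
    return 0;
}
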
- */ - if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { - u16 i; - - encode = vma->vm->scratch[0].encode; - vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); - - for (i = 1; i < index; i += 16) - memset64(vaddr + i, encode, 15); - - kunmap_atomic(vaddr); - } - } - - vma->page_sizes.gtt |= page_size; - } while (iter->sg); -} - -static void gen8_ppgtt_insert(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); - struct sgt_dma iter = sgt_dma(vma); - - if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { - gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags); - } else { - u64 idx = vma->node.start >> GEN8_PTE_SHIFT; - - do { - struct i915_page_directory * const pdp = - gen8_pdp_for_page_index(vm, idx); - - idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx, - cache_level, flags); - } while (idx); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - } -} - -static int gen8_init_scratch(struct i915_address_space *vm) -{ - int ret; - int i; - - /* - * If everybody agrees to not to write into the scratch page, - * we can reuse it for all vm, keeping contexts and processes separate. - */ - if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { - struct i915_address_space *clone = vm->gt->vm; - - GEM_BUG_ON(!clone->has_read_only); - - vm->scratch_order = clone->scratch_order; - memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); - px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ - return 0; - } - - ret = setup_scratch_page(vm, __GFP_HIGHMEM); - if (ret) - return ret; - - vm->scratch[0].encode = - gen8_pte_encode(px_dma(&vm->scratch[0]), - I915_CACHE_LLC, vm->has_read_only); - - for (i = 1; i <= vm->top; i++) { - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) - goto free_scratch; - - fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); - vm->scratch[i].encode = - gen8_pde_encode(px_dma(&vm->scratch[i]), - I915_CACHE_LLC); - } - - return 0; - -free_scratch: - free_scratch(vm); - return -ENOMEM; -} - -static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) -{ - struct i915_address_space *vm = &ppgtt->vm; - struct i915_page_directory *pd = ppgtt->pd; - unsigned int idx; - - GEM_BUG_ON(vm->top != 2); - GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); - - for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { - struct i915_page_directory *pde; - - pde = alloc_pd(vm); - if (IS_ERR(pde)) - return PTR_ERR(pde); - - fill_px(pde, vm->scratch[1].encode); - set_pd_entry(pd, idx, pde); - atomic_inc(px_used(pde)); /* keep pinned */ - } - wmb(); - - return 0; -} - -static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - ppgtt->vm.gt = gt; - ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; - ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - - i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - - ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; -} - -static struct i915_page_directory * -gen8_alloc_top_pd(struct i915_address_space *vm) -{ - const unsigned int count = gen8_pd_top_count(vm); - struct i915_page_directory *pd; - - GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); - - pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); - if (unlikely(!pd)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); 
- return ERR_PTR(-ENOMEM); - } - - fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); - atomic_inc(px_used(pd)); /* mark as pinned */ - return pd; -} - -/* - * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers - * with a net effect resembling a 2-level page table in normal x86 terms. Each - * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address - * space. - * - */ -static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ppgtt *ppgtt; - int err; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - - ppgtt_init(ppgtt, &i915->gt); - ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; - - /* - * From bdw, there is hw support for read-only pages in the PPGTT. - * - * Gen11 has HSDES#:1807136187 unresolved. Disable ro support - * for now. - * - * Gen12 has inherited the same read-only fault issue from gen11. - */ - ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12); - - /* There are only few exceptions for gen >=6. chv and bxt. - * And we are not sure about the latter so play safe for now. - */ - if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) - ppgtt->vm.pt_kmap_wc = true; - - err = gen8_init_scratch(&ppgtt->vm); - if (err) - goto err_free; - - ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); - if (IS_ERR(ppgtt->pd)) { - err = PTR_ERR(ppgtt->pd); - goto err_free_scratch; - } - - if (!i915_vm_is_4lvl(&ppgtt->vm)) { - err = gen8_preallocate_top_level_pdp(ppgtt); - if (err) - goto err_free_pd; - } - - ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; - ppgtt->vm.insert_entries = gen8_ppgtt_insert; - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; - ppgtt->vm.clear_range = gen8_ppgtt_clear; - - if (intel_vgpu_active(i915)) - gen8_ppgtt_notify_vgt(ppgtt, true); - - ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - - return ppgtt; - -err_free_pd: - __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, - gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); -err_free_scratch: - free_scratch(&ppgtt->vm); -err_free: - kfree(ppgtt); - return ERR_PTR(err); -} - -/* Write pde (index) from the page directory @pd to the page table @pt */ -static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, - const unsigned int pde, - const struct i915_page_table *pt) -{ - /* Caller needs to make sure the write completes if necessary */ - iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, - ppgtt->pd_addr + pde); -} - -static void gen7_ppgtt_enable(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 ecochk; - - intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); - - ecochk = intel_uncore_read(uncore, GAM_ECOCHK); - if (IS_HASWELL(i915)) { - ecochk |= ECOCHK_PPGTT_WB_HSW; - } else { - ecochk |= ECOCHK_PPGTT_LLC_IVB; - ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; - } - intel_uncore_write(uncore, GAM_ECOCHK, ecochk); - - for_each_engine(engine, gt, id) { - /* GFX_MODE is per-ring on gen7+ */ - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } -} - -static void gen6_ppgtt_enable(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - - intel_uncore_rmw(uncore, - GAC_ECO_BITS, - 0, - ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); - - intel_uncore_rmw(uncore, - GAB_CTL, - 0, - GAB_CTL_CONT_AFTER_PAGEFAULT); - - intel_uncore_rmw(uncore, - GAM_ECOCHK, - 0, - ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - - if (HAS_PPGTT(uncore->i915)) /* 
may be disabled for VT-d */ - intel_uncore_write(uncore, - GFX_MODE, - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); -} - -/* PPGTT support for Sandybdrige/Gen6 and later */ -static void gen6_ppgtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = vm->scratch[0].encode; - unsigned int pde = first_entry / GEN6_PTES; - unsigned int pte = first_entry % GEN6_PTES; - unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - - while (num_entries) { - struct i915_page_table * const pt = - i915_pt_entry(ppgtt->base.pd, pde++); - const unsigned int count = min(num_entries, GEN6_PTES - pte); - gen6_pte_t *vaddr; - - GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); - - num_entries -= count; - - GEM_BUG_ON(count > atomic_read(&pt->used)); - if (!atomic_sub_return(count, &pt->used)) - ppgtt->scan_for_unused_pt = true; - - /* - * Note that the hw doesn't support removing PDE on the fly - * (they are cached inside the context with no means to - * invalidate the cache), so we can only reset the PTE - * entries back to scratch. - */ - - vaddr = kmap_atomic_px(pt); - memset32(vaddr + pte, scratch_pte, count); - kunmap_atomic(vaddr); - - pte = 0; - } -} - -static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pd = ppgtt->pd; - unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; - unsigned act_pt = first_entry / GEN6_PTES; - unsigned act_pte = first_entry % GEN6_PTES; - const u32 pte_encode = vm->pte_encode(0, cache_level, flags); - struct sgt_dma iter = sgt_dma(vma); - gen6_pte_t *vaddr; - - GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); - - vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); - do { - vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); - - iter.dma += I915_GTT_PAGE_SIZE; - if (iter.dma == iter.max) { - iter.sg = __sg_next(iter.sg); - if (!iter.sg) - break; - - iter.dma = sg_dma_address(iter.sg); - iter.max = iter.dma + iter.sg->length; - } - - if (++act_pte == GEN6_PTES) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt)); - act_pte = 0; - } - } while (1); - kunmap_atomic(vaddr); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; -} - -static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) -{ - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *pt; - unsigned int pde; - - start = round_down(start, SZ_64K); - end = round_up(end, SZ_64K) - start; - - mutex_lock(&ppgtt->flush); - - gen6_for_each_pde(pt, pd, start, end, pde) - gen6_write_pde(ppgtt, pde, pt); - - mb(); - ioread32(ppgtt->pd_addr + pde - 1); - gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); - mb(); - - mutex_unlock(&ppgtt->flush); -} - -static int gen6_alloc_va_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *pt, *alloc = NULL; - intel_wakeref_t wakeref; - u64 from = start; - unsigned int pde; - int ret = 0; - - wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); - - spin_lock(&pd->lock); - gen6_for_each_pde(pt, pd, start, length, pde) { - const unsigned int count = gen6_pte_count(start, length); - - if (px_base(pt) 
== px_base(&vm->scratch[1])) { - spin_unlock(&pd->lock); - - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind_out; - } - - fill32_px(pt, vm->scratch[0].encode); - - spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch[1]) { - pd->entry[pde] = pt; - } else { - alloc = pt; - pt = pd->entry[pde]; - } - } - - atomic_add(count, &pt->used); - } - spin_unlock(&pd->lock); - - if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) - gen6_flush_pd(ppgtt, from, start); - - goto out; - -unwind_out: - gen6_ppgtt_clear_range(vm, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); - return ret; -} - -static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) -{ - struct i915_address_space * const vm = &ppgtt->base.vm; - struct i915_page_directory * const pd = ppgtt->base.pd; - int ret; - - ret = setup_scratch_page(vm, __GFP_HIGHMEM); - if (ret) - return ret; - - vm->scratch[0].encode = - vm->pte_encode(px_dma(&vm->scratch[0]), - I915_CACHE_NONE, PTE_READ_ONLY); - - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { - cleanup_scratch_page(vm); - return -ENOMEM; - } - - fill32_px(&vm->scratch[1], vm->scratch[0].encode); - memset_p(pd->entry, &vm->scratch[1], I915_PDES); - - return 0; -} - -static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) -{ - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); - struct i915_page_table *pt; - u32 pde; - - gen6_for_all_pdes(pt, pd, pde) - if (px_base(pt) != scratch) - free_px(&ppgtt->base.vm, pt); -} - -static void gen6_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - - __i915_vma_put(ppgtt->vma); - - gen6_ppgtt_free_pd(ppgtt); - free_scratch(vm); - - mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); - kfree(ppgtt->base.pd); -} - -static int pd_vma_set_pages(struct i915_vma *vma) -{ - vma->pages = ERR_PTR(-ENODEV); - return 0; -} - -static void pd_vma_clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - vma->pages = NULL; -} - -static int pd_vma_bind(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); - struct gen6_ppgtt *ppgtt = vma->private; - u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; - - px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); - ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; - - gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); - return 0; -} - -static void pd_vma_unbind(struct i915_vma *vma) -{ - struct gen6_ppgtt *ppgtt = vma->private; - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); - struct i915_page_table *pt; - unsigned int pde; - - if (!ppgtt->scan_for_unused_pt) - return; - - /* Free all no longer used page tables */ - gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { - if (px_base(pt) == scratch || atomic_read(&pt->used)) - continue; - - free_px(&ppgtt->base.vm, pt); - pd->entry[pde] = scratch; - } - - ppgtt->scan_for_unused_pt = false; -} - -static const struct i915_vma_ops pd_vma_ops = { - .set_pages = pd_vma_set_pages, - .clear_pages = pd_vma_clear_pages, - .bind_vma = pd_vma_bind, - .unbind_vma = pd_vma_unbind, -}; - -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - 
struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - -int gen6_ppgtt_pin(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - int err = 0; - - GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); - - /* - * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt - * which will be pinned into every active context. - * (When vma->pin_count becomes atomic, I expect we will naturally - * need a larger, unpacked, type and kill this redundancy.) - */ - if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) - return 0; - - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; - - /* - * PPGTT PDEs reside in the GGTT and consists of 512 entries. The - * allocator works in address space sizes, so it's multiplied by page - * size. We allocate at the top of the GTT to avoid fragmentation. - */ - if (!atomic_read(&ppgtt->pin_count)) { - err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); - } - if (!err) - atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); - - return err; -} - -void gen6_ppgtt_unpin(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); -} - -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - - if (!atomic_read(&ppgtt->pin_count)) - return; - - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); -} - -static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ggtt * const ggtt = &i915->ggtt; - struct gen6_ppgtt *ppgtt; - int err; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - - mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); - - ppgtt_init(&ppgtt->base, &i915->gt); - ppgtt->base.vm.top = 1; - - ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; - ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; - ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; - ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; - ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; - - ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - - ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - - err = gen6_ppgtt_init_scratch(ppgtt); - if (err) - goto err_pd; - - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); - goto err_scratch; - } - - return &ppgtt->base; - -err_scratch: - free_scratch(&ppgtt->base.vm); -err_pd: - kfree(ppgtt->base.pd); -err_free: - mutex_destroy(&ppgtt->pin_mutex); - kfree(ppgtt); - return ERR_PTR(err); -} - -static void gtt_write_workarounds(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; - - /* This function is for gtt related 
workarounds. This function is - * called on driver load and after a GPU reset, so you can place - * workarounds here even if they get overwritten by GPU reset. - */ - /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ - if (IS_BROADWELL(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); - else if (IS_CHERRYVIEW(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_LP(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); - - /* - * To support 64K PTEs we need to first enable the use of the - * Intermediate-Page-Size(IPS) bit of the PDE field via some magical - * mmio, otherwise the page-walker will simply ignore the IPS bit. This - * shouldn't be needed after GEN10. - * - * 64K pages were first introduced from BDW+, although technically they - * only *work* from gen9+. For pre-BDW we instead have the option for - * 32K pages, but we don't currently have any support for it in our - * driver. - */ - if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && - INTEL_GEN(i915) <= 10) - intel_uncore_rmw(uncore, - GEN8_GAMW_ECO_DEV_RW_IA, - 0, - GAMW_ECO_ENABLE_64K_IPS_FIELD); - - if (IS_GEN_RANGE(i915, 8, 11)) { - bool can_use_gtt_cache = true; - - /* - * According to the BSpec if we use 2M/1G pages then we also - * need to disable the GTT cache. At least on BDW we can see - * visual corruption when using 2M pages, and not disabling the - * GTT cache. - */ - if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M)) - can_use_gtt_cache = false; - - /* WaGttCachingOffByDefault */ - intel_uncore_write(uncore, - HSW_GTT_CACHE_EN, - can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0); - WARN_ON_ONCE(can_use_gtt_cache && - intel_uncore_read(uncore, - HSW_GTT_CACHE_EN) == 0); - } -} - -int i915_ppgtt_init_hw(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - gtt_write_workarounds(gt); - - if (IS_GEN(i915, 6)) - gen6_ppgtt_enable(gt); - else if (IS_GEN(i915, 7)) - gen7_ppgtt_enable(gt); - - return 0; -} - -static struct i915_ppgtt * -__ppgtt_create(struct drm_i915_private *i915) -{ - if (INTEL_GEN(i915) < 8) - return gen6_ppgtt_create(i915); - else - return gen8_ppgtt_create(i915); -} - -struct i915_ppgtt * -i915_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ppgtt *ppgtt; - - ppgtt = __ppgtt_create(i915); - if (IS_ERR(ppgtt)) - return ppgtt; - - trace_i915_ppgtt_create(&ppgtt->vm); - - return ppgtt; -} - -/* Certain Gen5 chipsets require require idling the GPU before - * unmapping anything from the GTT when VT-d is enabled. - */ -static bool needs_idle_maps(struct drm_i915_private *dev_priv) -{ - /* Query intel_iommu to see if we need the workaround. Presumably that - * was loaded first. - */ - return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); -} - -static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - - /* Don't bother messing with faults pre GEN6 as we have little - * documentation supporting that it's a good idea. 
- */ - if (INTEL_GEN(i915) < 6) - return; - - intel_gt_check_and_clear_faults(ggtt->vm.gt); - - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - - ggtt->invalidate(ggtt); -} - -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) -{ - ggtt_suspend_mappings(&i915->ggtt); -} - int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -2181,368 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, return -ENOSPC; } -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void gen8_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 unused) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen8_pte_t __iomem *pte = - (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); - - ggtt->invalidate(ggtt); -} - -static void gen8_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - struct sgt_iter sgt_iter; - gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); - dma_addr_t addr; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. - */ - - gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; - gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(gtt_entries++, pte_encode | addr); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. - */ - ggtt->invalidate(ggtt); -} - -static void gen6_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *pte = - (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - iowrite32(vm->pte_encode(addr, level, flags), pte); - - ggtt->invalidate(ggtt); -} - -/* - * Binds an object into the global gtt with the specified cache level. The object - * will be accessible to the GPU via commands whose operands reference offsets - * within the global GTT as well as accessible by the GPU through the GMADR - * mapped BAR (dev_priv->mm.gtt->gtt). - */ -static void gen6_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; - unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; - struct sgt_iter iter; - dma_addr_t addr; - for_each_sgt_daddr(addr, iter, vma->pages) - iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. 
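
The ordering this comment insists on, stream all the PTE writes into the aperture first and then pay for a single invalidate, can be modelled with a trivial standalone sketch; write_pte() and ggtt_invalidate() below are stand-ins for the iowrite and ggtt->invalidate() paths, not driver functions:

#include <stdint.h>
#include <stdio.h>

#define N_PAGES 8

static uint64_t gsm[N_PAGES];    /* stands in for the ioremapped GTT */
static int invalidations;

static void write_pte(unsigned int slot, uint64_t pte)
{
    gsm[slot] = pte;             /* one entry written to the aperture */
}

static void ggtt_invalidate(void)
{
    invalidations++;             /* the single posted flush at the end */
}

int main(void)
{
    uint64_t dma = 0x100000;

    /* batch all the PTE updates ... */
    for (unsigned int i = 0; i < N_PAGES; i++, dma += 0x1000)
        write_pte(i, dma | 1 /* valid */);

    /* ... and pay for exactly one invalidate once they are all visible */
    ggtt_invalidate();

    printf("wrote %d ptes, %d invalidate(s)\n", N_PAGES, invalidations);
    return 0;
}
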
- */
- ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0].encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = vm->i915;
-
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma *vma;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0].encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static int ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
- intel_wakeref_t wakeref;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
- return 0;
-}
-
-static void ggtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- u32 pte_flags;
- int ret;
-
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
-
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
- }
-
- if (flags & I915_VMA_GLOBAL_BIND) {
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- vma->vm->insert_entries(vma->vm, vma,
- cache_level, pte_flags);
- }
- }
-
- return 0;
-}
-
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space
*vm = - &i915_vm_to_ggtt(vma->vm)->alias->vm; - - vm->clear_range(vm, vma->node.start, vma->size); - } -} - void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -2563,1070 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); } -static int ggtt_set_pages(struct i915_vma *vma) -{ - int ret; - - GEM_BUG_ON(vma->pages); - - ret = i915_get_ggtt_vma_pages(vma); - if (ret) - return ret; - - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - -static void i915_ggtt_color_adjust(const struct drm_mm_node *node, - unsigned long color, - u64 *start, - u64 *end) -{ - if (i915_node_color_differs(node, color)) - *start += I915_GTT_PAGE_SIZE; - - /* Also leave a space between the unallocated reserved node after the - * GTT and any objects within the GTT, i.e. we use the color adjustment - * to insert a guard page to prevent prefetches crossing over the - * GTT boundary. - */ - node = list_next_entry(node, node_list); - if (node->color != color) - *end -= I915_GTT_PAGE_SIZE; -} - -static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) -{ - struct i915_ppgtt *ppgtt; - int err; - - ppgtt = i915_ppgtt_create(ggtt->vm.i915); - if (IS_ERR(ppgtt)) - return PTR_ERR(ppgtt); - - if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { - err = -ENODEV; - goto err_ppgtt; - } - - /* - * Note we only pre-allocate as far as the end of the global - * GTT. On 48b / 4-level page-tables, the difference is very, - * very significant! We have to preallocate as GVT/vgpu does - * not like the page directory disappearing. - */ - err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); - if (err) - goto err_ppgtt; - - ggtt->alias = ppgtt; - ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; - - GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); - ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; - - GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); - ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; - - return 0; - -err_ppgtt: - i915_vm_put(&ppgtt->vm); - return err; -} - -static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) -{ - struct i915_ppgtt *ppgtt; - - ppgtt = fetch_and_zero(&ggtt->alias); - if (!ppgtt) - return; - - i915_vm_put(&ppgtt->vm); - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; -} - -static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) -{ - u64 size; - int ret; - - if (!USES_GUC(ggtt->vm.i915)) - return 0; - - GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); - size = ggtt->vm.total - GUC_GGTT_TOP; - - ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, - GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, - PIN_NOEVICT); - if (ret) - DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); - - return ret; -} - -static void ggtt_release_guc_top(struct i915_ggtt *ggtt) -{ - if (drm_mm_node_allocated(&ggtt->uc_fw)) - drm_mm_remove_node(&ggtt->uc_fw); -} - -static void cleanup_init_ggtt(struct i915_ggtt *ggtt) -{ - ggtt_release_guc_top(ggtt); - if (drm_mm_node_allocated(&ggtt->error_capture)) - drm_mm_remove_node(&ggtt->error_capture); -} - -static int init_ggtt(struct i915_ggtt *ggtt) -{ - /* Let GEM Manage all of the aperture. - * - * However, leave one page at the end still bound to the scratch page. 
- * There are a number of places where the hardware apparently prefetches - * past the end of the object, and we've seen multiple hangs with the - * GPU head pointer stuck in a batchbuffer bound at the last page of the - * aperture. One page should be enough to keep any prefetching inside - * of the aperture. - */ - unsigned long hole_start, hole_end; - struct drm_mm_node *entry; - int ret; - - /* - * GuC requires all resources that we're sharing with it to be placed in - * non-WOPCM memory. If GuC is not present or not in use we still need a - * small bias as ring wraparound at offset 0 sometimes hangs. No idea - * why. - */ - ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); - - ret = intel_vgt_balloon(ggtt); - if (ret) - return ret; - - if (ggtt->mappable_end) { - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, - PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; - } - - /* - * The upper portion of the GuC address space has a sizeable hole - * (several MB) that is inaccessible by GuC. Reserve this range within - * GGTT as it can comfortably hold GuC/HuC firmware images. - */ - ret = ggtt_reserve_guc_top(ggtt); - if (ret) - goto err; - - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); - ggtt->vm.clear_range(&ggtt->vm, hole_start, - hole_end - hole_start); - } - - /* And finally clear the reserved guard page */ - ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - - return 0; - -err: - cleanup_init_ggtt(ggtt); - return ret; -} - -int i915_init_ggtt(struct drm_i915_private *i915) -{ - int ret; - - ret = init_ggtt(&i915->ggtt); - if (ret) - return ret; - - if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { - ret = init_aliasing_ppgtt(&i915->ggtt); - if (ret) - cleanup_init_ggtt(&i915->ggtt); - } - - return 0; -} - -static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) -{ - struct i915_vma *vma, *vn; - - atomic_set(&ggtt->vm.open, 0); - - rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ - flush_workqueue(ggtt->vm.i915->wq); - - mutex_lock(&ggtt->vm.mutex); - - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) - WARN_ON(__i915_vma_unbind(vma)); - - if (drm_mm_node_allocated(&ggtt->error_capture)) - drm_mm_remove_node(&ggtt->error_capture); - - ggtt_release_guc_top(ggtt); - intel_vgt_deballoon(ggtt); - - ggtt->vm.cleanup(&ggtt->vm); - - mutex_unlock(&ggtt->vm.mutex); - i915_address_space_fini(&ggtt->vm); - - arch_phys_wc_del(ggtt->mtrr); - - if (ggtt->iomap.size) - io_mapping_fini(&ggtt->iomap); -} - -/** - * i915_ggtt_driver_release - Clean up GGTT hardware initialization - * @i915: i915 device - */ -void i915_ggtt_driver_release(struct drm_i915_private *i915) -{ - struct pagevec *pvec; - - fini_aliasing_ppgtt(&i915->ggtt); - - ggtt_cleanup_hw(&i915->ggtt); - - pvec = &i915->mm.wc_stash.pvec; - if (pvec->nr) { - set_pages_array_wb(pvec->pages, pvec->nr); - __pagevec_release(pvec); - } -} - -static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; - return snb_gmch_ctl << 20; -} - -static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) -{ - bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; - bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; - if (bdw_gmch_ctl) - 
bdw_gmch_ctl = 1 << bdw_gmch_ctl; - -#ifdef CONFIG_X86_32 - /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ - if (bdw_gmch_ctl > 4) - bdw_gmch_ctl = 4; -#endif - - return bdw_gmch_ctl << 20; -} - -static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) -{ - gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; - gmch_ctrl &= SNB_GMCH_GGMS_MASK; - - if (gmch_ctrl) - return 1 << (20 + gmch_ctrl); - - return 0; -} - -static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - phys_addr_t phys_addr; - int ret; - - /* For Modern GENs the PTEs and register space are split in the BAR */ - phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; - - /* - * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range - * will be dropped. For WC mappings in general we have 64 byte burst - * writes when the WC buffer is flushed, so we can't use it, but have to - * resort to an uncached mapping. The WC issue is easily caught by the - * readback check when writing GTT PTE entries. - */ - if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) - ggtt->gsm = ioremap_nocache(phys_addr, size); - else - ggtt->gsm = ioremap_wc(phys_addr, size); - if (!ggtt->gsm) { - DRM_ERROR("Failed to map the ggtt page table\n"); - return -ENOMEM; - } - - ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); - if (ret) { - DRM_ERROR("Scratch setup failed\n"); - /* iounmap will also get called at remove, but meh */ - iounmap(ggtt->gsm); - return ret; - } - - ggtt->vm.scratch[0].encode = - ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), - I915_CACHE_NONE, 0); - - return 0; -} - -static void tgl_setup_private_ppat(struct intel_uncore *uncore) -{ - /* TGL doesn't support LLC or AGE settings */ - intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); - intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); - intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); - intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); -} - -static void cnl_setup_private_ppat(struct intel_uncore *uncore) -{ - intel_uncore_write(uncore, - GEN10_PAT_INDEX(0), - GEN8_PPAT_WB | GEN8_PPAT_LLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(1), - GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(2), - GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(3), - GEN8_PPAT_UC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(4), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(5), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(6), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(7), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); -} - -/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability - * bits. When using advanced contexts each context stores its own PAT, but - * writing this data shouldn't be harmful even in those cases. 
*/ -static void bdw_setup_private_ppat(struct intel_uncore *uncore) -{ - u64 pat; - - pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ - GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ - GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ - GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ - GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | - GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | - GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | - GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); -} - -static void chv_setup_private_ppat(struct intel_uncore *uncore) -{ - u64 pat; - - /* - * Map WB on BDW to snooped on CHV. - * - * Only the snoop bit has meaning for CHV, the rest is - * ignored. - * - * The hardware will never snoop for certain types of accesses: - * - CPU GTT (GMADR->GGTT->no snoop->memory) - * - PPGTT page tables - * - some other special cycles - * - * As with BDW, we also need to consider the following for GT accesses: - * "For GGTT, there is NO pat_sel[2:0] from the entry, - * so RTL will always use the value corresponding to - * pat_sel = 000". - * Which means we must set the snoop bit in PAT entry 0 - * in order to keep the global status page working. - */ - - pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | - GEN8_PPAT(1, 0) | - GEN8_PPAT(2, 0) | - GEN8_PPAT(3, 0) | - GEN8_PPAT(4, CHV_PPAT_SNOOP) | - GEN8_PPAT(5, CHV_PPAT_SNOOP) | - GEN8_PPAT(6, CHV_PPAT_SNOOP) | - GEN8_PPAT(7, CHV_PPAT_SNOOP); - - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); -} - -static void gen6_gmch_remove(struct i915_address_space *vm) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - - iounmap(ggtt->gsm); - cleanup_scratch_page(vm); -} - -static void setup_private_pat(struct intel_uncore *uncore) -{ - struct drm_i915_private *i915 = uncore->i915; - - GEM_BUG_ON(INTEL_GEN(i915) < 8); - - if (INTEL_GEN(i915) >= 12) - tgl_setup_private_ppat(uncore); - else if (INTEL_GEN(i915) >= 10) - cnl_setup_private_ppat(uncore); - else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) - chv_setup_private_ppat(uncore); - else - bdw_setup_private_ppat(uncore); -} - -static struct resource pci_resource(struct pci_dev *pdev, int bar) -{ - return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar)); -} - -static int gen8_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - unsigned int size; - u16 snb_gmch_ctl; - int err; - - /* TODO: We're not aware of mappable constraints on gen8 yet */ - if (!IS_DGFX(dev_priv)) { - ggtt->gmadr = pci_resource(pdev, 2); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); - if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); - - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (IS_CHERRYVIEW(dev_priv)) - size = chv_get_total_gtt_size(snb_gmch_ctl); - else - size = gen8_get_total_gtt_size(snb_gmch_ctl); - - ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - ggtt->vm.cleanup = 
gen6_gmch_remove; - ggtt->vm.insert_page = gen8_ggtt_insert_page; - ggtt->vm.clear_range = nop_clear_range; - if (intel_scanout_needs_vtd_wa(dev_priv)) - ggtt->vm.clear_range = gen8_ggtt_clear_range; - - ggtt->vm.insert_entries = gen8_ggtt_insert_entries; - - /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ - if (intel_ggtt_update_needs_vtd_wa(dev_priv) || - IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) { - ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; - ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; - if (ggtt->vm.clear_range != nop_clear_range) - ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; - } - - ggtt->invalidate = gen8_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - ggtt->vm.pte_encode = gen8_pte_encode; - - setup_private_pat(ggtt->vm.gt->uncore); - - return ggtt_probe_common(ggtt, size); -} - -static int gen6_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - unsigned int size; - u16 snb_gmch_ctl; - int err; - - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - - /* 64/512MB is the current min/max we actually know of, but this is just - * a coarse sanity check. - */ - if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { - DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); - return -ENXIO; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); - if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - - size = gen6_get_total_gtt_size(snb_gmch_ctl); - ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; - - ggtt->vm.clear_range = nop_clear_range; - if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) - ggtt->vm.clear_range = gen6_ggtt_clear_range; - ggtt->vm.insert_page = gen6_ggtt_insert_page; - ggtt->vm.insert_entries = gen6_ggtt_insert_entries; - ggtt->vm.cleanup = gen6_gmch_remove; - - ggtt->invalidate = gen6_ggtt_invalidate; - - if (HAS_EDRAM(dev_priv)) - ggtt->vm.pte_encode = iris_pte_encode; - else if (IS_HASWELL(dev_priv)) - ggtt->vm.pte_encode = hsw_pte_encode; - else if (IS_VALLEYVIEW(dev_priv)) - ggtt->vm.pte_encode = byt_pte_encode; - else if (INTEL_GEN(dev_priv) >= 7) - ggtt->vm.pte_encode = ivb_pte_encode; - else - ggtt->vm.pte_encode = snb_pte_encode; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - return ggtt_probe_common(ggtt, size); -} - -static void i915_gmch_remove(struct i915_address_space *vm) -{ - intel_gmch_remove(); -} - -static int i915_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - phys_addr_t gmadr_base; - int ret; - - ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); - if (!ret) { - DRM_ERROR("failed to set up gmch\n"); - return -EIO; - } - - intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); - - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(gmadr_base, - ggtt->mappable_end); - - ggtt->do_idle_maps = 
needs_idle_maps(dev_priv); - ggtt->vm.insert_page = i915_ggtt_insert_page; - ggtt->vm.insert_entries = i915_ggtt_insert_entries; - ggtt->vm.clear_range = i915_ggtt_clear_range; - ggtt->vm.cleanup = i915_gmch_remove; - - ggtt->invalidate = gmch_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - if (unlikely(ggtt->do_idle_maps)) - dev_notice(dev_priv->drm.dev, - "Applying Ironlake quirks for intel_iommu\n"); - - return 0; -} - -static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - int ret; - - ggtt->vm.gt = gt; - ggtt->vm.i915 = i915; - ggtt->vm.dma = &i915->drm.pdev->dev; - - if (INTEL_GEN(i915) <= 5) - ret = i915_gmch_probe(ggtt); - else if (INTEL_GEN(i915) < 8) - ret = gen6_gmch_probe(ggtt); - else - ret = gen8_gmch_probe(ggtt); - if (ret) - return ret; - - if ((ggtt->vm.total - 1) >> 32) { - DRM_ERROR("We never expected a Global GTT with more than 32bits" - " of address space! Found %lldM!\n", - ggtt->vm.total >> 20); - ggtt->vm.total = 1ULL << 32; - ggtt->mappable_end = - min_t(u64, ggtt->mappable_end, ggtt->vm.total); - } - - if (ggtt->mappable_end > ggtt->vm.total) { - DRM_ERROR("mappable aperture extends past end of GGTT," - " aperture=%pa, total=%llx\n", - &ggtt->mappable_end, ggtt->vm.total); - ggtt->mappable_end = ggtt->vm.total; - } - - /* GMADR is the PCI mmio aperture into the global GTT. */ - DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); - DRM_DEBUG_DRIVER("DSM size = %lluM\n", - (u64)resource_size(&intel_graphics_stolen_res) >> 20); - - return 0; -} - -/** - * i915_ggtt_probe_hw - Probe GGTT hardware location - * @i915: i915 device - */ -int i915_ggtt_probe_hw(struct drm_i915_private *i915) -{ - int ret; - - ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); - if (ret) - return ret; - - if (intel_vtd_active()) - dev_info(i915->drm.dev, "VT-d active for gfx access\n"); - - return 0; -} - -static int ggtt_init_hw(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - - i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - - ggtt->vm.is_ggtt = true; - - /* Only VLV supports read-only GGTT mappings */ - ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); - - if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) - ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; - - if (ggtt->mappable_end) { - if (!io_mapping_init_wc(&ggtt->iomap, - ggtt->gmadr.start, - ggtt->mappable_end)) { - ggtt->vm.cleanup(&ggtt->vm); - return -EIO; - } - - ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, - ggtt->mappable_end); - } - - i915_ggtt_init_fences(ggtt); - - return 0; -} - -/** - * i915_ggtt_init_hw - Initialize GGTT hardware - * @dev_priv: i915 device - */ -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) -{ - int ret; - - stash_init(&dev_priv->mm.wc_stash); - - /* Note that we use page colouring to enforce a guard page at the - * end of the address space. This is required as the CS may prefetch - * beyond the end of the batch buffer, across the page boundary, - * and beyond the end of the GTT if we do not provide a guard. 
- */ - ret = ggtt_init_hw(&dev_priv->ggtt); - if (ret) - return ret; - - return 0; -} - -int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) - return -EIO; - - return 0; -} - -void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) -{ - GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); - - ggtt->invalidate = guc_ggtt_invalidate; - - ggtt->invalidate(ggtt); -} - -void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) -{ - /* XXX Temporary pardon for error unload */ - if (ggtt->invalidate == gen8_ggtt_invalidate) - return; - - /* We should only be called after i915_ggtt_enable_guc() */ - GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - - ggtt->invalidate = gen8_ggtt_invalidate; - - ggtt->invalidate(ggtt); -} - -static void ggtt_restore_mappings(struct i915_ggtt *ggtt) -{ - struct i915_vma *vma, *vn; - bool flush = false; - int open; - - intel_gt_check_and_clear_faults(ggtt->vm.gt); - - mutex_lock(&ggtt->vm.mutex); - - /* First fill our portion of the GTT with scratch pages */ - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - - /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); - - /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { - struct drm_i915_gem_object *obj = vma->obj; - - if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) - continue; - - if (!__i915_vma_unbind(vma)) - continue; - - clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); - WARN_ON(i915_vma_bind(vma, - obj ? obj->cache_level : 0, - PIN_GLOBAL, NULL)); - if (obj) { /* only used during resume => exclusive access */ - flush |= fetch_and_zero(&obj->write_domain); - obj->read_domains |= I915_GEM_DOMAIN_GTT; - } - } - - atomic_set(&ggtt->vm.open, open); - ggtt->invalidate(ggtt); - - mutex_unlock(&ggtt->vm.mutex); - - if (flush) - wbinvd_on_all_cpus(); -} - -void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) -{ - struct i915_ggtt *ggtt = &i915->ggtt; - - ggtt_restore_mappings(ggtt); - - if (INTEL_GEN(i915) >= 8) - setup_private_pat(ggtt->vm.gt->uncore); -} - -static struct scatterlist * -rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int stride, - struct sg_table *st, struct scatterlist *sg) -{ - unsigned int column, row; - unsigned int src_idx; - - for (column = 0; column < width; column++) { - src_idx = stride * (height - 1) + column + offset; - for (row = 0; row < height; row++) { - st->nents++; - /* We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. - */ - sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); - sg_dma_address(sg) = - i915_gem_object_get_dma_address(obj, src_idx); - sg_dma_len(sg) = I915_GTT_PAGE_SIZE; - sg = sg_next(sg); - src_idx -= stride; - } - } - - return sg; -} - -static noinline struct sg_table * -intel_rotate_pages(struct intel_rotation_info *rot_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_rotation_info_size(rot_info); - struct sg_table *st; - struct scatterlist *sg; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. 
*/ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { - sg = rotate_pages(obj, rot_info->plane[i].offset, - rot_info->plane[i].width, rot_info->plane[i].height, - rot_info->plane[i].stride, st, sg); - } - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int stride, - struct sg_table *st, struct scatterlist *sg) -{ - unsigned int row; - - for (row = 0; row < height; row++) { - unsigned int left = width * I915_GTT_PAGE_SIZE; - - while (left) { - dma_addr_t addr; - unsigned int length; - - /* We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. - */ - - addr = i915_gem_object_get_dma_address_len(obj, offset, &length); - - length = min(left, length); - - st->nents++; - - sg_set_page(sg, NULL, length, 0); - sg_dma_address(sg) = addr; - sg_dma_len(sg) = length; - sg = sg_next(sg); - - offset += length / I915_GTT_PAGE_SIZE; - left -= length; - } - - offset += stride - width; - } - - return sg; -} - -static noinline struct sg_table * -intel_remap_pages(struct intel_remapped_info *rem_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_remapped_info_size(rem_info); - struct sg_table *st; - struct scatterlist *sg; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. */ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - sg = remap_pages(obj, rem_info->plane[i].offset, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].stride, st, sg); - } - - i915_sg_trim(st); - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static noinline struct sg_table * -intel_partial_pages(const struct i915_ggtt_view *view, - struct drm_i915_gem_object *obj) -{ - struct sg_table *st; - struct scatterlist *sg, *iter; - unsigned int count = view->partial.size; - unsigned int offset; - int ret = -ENOMEM; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, count, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; - st->nents = 0; - do { - unsigned int len; - - len = min(iter->length - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; - - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. 
*/ - - return st; - } - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); - -err_sg_alloc: - kfree(st); -err_st_alloc: - return ERR_PTR(ret); -} - -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma) -{ - int ret; - - /* The vma->pages are only valid within the lifespan of the borrowed - * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so - * must be the vma->pages. A simple rule is that vma->pages must only - * be accessed when the obj->mm.pages are pinned. - */ - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); - - switch (vma->ggtt_view.type) { - default: - GEM_BUG_ON(vma->ggtt_view.type); - /* fall through */ - case I915_GGTT_VIEW_NORMAL: - vma->pages = vma->obj->mm.pages; - return 0; - - case I915_GGTT_VIEW_ROTATED: - vma->pages = - intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); - break; - - case I915_GGTT_VIEW_REMAPPED: - vma->pages = - intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); - break; - - case I915_GGTT_VIEW_PARTIAL: - vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); - break; - } - - ret = 0; - if (IS_ERR(vma->pages)) { - ret = PTR_ERR(vma->pages); - vma->pages = NULL; - DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", - vma->ggtt_view.type, ret); - } - return ret; -} - /** * i915_gem_gtt_reserve - reserve a node in an address_space (GTT) * @vm: the &struct i915_address_space @@ -3848,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_gtt.c" #include "selftests/i915_gem_gtt.c" #endif diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 31a4a96ddd0d..f6226df9f972 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -1,639 +1,21 @@ +/* SPDX-License-Identifier: MIT */ /* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Please try to maintain the following order within this file unless it makes - * sense to do otherwise. From top to bottom: - * 1. typedefs - * 2. #defines, and macros - * 3. structure definitions - * 4. function prototypes - * - * Within each section, please try to order by generation in ascending order, - * from top to bottom (ie. gen6 on the top, gen8 on the bottom). 
+ * Copyright © 2020 Intel Corporation */ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ #include <linux/io-mapping.h> -#include <linux/kref.h> -#include <linux/mm.h> -#include <linux/pagevec.h> -#include <linux/workqueue.h> +#include <linux/types.h> #include <drm/drm_mm.h> -#include "gt/intel_reset.h" -#include "i915_gem_fence_reg.h" -#include "i915_request.h" +#include "gt/intel_gtt.h" #include "i915_scatterlist.h" -#include "i915_selftest.h" -#include "gt/intel_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) -#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) -#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) - -#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K -#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M - -#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE - -#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE - -#define I915_FENCE_REG_NONE -1 -#define I915_MAX_NUM_FENCES 32 -/* 32 fences + sign bit for FENCE_REG_NONE */ -#define I915_MAX_NUM_FENCE_BITS 6 - -struct drm_i915_file_private; struct drm_i915_gem_object; -struct i915_vma; -struct intel_gt; - -typedef u32 gen6_pte_t; -typedef u64 gen8_pte_t; - -#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) - -/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ -#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) -#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) -#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) -#define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_UNCACHED (1 << 1) -#define GEN6_PTE_VALID (1 << 0) - -#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len))) -#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) -#define I915_PDES 512 -#define I915_PDE_MASK (I915_PDES - 1) -#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) - -#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) -#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) -#define GEN6_PD_ALIGN (PAGE_SIZE * 16) -#define GEN6_PDE_SHIFT 22 -#define GEN6_PDE_VALID (1 << 0) - -#define GEN7_PTE_CACHE_L3_LLC (3 << 1) - -#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) -#define BYT_PTE_WRITEABLE (1 << 1) - -/* Cacheability Control is a 4-bit value. The low three bits are stored in bits - * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. - */ -#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ - (((bits) & 0x8) << (11 - 3))) -#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) -#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) -#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) -#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) -#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) -#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) -#define HSW_PTE_UNCACHED (0) -#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) -#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) - -/* - * GEN8 32b style address is defined as a 3 level page table: - * 31:30 | 29:21 | 20:12 | 11:0 - * PDPE | PDE | PTE | offset - * The difference as compared to normal x86 3 level page table is the PDPEs are - * programmed via register. 
- * - * GEN8 48b style address is defined as a 4 level page table: - * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 - * PML4E | PDPE | PDE | PTE | offset - */ -#define GEN8_3LVL_PDPES 4 - -#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) -#define PPAT_CACHED_PDE 0 /* WB LLC */ -#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */ -#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */ - -#define CHV_PPAT_SNOOP (1<<6) -#define GEN8_PPAT_AGE(x) ((x)<<4) -#define GEN8_PPAT_LLCeLLC (3<<2) -#define GEN8_PPAT_LLCELLC (2<<2) -#define GEN8_PPAT_LLC (1<<2) -#define GEN8_PPAT_WB (3<<0) -#define GEN8_PPAT_WT (2<<0) -#define GEN8_PPAT_WC (1<<0) -#define GEN8_PPAT_UC (0<<0) -#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) -#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) - -#define GEN8_PDE_IPS_64K BIT(11) -#define GEN8_PDE_PS_2M BIT(7) - -#define for_each_sgt_daddr(__dp, __iter, __sgt) \ - __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) - -struct intel_remapped_plane_info { - /* in gtt pages */ - unsigned int width, height, stride, offset; -} __packed; - -struct intel_remapped_info { - struct intel_remapped_plane_info plane[2]; - unsigned int unused_mbz; -} __packed; - -struct intel_rotation_info { - struct intel_remapped_plane_info plane[2]; -} __packed; - -struct intel_partial_info { - u64 offset; - unsigned int size; -} __packed; - -enum i915_ggtt_view_type { - I915_GGTT_VIEW_NORMAL = 0, - I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), - I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), - I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info), -}; - -static inline void assert_i915_gem_gtt_types(void) -{ - BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); - BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); - BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int)); - - /* Check that rotation/remapped shares offsets for simplicity */ - BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) != - offsetof(struct intel_rotation_info, plane[0])); - BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) != - offsetofend(struct intel_rotation_info, plane[1])); - - /* As we encode the size of each branch inside the union into its type, - * we have to be careful that each branch has a unique size. - */ - switch ((enum i915_ggtt_view_type)0) { - case I915_GGTT_VIEW_NORMAL: - case I915_GGTT_VIEW_PARTIAL: - case I915_GGTT_VIEW_ROTATED: - case I915_GGTT_VIEW_REMAPPED: - /* gcc complains if these are identical cases */ - break; - } -} - -struct i915_ggtt_view { - enum i915_ggtt_view_type type; - union { - /* Members need to contain no holes/padding */ - struct intel_partial_info partial; - struct intel_rotation_info rotated; - struct intel_remapped_info remapped; - }; -}; - -enum i915_cache_level; - -struct i915_vma; - -struct i915_page_dma { - struct page *page; - union { - dma_addr_t daddr; - - /* For gen6/gen7 only. 
This is the offset in the GGTT - * where the page directory entries for PPGTT begin - */ - u32 ggtt_offset; - }; -}; - -struct i915_page_scratch { - struct i915_page_dma base; - u64 encode; -}; - -struct i915_page_table { - struct i915_page_dma base; - atomic_t used; -}; - -struct i915_page_directory { - struct i915_page_table pt; - spinlock_t lock; - void *entry[512]; -}; - -#define __px_choose_expr(x, type, expr, other) \ - __builtin_choose_expr( \ - __builtin_types_compatible_p(typeof(x), type) || \ - __builtin_types_compatible_p(typeof(x), const type), \ - ({ type __x = (type)(x); expr; }), \ - other) - -#define px_base(px) \ - __px_choose_expr(px, struct i915_page_dma *, __x, \ - __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ - __px_choose_expr(px, struct i915_page_table *, &__x->base, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ - (void)0)))) -#define px_dma(px) (px_base(px)->daddr) - -#define px_pt(px) \ - __px_choose_expr(px, struct i915_page_table *, __x, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ - (void)0)) -#define px_used(px) (&px_pt(px)->used) - -struct i915_vma_ops { - /* Map an object into an address space with the given cache flags. */ - int (*bind_vma)(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); - /* - * Unmap an object from an address space. This usually consists of - * setting the valid PTE entries to a reserved scratch page. - */ - void (*unbind_vma)(struct i915_vma *vma); - - int (*set_pages)(struct i915_vma *vma); - void (*clear_pages)(struct i915_vma *vma); -}; - -struct pagestash { - spinlock_t lock; - struct pagevec pvec; -}; - -struct i915_address_space { - struct kref ref; - struct rcu_work rcu; - - struct drm_mm mm; - struct intel_gt *gt; - struct drm_i915_private *i915; - struct device *dma; - /* Every address space belongs to a struct file - except for the global - * GTT that is owned by the driver (and so @file is set to NULL). In - * principle, no information should leak from one context to another - * (or between files/processes etc) unless explicitly shared by the - * owner. Tracking the owner is important in order to free up per-file - * objects along with the file, to aide resource tracking, and to - * assign blame. - */ - struct drm_i915_file_private *file; - u64 total; /* size addr space maps (ex. 2GB for ggtt) */ - u64 reserved; /* size addr space reserved */ - - unsigned int bind_async_flags; - - /* - * Each active user context has its own address space (in full-ppgtt). - * Since the vm may be shared between multiple contexts, we count how - * many contexts keep us "open". Once open hits zero, we are closed - * and do not allow any new attachments, and proceed to shutdown our - * vma and page directories. - */ - atomic_t open; - - struct mutex mutex; /* protects vma and our lists */ -#define VM_CLASS_GGTT 0 -#define VM_CLASS_PPGTT 1 - - struct i915_page_scratch scratch[4]; - unsigned int scratch_order; - unsigned int top; - - /** - * List of vma currently bound. 
- */ - struct list_head bound_list; - - struct pagestash free_pages; - - /* Global GTT */ - bool is_ggtt:1; - - /* Some systems require uncached updates of the page directories */ - bool pt_kmap_wc:1; - - /* Some systems support read-only mappings for GGTT and/or PPGTT */ - bool has_read_only:1; - - u64 (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level, - u32 flags); /* Create a valid PTE */ -#define PTE_READ_ONLY (1<<0) - - int (*allocate_va_range)(struct i915_address_space *vm, - u64 start, u64 length); - void (*clear_range)(struct i915_address_space *vm, - u64 start, u64 length); - void (*insert_page)(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level cache_level, - u32 flags); - void (*insert_entries)(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); - void (*cleanup)(struct i915_address_space *vm); - - struct i915_vma_ops vma_ops; - - I915_SELFTEST_DECLARE(struct fault_attr fault_attr); - I915_SELFTEST_DECLARE(bool scrub_64K); -}; - -#define i915_is_ggtt(vm) ((vm)->is_ggtt) - -static inline bool -i915_vm_is_4lvl(const struct i915_address_space *vm) -{ - return (vm->total - 1) >> 32; -} - -static inline bool -i915_vm_has_scratch_64K(struct i915_address_space *vm) -{ - return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); -} - -static inline bool -i915_vm_has_cache_coloring(struct i915_address_space *vm) -{ - return i915_is_ggtt(vm) && vm->mm.color_adjust; -} - -/* The Graphics Translation Table is the way in which GEN hardware translates a - * Graphics Virtual Address into a Physical Address. In addition to the normal - * collateral associated with any va->pa translations GEN hardware also has a - * portion of the GTT which can be mapped by the CPU and remain both coherent - * and correct (in cases like swizzling). That region is referred to as GMADR in - * the spec. - */ -struct i915_ggtt { - struct i915_address_space vm; - - struct io_mapping iomap; /* Mapping to our CPU mappable region */ - struct resource gmadr; /* GMADR resource */ - resource_size_t mappable_end; /* End offset that we can CPU map */ - - /** "Graphics Stolen Memory" holds the global PTEs */ - void __iomem *gsm; - void (*invalidate)(struct i915_ggtt *ggtt); - - /** PPGTT used for aliasing the PPGTT with the GTT */ - struct i915_ppgtt *alias; - - bool do_idle_maps; - - int mtrr; - - /** Bit 6 swizzling required for X tiling */ - u32 bit_6_swizzle_x; - /** Bit 6 swizzling required for Y tiling */ - u32 bit_6_swizzle_y; - - u32 pin_bias; - - unsigned int num_fences; - struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; - struct list_head fence_list; - - /** List of all objects in gtt_space, currently mmaped by userspace. - * All objects within this list must also be on bound_list. 
- */ - struct list_head userfault_list; - - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ - struct intel_wakeref_auto userfault_wakeref; - - struct drm_mm_node error_capture; - struct drm_mm_node uc_fw; -}; - -struct i915_ppgtt { - struct i915_address_space vm; - - struct i915_page_directory *pd; -}; - -struct gen6_ppgtt { - struct i915_ppgtt base; - - struct mutex flush; - struct i915_vma *vma; - gen6_pte_t __iomem *pd_addr; - - atomic_t pin_count; - struct mutex pin_mutex; - - bool scan_for_unused_pt; -}; - -#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) - -static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) -{ - BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base)); - return __to_gen6_ppgtt(base); -} - -/* - * gen6_for_each_pde() iterates over every pde from start until start+length. - * If start and start+length are not perfectly divisible, the macro will round - * down and up as needed. Start=0 and length=2G effectively iterates over - * every PDE in the system. The macro modifies ALL its parameters except 'pd', - * so each of the other parameters should preferably be a simple variable, or - * at most an lvalue with no side-effects! - */ -#define gen6_for_each_pde(pt, pd, start, length, iter) \ - for (iter = gen6_pde_index(start); \ - length > 0 && iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen6_for_all_pdes(pt, pd, iter) \ - for (iter = 0; \ - iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ++iter) - -static inline u32 i915_pte_index(u64 address, unsigned int pde_shift) -{ - const u32 mask = NUM_PTE(pde_shift) - 1; - - return (address >> PAGE_SHIFT) & mask; -} - -/* Helper to counts the number of PTEs within the given length. This count - * does not cross a page table boundary, so the max value would be - * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. 
-*/ -static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift) -{ - const u64 mask = ~((1ULL << pde_shift) - 1); - u64 end; - - GEM_BUG_ON(length == 0); - GEM_BUG_ON(offset_in_page(addr | length)); - - end = addr + length; - - if ((addr & mask) != (end & mask)) - return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); - - return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); -} - -static inline u32 i915_pde_index(u64 addr, u32 shift) -{ - return (addr >> shift) & I915_PDE_MASK; -} - -static inline u32 gen6_pte_index(u32 addr) -{ - return i915_pte_index(addr, GEN6_PDE_SHIFT); -} - -static inline u32 gen6_pte_count(u32 addr, u32 length) -{ - return i915_pte_count(addr, length, GEN6_PDE_SHIFT); -} - -static inline u32 gen6_pde_index(u32 addr) -{ - return i915_pde_index(addr, GEN6_PDE_SHIFT); -} - -static inline struct i915_page_table * -i915_pt_entry(const struct i915_page_directory * const pd, - const unsigned short n) -{ - return pd->entry[n]; -} - -static inline struct i915_page_directory * -i915_pd_entry(const struct i915_page_directory * const pdp, - const unsigned short n) -{ - return pdp->entry[n]; -} - -static inline dma_addr_t -i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) -{ - struct i915_page_dma *pt = ppgtt->pd->entry[n]; - - return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); -} - -static inline struct i915_ggtt * -i915_vm_to_ggtt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_ggtt, vm)); - GEM_BUG_ON(!i915_is_ggtt(vm)); - return container_of(vm, struct i915_ggtt, vm); -} - -static inline struct i915_ppgtt * -i915_vm_to_ppgtt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); - GEM_BUG_ON(i915_is_ggtt(vm)); - return container_of(vm, struct i915_ppgtt, vm); -} - -int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); -int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); -void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); -int i915_init_ggtt(struct drm_i915_private *dev_priv); -void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); - -static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) -{ - return ggtt->mappable_end > 0; -} - -int i915_ppgtt_init_hw(struct intel_gt *gt); - -struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); - -static inline struct i915_address_space * -i915_vm_get(struct i915_address_space *vm) -{ - kref_get(&vm->ref); - return vm; -} - -void i915_vm_release(struct kref *kref); - -static inline void i915_vm_put(struct i915_address_space *vm) -{ - kref_put(&vm->ref, i915_vm_release); -} - -static inline struct i915_address_space * -i915_vm_open(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - atomic_inc(&vm->open); - return i915_vm_get(vm); -} - -static inline bool -i915_vm_tryopen(struct i915_address_space *vm) -{ - if (atomic_add_unless(&vm->open, 1, 0)) - return i915_vm_get(vm); - - return false; -} - -void __i915_vm_close(struct i915_address_space *vm); - -static inline void -i915_vm_close(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - if (atomic_dec_and_test(&vm->open)) - __i915_vm_close(vm); - - i915_vm_put(vm); -} - -int gen6_ppgtt_pin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); - -void 
i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); -void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); +struct i915_address_space; int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages); @@ -664,6 +46,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, #define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */ #define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */ -#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) +#define PIN_OFFSET_MASK I915_GTT_PAGE_MASK #endif diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fda0977d2059..4c1836f0a991 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -41,6 +41,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_gt_pm.h" #include "i915_drv.h" #include "i915_gpu_error.h" @@ -232,14 +233,13 @@ static void pool_free(struct pagevec *pv, void *addr) #ifdef CONFIG_DRM_I915_COMPRESS_ERROR -struct compress { +struct i915_vma_compress { struct pagevec pool; struct z_stream_s zstream; void *tmp; - bool wc; }; -static bool compress_init(struct compress *c) +static bool compress_init(struct i915_vma_compress *c) { struct z_stream_s *zstream = &c->zstream; @@ -261,7 +261,7 @@ static bool compress_init(struct compress *c) return true; } -static bool compress_start(struct compress *c) +static bool compress_start(struct i915_vma_compress *c) { struct z_stream_s *zstream = &c->zstream; void *workspace = zstream->workspace; @@ -272,8 +272,8 @@ static bool compress_start(struct compress *c) return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; } -static void *compress_next_page(struct compress *c, - struct drm_i915_error_object *dst) +static void *compress_next_page(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { void *page; @@ -287,14 +287,15 @@ static void *compress_next_page(struct compress *c, return dst->pages[dst->page_count++] = page; } -static int compress_page(struct compress *c, +static int compress_page(struct i915_vma_compress *c, void *src, - struct drm_i915_error_object *dst) + struct i915_vma_coredump *dst, + bool wc) { struct z_stream_s *zstream = &c->zstream; zstream->next_in = src; - if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) + if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) zstream->next_in = c->tmp; zstream->avail_in = PAGE_SIZE; @@ -318,8 +319,8 @@ static int compress_page(struct compress *c, return 0; } -static int compress_flush(struct compress *c, - struct drm_i915_error_object *dst) +static int compress_flush(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { struct z_stream_s *zstream = &c->zstream; @@ -347,12 +348,12 @@ end: return 0; } -static void compress_finish(struct compress *c) +static void compress_finish(struct i915_vma_compress *c) { zlib_deflateEnd(&c->zstream); } -static void compress_fini(struct compress *c) +static void compress_fini(struct i915_vma_compress *c) { kfree(c->zstream.workspace); if (c->tmp) @@ -367,24 +368,24 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else -struct compress { +struct i915_vma_compress { struct pagevec pool; - bool wc; }; -static bool compress_init(struct compress *c) +static bool compress_init(struct i915_vma_compress *c) { return pool_init(&c->pool, ALLOW_FAIL) == 0; } -static bool compress_start(struct compress *c) +static bool compress_start(struct i915_vma_compress *c) { 
return true; } -static int compress_page(struct compress *c, +static int compress_page(struct i915_vma_compress *c, void *src, - struct drm_i915_error_object *dst) + struct i915_vma_coredump *dst, + bool wc) { void *ptr; @@ -392,24 +393,24 @@ static int compress_page(struct compress *c, if (!ptr) return -ENOMEM; - if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) + if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; return 0; } -static int compress_flush(struct compress *c, - struct drm_i915_error_object *dst) +static int compress_flush(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { return 0; } -static void compress_finish(struct compress *c) +static void compress_finish(struct i915_vma_compress *c) { } -static void compress_fini(struct compress *c) +static void compress_fini(struct i915_vma_compress *c) { pool_fini(&c->pool); } @@ -422,7 +423,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #endif static void error_print_instdone(struct drm_i915_error_state_buf *m, - const struct drm_i915_error_engine *ee) + const struct intel_engine_coredump *ee) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; int slice; @@ -453,40 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, static void error_print_request(struct drm_i915_error_state_buf *m, const char *prefix, - const struct drm_i915_error_request *erq, - const unsigned long epoch) + const struct i915_request_coredump *erq) { if (!erq->seqno) return; - err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n", prefix, erq->pid, erq->context, erq->seqno, test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "", test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags) ? 
"+" : "", erq->sched_attr.priority, - jiffies_to_msecs(erq->jiffies - epoch), erq->start, erq->head, erq->tail); } static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, - const struct drm_i915_error_context *ctx) + const struct i915_gem_context_coredump *ctx) { err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n", header, ctx->comm, ctx->pid, ctx->sched_attr.priority, ctx->guilty, ctx->active); } +static struct i915_vma_coredump * +__find_vma(struct i915_vma_coredump *vma, const char *name) +{ + while (vma) { + if (strcmp(vma->name, name) == 0) + return vma; + vma = vma->next; + } + + return NULL; +} + +static struct i915_vma_coredump * +find_batch(const struct intel_engine_coredump *ee) +{ + return __find_vma(ee->vma, "batch"); +} + static void error_print_engine(struct drm_i915_error_state_buf *m, - const struct drm_i915_error_engine *ee, - const unsigned long epoch) + const struct intel_engine_coredump *ee) { + struct i915_vma_coredump *batch; int n; err_printf(m, "%s command stream:\n", ee->engine->name); - err_printf(m, " IDLE?: %s\n", yesno(ee->idle)); + err_printf(m, " CCID: 0x%08x\n", ee->ccid); err_printf(m, " START: 0x%08x\n", ee->start); err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", @@ -501,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, error_print_instdone(m, ee); - if (ee->batchbuffer) { - u64 start = ee->batchbuffer->gtt_offset; - u64 end = start + ee->batchbuffer->gtt_size; + batch = find_batch(ee); + if (batch) { + u64 start = batch->gtt_offset; + u64 end = start + batch->gtt_size; err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", upper_32_bits(start), lower_32_bits(start), @@ -535,13 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, ee->vm_info.pp_dir_base); } } - err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); - err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); err_printf(m, " engine reset count: %u\n", ee->reset_count); for (n = 0; n < ee->num_ports; n++) { err_printf(m, " ELSP[%d]:", n); - error_print_request(m, " ", &ee->execlist[n], epoch); + error_print_request(m, " ", &ee->execlist[n]); } error_print_context(m, " Active context: ", &ee->context); @@ -556,38 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) va_end(args); } -static void print_error_obj(struct drm_i915_error_state_buf *m, +static void print_error_vma(struct drm_i915_error_state_buf *m, const struct intel_engine_cs *engine, - const char *name, - const struct drm_i915_error_object *obj) + const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; int page; - if (!obj) + if (!vma) return; - if (name) { - err_printf(m, "%s --- %s = 0x%08x %08x\n", - engine ? engine->name : "global", name, - upper_32_bits(obj->gtt_offset), - lower_32_bits(obj->gtt_offset)); - } + err_printf(m, "%s --- %s = 0x%08x %08x\n", + engine ? 
engine->name : "global", vma->name, + upper_32_bits(vma->gtt_offset), + lower_32_bits(vma->gtt_offset)); - if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) - err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes); + if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) + err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); err_compression_marker(m); - for (page = 0; page < obj->page_count; page++) { + for (page = 0; page < vma->page_count; page++) { int i, len; len = PAGE_SIZE; - if (page == obj->page_count - 1) - len -= obj->unused; + if (page == vma->page_count - 1) + len -= vma->unused; len = ascii85_encode_len(len); for (i = 0; i < len; i++) - err_puts(m, ascii85_encode(obj->pages[page][i], out)); + err_puts(m, ascii85_encode(vma->pages[page][i], out)); } err_puts(m, "\n"); } @@ -626,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m, } static void err_print_uc(struct drm_i915_error_state_buf *m, - const struct i915_error_uc *error_uc) + const struct intel_uc_coredump *error_uc) { struct drm_printer p = i915_error_printer(m); - const struct i915_gpu_state *error = - container_of(error_uc, typeof(*error), uc); - - if (!error->device_info.has_gt_uc) - return; intel_uc_fw_dump(&error_uc->guc_fw, &p); intel_uc_fw_dump(&error_uc->huc_fw, &p); - print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log); + print_error_vma(m, NULL, error_uc->guc_log); } static void err_free_sgl(struct scatterlist *sgl) @@ -657,12 +665,69 @@ static void err_free_sgl(struct scatterlist *sgl) } } +static void err_print_gt(struct drm_i915_error_state_buf *m, + struct intel_gt_coredump *gt) +{ + const struct intel_engine_coredump *ee; + int i; + + err_printf(m, "GT awake: %s\n", yesno(gt->awake)); + err_printf(m, "EIR: 0x%08x\n", gt->eir); + err_printf(m, "IER: 0x%08x\n", gt->ier); + for (i = 0; i < gt->ngtier; i++) + err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]); + err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er); + err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake); + err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr); + + for (i = 0; i < gt->nfence; i++) + err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]); + + if (IS_GEN_RANGE(m->i915, 6, 11)) { + err_printf(m, "ERROR: 0x%08x\n", gt->error); + err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg); + } + + if (INTEL_GEN(m->i915) >= 8) + err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", + gt->fault_data1, gt->fault_data0); + + if (IS_GEN(m->i915, 7)) + err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int); + + if (IS_GEN_RANGE(m->i915, 8, 11)) + err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache); + + if (IS_GEN(m->i915, 12)) + err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err); + + if (INTEL_GEN(m->i915) >= 12) { + int i; + + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) + err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, + gt->sfc_done[i]); + + err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); + } + + for (ee = gt->engine; ee; ee = ee->next) { + const struct i915_vma_coredump *vma; + + error_print_engine(m, ee); + for (vma = ee->vma; vma; vma = vma->next) + print_error_vma(m, ee->engine, vma); + } + + if (gt->uc) + err_print_uc(m, gt->uc); +} + static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, - struct i915_gpu_state *error) + struct i915_gpu_coredump *error) { - const struct drm_i915_error_engine *ee; + const struct intel_engine_coredump *ee; struct timespec64 ts; - int i, j; if (*error->error_msg) err_printf(m, "%s\n", error->error_msg); @@ -682,7 +747,7 @@ static void __err_print_to_sgl(struct 
drm_i915_error_state_buf *m, err_printf(m, "Capture: %lu jiffies; %d ms ago\n", error->capture, jiffies_to_msecs(jiffies - error->capture)); - for (ee = error->engine; ee; ee = ee->next) + for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next) err_printf(m, "Active process (on ring %s): %s [%d]\n", ee->engine->name, ee->context.comm, @@ -708,90 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, CSR_VERSION_MINOR(csr->version)); } - err_printf(m, "GT awake: %s\n", yesno(error->awake)); err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock)); err_printf(m, "PM suspended: %s\n", yesno(error->suspended)); - err_printf(m, "EIR: 0x%08x\n", error->eir); - err_printf(m, "IER: 0x%08x\n", error->ier); - for (i = 0; i < error->ngtier; i++) - err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]); - err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); - err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); - err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); - err_printf(m, "CCID: 0x%08x\n", error->ccid); - - for (i = 0; i < error->nfence; i++) - err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - - if (IS_GEN_RANGE(m->i915, 6, 11)) { - err_printf(m, "ERROR: 0x%08x\n", error->error); - err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); - } - if (INTEL_GEN(m->i915) >= 8) - err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", - error->fault_data1, error->fault_data0); - - if (IS_GEN(m->i915, 7)) - err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - - if (IS_GEN_RANGE(m->i915, 8, 11)) - err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache); - - if (IS_GEN(m->i915, 12)) - err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err); - - if (INTEL_GEN(m->i915) >= 12) { - int i; - - for (i = 0; i < GEN12_SFC_DONE_MAX; i++) - err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, - error->sfc_done[i]); - - err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done); - } - - for (ee = error->engine; ee; ee = ee->next) - error_print_engine(m, ee, error->capture); - - for (ee = error->engine; ee; ee = ee->next) { - const struct drm_i915_error_object *obj; - - obj = ee->batchbuffer; - if (obj) { - err_puts(m, ee->engine->name); - if (ee->context.pid) - err_printf(m, " (submitted by %s [%d])", - ee->context.comm, - ee->context.pid); - err_printf(m, " --- gtt_offset = 0x%08x %08x\n", - upper_32_bits(obj->gtt_offset), - lower_32_bits(obj->gtt_offset)); - print_error_obj(m, ee->engine, NULL, obj); - } - - for (j = 0; j < ee->user_bo_count; j++) - print_error_obj(m, ee->engine, "user", ee->user_bo[j]); - - if (ee->num_requests) { - err_printf(m, "%s --- %d requests\n", - ee->engine->name, - ee->num_requests); - for (j = 0; j < ee->num_requests; j++) - error_print_request(m, " ", - &ee->requests[j], - error->capture); - } - - print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer); - print_error_obj(m, ee->engine, "HW Status", ee->hws_page); - print_error_obj(m, ee->engine, "HW context", ee->ctx); - print_error_obj(m, ee->engine, "WA context", ee->wa_ctx); - print_error_obj(m, ee->engine, - "WA batchbuffer", ee->wa_batchbuffer); - print_error_obj(m, ee->engine, - "NULL context", ee->default_state); - } + if (error->gt) + err_print_gt(m, error->gt); if (error->overlay) intel_overlay_print_error_state(m, error->overlay); @@ -802,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_print_capabilities(m, &error->device_info, &error->runtime_info, &error->driver_caps); err_print_params(m, &error->params); - err_print_uc(m, &error->uc); } -static int 
err_print_to_sgl(struct i915_gpu_state *error) +static int err_print_to_sgl(struct i915_gpu_coredump *error) { struct drm_i915_error_state_buf m; @@ -842,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error) return 0; } -ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, - char *buf, loff_t off, size_t rem) +ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, + char *buf, loff_t off, size_t rem) { struct scatterlist *sg; size_t count; @@ -906,85 +891,88 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, return count; } -static void i915_error_object_free(struct drm_i915_error_object *obj) +static void i915_vma_coredump_free(struct i915_vma_coredump *vma) { - int page; + while (vma) { + struct i915_vma_coredump *next = vma->next; + int page; - if (obj == NULL) - return; + for (page = 0; page < vma->page_count; page++) + free_page((unsigned long)vma->pages[page]); - for (page = 0; page < obj->page_count; page++) - free_page((unsigned long)obj->pages[page]); - - kfree(obj); + kfree(vma); + vma = next; + } } - -static void cleanup_params(struct i915_gpu_state *error) +static void cleanup_params(struct i915_gpu_coredump *error) { i915_params_free(&error->params); } -static void cleanup_uc_state(struct i915_gpu_state *error) +static void cleanup_uc(struct intel_uc_coredump *uc) { - struct i915_error_uc *error_uc = &error->uc; + kfree(uc->guc_fw.path); + kfree(uc->huc_fw.path); + i915_vma_coredump_free(uc->guc_log); - kfree(error_uc->guc_fw.path); - kfree(error_uc->huc_fw.path); - i915_error_object_free(error_uc->guc_log); + kfree(uc); } -void __i915_gpu_state_free(struct kref *error_ref) +static void cleanup_gt(struct intel_gt_coredump *gt) { - struct i915_gpu_state *error = - container_of(error_ref, typeof(*error), ref); - long i; + while (gt->engine) { + struct intel_engine_coredump *ee = gt->engine; + + gt->engine = ee->next; + + i915_vma_coredump_free(ee->vma); + kfree(ee); + } - while (error->engine) { - struct drm_i915_error_engine *ee = error->engine; + if (gt->uc) + cleanup_uc(gt->uc); - error->engine = ee->next; + kfree(gt); +} - for (i = 0; i < ee->user_bo_count; i++) - i915_error_object_free(ee->user_bo[i]); - kfree(ee->user_bo); +void __i915_gpu_coredump_free(struct kref *error_ref) +{ + struct i915_gpu_coredump *error = + container_of(error_ref, typeof(*error), ref); - i915_error_object_free(ee->batchbuffer); - i915_error_object_free(ee->wa_batchbuffer); - i915_error_object_free(ee->ringbuffer); - i915_error_object_free(ee->hws_page); - i915_error_object_free(ee->ctx); - i915_error_object_free(ee->wa_ctx); + while (error->gt) { + struct intel_gt_coredump *gt = error->gt; - kfree(ee->requests); - kfree(ee); + error->gt = gt->next; + cleanup_gt(gt); } kfree(error->overlay); kfree(error->display); cleanup_params(error); - cleanup_uc_state(error); err_free_sgl(error->sgl); kfree(error); } -static struct drm_i915_error_object * -i915_error_object_create(struct drm_i915_private *i915, - struct i915_vma *vma, - struct compress *compress) +static struct i915_vma_coredump * +i915_vma_coredump_create(const struct intel_gt *gt, + const struct i915_vma *vma, + const char *name, + struct i915_vma_compress *compress) { - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = gt->ggtt; const u64 slot = ggtt->error_capture.start; - struct drm_i915_error_object *dst; + struct i915_vma_coredump *dst; unsigned long num_pages; struct sgt_iter iter; int ret; might_sleep(); - if (!vma || !vma->pages) + if (!vma || 
!vma->pages || !compress) return NULL; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; @@ -998,6 +986,9 @@ i915_error_object_create(struct drm_i915_private *i915, return NULL; } + strcpy(dst->name, name); + dst->next = NULL; + dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; dst->gtt_page_sizes = vma->page_sizes.gtt; @@ -1005,9 +996,6 @@ i915_error_object_create(struct drm_i915_private *i915, dst->page_count = 0; dst->unused = 0; - compress->wc = i915_gem_object_is_lmem(vma->obj) || - drm_mm_node_allocated(&ggtt->error_capture); - ret = -EINVAL; if (drm_mm_node_allocated(&ggtt->error_capture)) { void __iomem *s; @@ -1016,9 +1004,12 @@ i915_error_object_create(struct drm_i915_private *i915, for_each_sgt_daddr(dma, iter, vma->pages) { ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + mb(); s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); + ret = compress_page(compress, + (void __force *)s, dst, + true); io_mapping_unmap(s); if (ret) break; @@ -1031,7 +1022,9 @@ i915_error_object_create(struct drm_i915_private *i915, void __iomem *s; s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); + ret = compress_page(compress, + (void __force *)s, dst, + true); io_mapping_unmap(s); if (ret) break; @@ -1045,7 +1038,7 @@ i915_error_object_create(struct drm_i915_private *i915, drm_clflush_pages(&page, 1); s = kmap(page); - ret = compress_page(compress, s, dst); + ret = compress_page(compress, s, dst, false); kunmap(page); drm_clflush_pages(&page, 1); @@ -1066,77 +1059,56 @@ i915_error_object_create(struct drm_i915_private *i915, return dst; } -/* - * Generate a semi-unique error code. The code is not meant to have meaning, The - * code's only purpose is to try to prevent false duplicated bug reports by - * grossly estimating a GPU error state. - * - * TODO Ideally, hashing the batchbuffer would be a very nice way to determine - * the hang if we could strip the GTT offset information from it. - * - * It's only a small step better than a random number in its current form. - */ -static u32 i915_error_generate_code(struct i915_gpu_state *error) -{ - const struct drm_i915_error_engine *ee = error->engine; - - /* - * IPEHR would be an ideal way to detect errors, as it's the gross - * measure of "the command that hung." However, has some very common - * synchronization commands which almost always appear in the case - * strictly a client bug. Use instdone to differentiate those some. - */ - return ee ? 
ee->ipehr ^ ee->instdone.instdone : 0; -} - -static void gem_record_fences(struct i915_gpu_state *error) +static void gt_record_fences(struct intel_gt_coredump *gt) { - struct drm_i915_private *dev_priv = error->i915; - struct intel_uncore *uncore = &dev_priv->uncore; + struct i915_ggtt *ggtt = gt->_gt->ggtt; + struct intel_uncore *uncore = gt->_gt->uncore; int i; - if (INTEL_GEN(dev_priv) >= 6) { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + if (INTEL_GEN(uncore->i915) >= 6) { + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read64(uncore, FENCE_REG_GEN6_LO(i)); - } else if (INTEL_GEN(dev_priv) >= 4) { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + } else if (INTEL_GEN(uncore->i915) >= 4) { + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read64(uncore, FENCE_REG_965_LO(i)); } else { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read(uncore, FENCE_REG(i)); } - error->nfence = i; + gt->nfence = i; } -static void error_record_engine_registers(struct i915_gpu_state *error, - struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) +static void engine_record_registers(struct intel_engine_coredump *ee) { - struct drm_i915_private *dev_priv = engine->i915; + const struct intel_engine_cs *engine = ee->engine; + struct drm_i915_private *i915 = engine->i915; - if (INTEL_GEN(dev_priv) >= 6) { + if (INTEL_GEN(i915) >= 6) { ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); - if (INTEL_GEN(dev_priv) >= 12) - ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG); - else if (INTEL_GEN(dev_priv) >= 8) - ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); + if (INTEL_GEN(i915) >= 12) + ee->fault_reg = intel_uncore_read(engine->uncore, + GEN12_RING_FAULT_REG); + else if (INTEL_GEN(i915) >= 8) + ee->fault_reg = intel_uncore_read(engine->uncore, + GEN8_RING_FAULT_REG); else ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); } - if (INTEL_GEN(dev_priv) >= 4) { + if (INTEL_GEN(i915) >= 4) { ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); ee->ipeir = ENGINE_READ(engine, RING_IPEIR); ee->ipehr = ENGINE_READ(engine, RING_IPEHR); ee->instps = ENGINE_READ(engine, RING_INSTPS); ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); - if (INTEL_GEN(dev_priv) >= 8) { + ee->ccid = ENGINE_READ(engine, CCID); + if (INTEL_GEN(i915) >= 8) { ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; } @@ -1155,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ee->head = ENGINE_READ(engine, RING_HEAD); ee->tail = ENGINE_READ(engine, RING_TAIL); ee->ctl = ENGINE_READ(engine, RING_CTL); - if (INTEL_GEN(dev_priv) > 2) + if (INTEL_GEN(i915) > 2) ee->mode = ENGINE_READ(engine, RING_MI_MODE); - if (!HWS_NEEDS_PHYSICAL(dev_priv)) { + if (!HWS_NEEDS_PHYSICAL(i915)) { i915_reg_t mmio; - if (IS_GEN(dev_priv, 7)) { + if (IS_GEN(i915, 7)) { switch (engine->id) { default: MISSING_CASE(engine->id); @@ -1186,40 +1158,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error, mmio = RING_HWS_PGA(engine->mmio_base); } - ee->hws = I915_READ(mmio); + ee->hws = intel_uncore_read(engine->uncore, mmio); } - ee->idle = intel_engine_is_idle(engine); - ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, - engine); + ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine); - if (HAS_PPGTT(dev_priv)) { + if (HAS_PPGTT(i915)) { 
int i; ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); - if (IS_GEN(dev_priv, 6)) { + if (IS_GEN(i915, 6)) { ee->vm_info.pp_dir_base = ENGINE_READ(engine, RING_PP_DIR_BASE_READ); - } else if (IS_GEN(dev_priv, 7)) { + } else if (IS_GEN(i915, 7)) { ee->vm_info.pp_dir_base = ENGINE_READ(engine, RING_PP_DIR_BASE); - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { u32 base = engine->mmio_base; for (i = 0; i < 4; i++) { ee->vm_info.pdp[i] = - I915_READ(GEN8_RING_PDP_UDW(base, i)); + intel_uncore_read(engine->uncore, + GEN8_RING_PDP_UDW(base, i)); ee->vm_info.pdp[i] <<= 32; ee->vm_info.pdp[i] |= - I915_READ(GEN8_RING_PDP_LDW(base, i)); + intel_uncore_read(engine->uncore, + GEN8_RING_PDP_LDW(base, i)); } } } } static void record_request(const struct i915_request *request, - struct drm_i915_error_request *erq) + struct i915_request_coredump *erq) { const struct i915_gem_context *ctx; @@ -1227,7 +1199,6 @@ static void record_request(const struct i915_request *request, erq->context = request->fence.context; erq->seqno = request->fence.seqno; erq->sched_attr = request->sched.attr; - erq->jiffies = request->emitted_jiffies; erq->start = i915_ggtt_offset(request->ring->vma); erq->head = request->head; erq->tail = request->tail; @@ -1240,59 +1211,10 @@ static void record_request(const struct i915_request *request, rcu_read_unlock(); } -static void engine_record_requests(struct intel_engine_cs *engine, - struct i915_request *first, - struct drm_i915_error_engine *ee) +static void engine_record_execlists(struct intel_engine_coredump *ee) { - struct i915_request *request; - int count; - - count = 0; - request = first; - list_for_each_entry_from(request, &engine->active.requests, sched.link) - count++; - if (!count) - return; - - ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL); - if (!ee->requests) - return; - - ee->num_requests = count; - - count = 0; - request = first; - list_for_each_entry_from(request, - &engine->active.requests, sched.link) { - if (count >= ee->num_requests) { - /* - * If the ring request list was changed in - * between the point where the error request - * list was created and dimensioned and this - * point then just exit early to avoid crashes. - * - * We don't need to communicate that the - * request list changed state during error - * state capture and that the error state is - * slightly incorrect as a consequence since we - * are typically only interested in the request - * list state at the point of error state - * capture, not in any changes happening during - * the capture. 
- */ - break; - } - - record_request(request, &ee->requests[count++]); - } - ee->num_requests = count; -} - -static void error_record_engine_execlists(const struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - const struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request * const *port = execlists->active; + const struct intel_engine_execlists * const el = &ee->engine->execlists; + struct i915_request * const *port = el->active; unsigned int n = 0; while (*port) @@ -1301,7 +1223,7 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine, ee->num_ports = n; } -static bool record_context(struct drm_i915_error_context *e, +static bool record_context(struct i915_gem_context_coredump *e, const struct i915_request *rq) { struct i915_gem_context *ctx; @@ -1334,23 +1256,24 @@ static bool record_context(struct drm_i915_error_context *e, return capture; } -struct capture_vma { - struct capture_vma *next; - void **slot; +struct intel_engine_capture_vma { + struct intel_engine_capture_vma *next; + struct i915_vma *vma; + char name[16]; }; -static struct capture_vma * -capture_vma(struct capture_vma *next, +static struct intel_engine_capture_vma * +capture_vma(struct intel_engine_capture_vma *next, struct i915_vma *vma, - struct drm_i915_error_object **out) + const char *name, + gfp_t gfp) { - struct capture_vma *c; + struct intel_engine_capture_vma *c; - *out = NULL; if (!vma) return next; - c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL); + c = kmalloc(sizeof(*c), gfp); if (!c) return next; @@ -1359,54 +1282,31 @@ capture_vma(struct capture_vma *next, return next; } - c->slot = (void **)out; - *c->slot = i915_vma_get(vma); + strcpy(c->name, name); + c->vma = i915_vma_get(vma); c->next = next; return c; } -static struct capture_vma * -request_record_user_bo(struct i915_request *request, - struct drm_i915_error_engine *ee, - struct capture_vma *capture) +static struct intel_engine_capture_vma * +capture_user(struct intel_engine_capture_vma *capture, + const struct i915_request *rq, + gfp_t gfp) { struct i915_capture_list *c; - struct drm_i915_error_object **bo; - long count, max; - - max = 0; - for (c = request->capture_list; c; c = c->next) - max++; - if (!max) - return capture; - - bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); - if (!bo) { - /* If we can't capture everything, try to capture something. 
*/ - max = min_t(long, max, PAGE_SIZE / sizeof(*bo)); - bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); - } - if (!bo) - return capture; - count = 0; - for (c = request->capture_list; c; c = c->next) { - capture = capture_vma(capture, c->vma, &bo[count]); - if (++count == max) - break; - } - - ee->user_bo = bo; - ee->user_bo_count = count; + for (c = rq->capture_list; c; c = c->next) + capture = capture_vma(capture, c->vma, "user", gfp); return capture; } -static struct drm_i915_error_object * -capture_object(struct drm_i915_private *dev_priv, +static struct i915_vma_coredump * +capture_object(const struct intel_gt *gt, struct drm_i915_gem_object *obj, - struct compress *compress) + const char *name, + struct i915_vma_compress *compress) { if (obj && i915_gem_object_has_pages(obj)) { struct i915_vma fake = { @@ -1416,127 +1316,175 @@ capture_object(struct drm_i915_private *dev_priv, .obj = obj, }; - return i915_error_object_create(dev_priv, &fake, compress); + return i915_vma_coredump_create(gt, &fake, name, compress); } else { return NULL; } } -static void -gem_record_rings(struct i915_gpu_state *error, struct compress *compress) +static void add_vma(struct intel_engine_coredump *ee, + struct i915_vma_coredump *vma) { - struct drm_i915_private *i915 = error->i915; - struct intel_engine_cs *engine; - struct drm_i915_error_engine *ee; + if (vma) { + vma->next = ee->vma; + ee->vma = vma; + } +} + +struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) +{ + struct intel_engine_coredump *ee; - ee = kzalloc(sizeof(*ee), GFP_KERNEL); + ee = kzalloc(sizeof(*ee), gfp); if (!ee) - return; + return NULL; - for_each_uabi_engine(engine, i915) { - struct capture_vma *capture = NULL; - struct i915_request *request; - unsigned long flags; + ee->engine = engine; - /* Refill our page pool before entering atomic section */ - pool_refill(&compress->pool, ALLOW_FAIL); + engine_record_registers(ee); + engine_record_execlists(ee); - spin_lock_irqsave(&engine->active.lock, flags); - request = intel_engine_find_active_request(engine); - if (!request) { - spin_unlock_irqrestore(&engine->active.lock, flags); - continue; - } + return ee; +} - error->simulated |= record_context(&ee->context, request); +struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp) +{ + struct intel_engine_capture_vma *vma = NULL; - /* - * We need to copy these to an anonymous buffer - * as the simplest method to avoid being overwritten - * by userspace. - */ - capture = capture_vma(capture, - request->batch, - &ee->batchbuffer); + ee->simulated |= record_context(&ee->context, rq); + if (ee->simulated) + return NULL; - if (HAS_BROKEN_CS_TLB(i915)) - capture = capture_vma(capture, - engine->gt->scratch, - &ee->wa_batchbuffer); + /* + * We need to copy these to an anonymous buffer + * as the simplest method to avoid being overwritten + * by userspace. 
+ */ + vma = capture_vma(vma, rq->batch, "batch", gfp); + vma = capture_user(vma, rq, gfp); + vma = capture_vma(vma, rq->ring->vma, "ring", gfp); + vma = capture_vma(vma, rq->context->state, "HW context", gfp); - capture = request_record_user_bo(request, ee, capture); + ee->rq_head = rq->head; + ee->rq_post = rq->postfix; + ee->rq_tail = rq->tail; - capture = capture_vma(capture, - request->context->state, - &ee->ctx); + return vma; +} - capture = capture_vma(capture, - request->ring->vma, - &ee->ringbuffer); +void +intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress) +{ + const struct intel_engine_cs *engine = ee->engine; - ee->cpu_ring_head = request->ring->head; - ee->cpu_ring_tail = request->ring->tail; + while (capture) { + struct intel_engine_capture_vma *this = capture; + struct i915_vma *vma = this->vma; - ee->rq_head = request->head; - ee->rq_post = request->postfix; - ee->rq_tail = request->tail; + add_vma(ee, + i915_vma_coredump_create(engine->gt, + vma, this->name, + compress)); - engine_record_requests(engine, request, ee); - spin_unlock_irqrestore(&engine->active.lock, flags); + i915_active_release(&vma->active); + i915_vma_put(vma); - error_record_engine_registers(error, engine, ee); - error_record_engine_execlists(engine, ee); + capture = this->next; + kfree(this); + } - while (capture) { - struct capture_vma *this = capture; - struct i915_vma *vma = *this->slot; + add_vma(ee, + i915_vma_coredump_create(engine->gt, + engine->status_page.vma, + "HW Status", + compress)); - *this->slot = - i915_error_object_create(i915, vma, compress); + add_vma(ee, + i915_vma_coredump_create(engine->gt, + engine->wa_ctx.vma, + "WA context", + compress)); - i915_active_release(&vma->active); - i915_vma_put(vma); + add_vma(ee, + capture_object(engine->gt, + engine->default_state, + "NULL context", + compress)); +} - capture = this->next; - kfree(this); - } +static struct intel_engine_coredump * +capture_engine(struct intel_engine_cs *engine, + struct i915_vma_compress *compress) +{ + struct intel_engine_capture_vma *capture = NULL; + struct intel_engine_coredump *ee; + struct i915_request *rq; + unsigned long flags; - ee->hws_page = - i915_error_object_create(i915, - engine->status_page.vma, - compress); + ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); + if (!ee) + return NULL; + + spin_lock_irqsave(&engine->active.lock, flags); + rq = intel_engine_find_active_request(engine); + if (rq) + capture = intel_engine_coredump_add_request(ee, rq, + ATOMIC_MAYFAIL); + spin_unlock_irqrestore(&engine->active.lock, flags); + if (!capture) { + kfree(ee); + return NULL; + } - ee->wa_ctx = - i915_error_object_create(i915, - engine->wa_ctx.vma, - compress); + intel_engine_coredump_add_vma(ee, capture, compress); - ee->default_state = - capture_object(i915, engine->default_state, compress); + return ee; +} - ee->engine = engine; +static void +gt_record_engines(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; - ee->next = error->engine; - error->engine = ee; + for_each_engine(engine, gt->_gt, id) { + struct intel_engine_coredump *ee; - ee = kzalloc(sizeof(*ee), GFP_KERNEL); + /* Refill our page pool before entering atomic section */ + pool_refill(&compress->pool, ALLOW_FAIL); + + ee = capture_engine(engine, compress); if (!ee) - return; - } + continue; - kfree(ee); + gt->simulated |= ee->simulated; + if (ee->simulated) { + 
kfree(ee); + continue; + } + + ee->next = gt->engine; + gt->engine = ee; + } } -static void -capture_uc_state(struct i915_gpu_state *error, struct compress *compress) +static struct intel_uc_coredump * +gt_record_uc(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) { - struct drm_i915_private *i915 = error->i915; - struct i915_error_uc *error_uc = &error->uc; - struct intel_uc *uc = &i915->gt.uc; + const struct intel_uc *uc = &gt->_gt->uc; + struct intel_uc_coredump *error_uc; - /* Capturing uC state won't be useful if there is no GuC */ - if (!error->device_info.has_gt_uc) - return; + error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); + if (!error_uc) + return NULL; memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw)); memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw)); @@ -1547,19 +1495,42 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress) */ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); - error_uc->guc_log = i915_error_object_create(i915, - uc->guc.log.vma, - compress); + error_uc->guc_log = + i915_vma_coredump_create(gt->_gt, + uc->guc.log.vma, "GuC log buffer", + compress); + + return error_uc; +} + +static void gt_capture_prepare(struct intel_gt_coredump *gt) +{ + struct i915_ggtt *ggtt = gt->_gt->ggtt; + + mutex_lock(&ggtt->error_mutex); +} + +static void gt_capture_finish(struct intel_gt_coredump *gt) +{ + struct i915_ggtt *ggtt = gt->_gt->ggtt; + + if (drm_mm_node_allocated(&ggtt->error_capture)) + ggtt->vm.clear_range(&ggtt->vm, + ggtt->error_capture.start, + PAGE_SIZE); + + mutex_unlock(&ggtt->error_mutex); } /* Capture all registers which don't fit into another category. */ -static void capture_reg_state(struct i915_gpu_state *error) +static void gt_record_regs(struct intel_gt_coredump *gt) { - struct drm_i915_private *i915 = error->i915; - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->_gt->uncore; + struct drm_i915_private *i915 = uncore->i915; int i; - /* General organization + /* + * General organization * 1. Registers specific to a single generation * 2. Registers which belong to multiple generations * 3. Feature specific registers. 
@@ -1569,138 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error) /* 1: Registers specific to a single generation */ if (IS_VALLEYVIEW(i915)) { - error->gtier[0] = intel_uncore_read(uncore, GTIER); - error->ier = intel_uncore_read(uncore, VLV_IER); - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); + gt->gtier[0] = intel_uncore_read(uncore, GTIER); + gt->ier = intel_uncore_read(uncore, VLV_IER); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); } if (IS_GEN(i915, 7)) - error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); + gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); if (INTEL_GEN(i915) >= 12) { - error->fault_data0 = intel_uncore_read(uncore, - GEN12_FAULT_TLB_DATA0); - error->fault_data1 = intel_uncore_read(uncore, - GEN12_FAULT_TLB_DATA1); + gt->fault_data0 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA0); + gt->fault_data1 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA1); } else if (INTEL_GEN(i915) >= 8) { - error->fault_data0 = intel_uncore_read(uncore, - GEN8_FAULT_TLB_DATA0); - error->fault_data1 = intel_uncore_read(uncore, - GEN8_FAULT_TLB_DATA1); + gt->fault_data0 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA0); + gt->fault_data1 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA1); } if (IS_GEN(i915, 6)) { - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); - error->gab_ctl = intel_uncore_read(uncore, GAB_CTL); - error->gfx_mode = intel_uncore_read(uncore, GFX_MODE); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); + gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL); + gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE); } /* 2: Registers which belong to multiple generations */ if (INTEL_GEN(i915) >= 7) - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); if (INTEL_GEN(i915) >= 6) { - error->derrmr = intel_uncore_read(uncore, DERRMR); + gt->derrmr = intel_uncore_read(uncore, DERRMR); if (INTEL_GEN(i915) < 12) { - error->error = intel_uncore_read(uncore, ERROR_GEN6); - error->done_reg = intel_uncore_read(uncore, DONE_REG); + gt->error = intel_uncore_read(uncore, ERROR_GEN6); + gt->done_reg = intel_uncore_read(uncore, DONE_REG); } } - if (INTEL_GEN(i915) >= 5) - error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE)); - /* 3: Feature specific registers */ if (IS_GEN_RANGE(i915, 6, 7)) { - error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); - error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); + gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); } if (IS_GEN_RANGE(i915, 8, 11)) - error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); + gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); if (IS_GEN(i915, 12)) - error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); + gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); if (INTEL_GEN(i915) >= 12) { for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { - error->sfc_done[i] = + gt->sfc_done[i] = intel_uncore_read(uncore, GEN12_SFC_DONE(i)); } - error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); + gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); } /* 4: Everything else */ if (INTEL_GEN(i915) >= 11) { - error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); - error->gtier[0] = + gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); + gt->gtier[0] = intel_uncore_read(uncore, GEN11_RENDER_COPY_INTR_ENABLE); - error->gtier[1] = + gt->gtier[1] = 
intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); - error->gtier[2] = + gt->gtier[2] = intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); - error->gtier[3] = + gt->gtier[3] = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); - error->gtier[4] = + gt->gtier[4] = intel_uncore_read(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE); - error->gtier[5] = + gt->gtier[5] = intel_uncore_read(uncore, GEN11_GUNIT_CSME_INTR_ENABLE); - error->ngtier = 6; + gt->ngtier = 6; } else if (INTEL_GEN(i915) >= 8) { - error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); + gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); for (i = 0; i < 4; i++) - error->gtier[i] = intel_uncore_read(uncore, - GEN8_GT_IER(i)); - error->ngtier = 4; + gt->gtier[i] = + intel_uncore_read(uncore, GEN8_GT_IER(i)); + gt->ngtier = 4; } else if (HAS_PCH_SPLIT(i915)) { - error->ier = intel_uncore_read(uncore, DEIER); - error->gtier[0] = intel_uncore_read(uncore, GTIER); - error->ngtier = 1; + gt->ier = intel_uncore_read(uncore, DEIER); + gt->gtier[0] = intel_uncore_read(uncore, GTIER); + gt->ngtier = 1; } else if (IS_GEN(i915, 2)) { - error->ier = intel_uncore_read16(uncore, GEN2_IER); + gt->ier = intel_uncore_read16(uncore, GEN2_IER); } else if (!IS_VALLEYVIEW(i915)) { - error->ier = intel_uncore_read(uncore, GEN2_IER); + gt->ier = intel_uncore_read(uncore, GEN2_IER); } - error->eir = intel_uncore_read(uncore, EIR); - error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); + gt->eir = intel_uncore_read(uncore, EIR); + gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); } -static const char * -error_msg(struct i915_gpu_state *error, - intel_engine_mask_t engines, const char *msg) +/* + * Generate a semi-unique error code. The code is not meant to have meaning, The + * code's only purpose is to try to prevent false duplicated bug reports by + * grossly estimating a GPU error state. + * + * TODO Ideally, hashing the batchbuffer would be a very nice way to determine + * the hang if we could strip the GTT offset information from it. + * + * It's only a small step better than a random number in its current form. + */ +static u32 generate_ecode(const struct intel_engine_coredump *ee) { + /* + * IPEHR would be an ideal way to detect errors, as it's the gross + * measure of "the command that hung." However, has some very common + * synchronization commands which almost always appear in the case + * strictly a client bug. Use instdone to differentiate those some. + */ + return ee ? 
ee->ipehr ^ ee->instdone.instdone : 0; +} + +static const char *error_msg(struct i915_gpu_coredump *error) +{ + struct intel_engine_coredump *first = NULL; + struct intel_gt_coredump *gt; + intel_engine_mask_t engines; int len; + engines = 0; + for (gt = error->gt; gt; gt = gt->next) { + struct intel_engine_coredump *cs; + + if (gt->engine && !first) + first = gt->engine; + + for (cs = gt->engine; cs; cs = cs->next) + engines |= cs->engine->mask; + } + len = scnprintf(error->error_msg, sizeof(error->error_msg), - "GPU HANG: ecode %d:%x:0x%08x", + "GPU HANG: ecode %d:%x:%08x", INTEL_GEN(error->i915), engines, - i915_error_generate_code(error)); - if (error->engine) { + generate_ecode(first)); + if (first) { /* Just show the first executing process, more is confusing */ len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", - error->engine->context.comm, - error->engine->context.pid); + first->context.comm, first->context.pid); } - if (msg) - len += scnprintf(error->error_msg + len, - sizeof(error->error_msg) - len, - ", %s", msg); return error->error_msg; } -static void capture_gen_state(struct i915_gpu_state *error) +static void capture_gen(struct i915_gpu_coredump *error) { struct drm_i915_private *i915 = error->i915; - error->awake = i915->gt.awake; error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); error->suspended = i915->runtime_pm.suspended; @@ -1711,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error) error->reset_count = i915_reset_count(&i915->gpu_error); error->suspend_count = i915->suspend_count; + i915_params_copy(&error->params, &i915_modparams); memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); @@ -1720,115 +1716,138 @@ static void capture_gen_state(struct i915_gpu_state *error) error->driver_caps = i915->caps; } -static void capture_params(struct i915_gpu_state *error) +struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) { - i915_params_copy(&error->params, &i915_modparams); + struct i915_gpu_coredump *error; + + if (!i915_modparams.error_capture) + return NULL; + + error = kzalloc(sizeof(*error), gfp); + if (!error) + return NULL; + + kref_init(&error->ref); + error->i915 = i915; + + error->time = ktime_get_real(); + error->boottime = ktime_get_boottime(); + error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); + error->capture = jiffies; + + capture_gen(error); + + return error; } -static void capture_finish(struct i915_gpu_state *error) +#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) + +struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp) { - struct i915_ggtt *ggtt = &error->i915->ggtt; + struct intel_gt_coredump *gc; - if (drm_mm_node_allocated(&ggtt->error_capture)) { - const u64 slot = ggtt->error_capture.start; + gc = kzalloc(sizeof(*gc), gfp); + if (!gc) + return NULL; + + gc->_gt = gt; + gc->awake = intel_gt_pm_is_awake(gt); - ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + gt_record_regs(gc); + gt_record_fences(gc); + + return gc; +} + +struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt) +{ + struct i915_vma_compress *compress; + + compress = kmalloc(sizeof(*compress), ALLOW_FAIL); + if (!compress) + return NULL; + + if (!compress_init(compress)) { + kfree(compress); + return NULL; } + + gt_capture_prepare(gt); + + return compress; } -#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) +void i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct 
i915_vma_compress *compress) +{ + if (!compress) + return; + + gt_capture_finish(gt); -struct i915_gpu_state * -i915_capture_gpu_state(struct drm_i915_private *i915) + compress_fini(compress); + kfree(compress); +} + +struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) { - struct i915_gpu_state *error; - struct compress compress; + struct i915_gpu_coredump *error; /* Check if GPU capture has been disabled */ error = READ_ONCE(i915->gpu_error.first_error); if (IS_ERR(error)) return error; - error = kzalloc(sizeof(*error), ALLOW_FAIL); - if (!error) { - i915_disable_error_state(i915, -ENOMEM); + error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); + if (!error) return ERR_PTR(-ENOMEM); - } - if (!compress_init(&compress)) { - kfree(error); - i915_disable_error_state(i915, -ENOMEM); - return ERR_PTR(-ENOMEM); - } + error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL); + if (error->gt) { + struct i915_vma_compress *compress; - kref_init(&error->ref); - error->i915 = i915; + compress = i915_vma_capture_prepare(error->gt); + if (!compress) { + kfree(error->gt); + kfree(error); + return ERR_PTR(-ENOMEM); + } - error->time = ktime_get_real(); - error->boottime = ktime_get_boottime(); - error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); - error->capture = jiffies; + gt_record_engines(error->gt, compress); - capture_params(error); - capture_gen_state(error); - capture_uc_state(error, &compress); - capture_reg_state(error); - gem_record_fences(error); - gem_record_rings(error, &compress); + if (INTEL_INFO(i915)->has_gt_uc) + error->gt->uc = gt_record_uc(error->gt, compress); + + i915_vma_capture_finish(error->gt, compress); + + error->simulated |= error->gt->simulated; + } error->overlay = intel_overlay_capture_error_state(i915); error->display = intel_display_capture_error_state(i915); - capture_finish(error); - compress_fini(&compress); - return error; } -/** - * i915_capture_error_state - capture an error record for later analysis - * @i915: i915 device - * @engine_mask: the mask of engines triggering the hang - * @msg: a message to insert into the error capture header - * - * Should be called when an error is detected (either a hang or an error - * interrupt) to capture error state from the time of the error. Fills - * out a structure which becomes available in debugfs for user level tools - * to pick up. 
- */ -void i915_capture_error_state(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - const char *msg) +void i915_error_state_store(struct i915_gpu_coredump *error) { + struct drm_i915_private *i915; static bool warned; - struct i915_gpu_state *error; - unsigned long flags; - if (!i915_modparams.error_capture) + if (IS_ERR_OR_NULL(error)) return; - if (READ_ONCE(i915->gpu_error.first_error)) - return; + i915 = error->i915; + dev_info(i915->drm.dev, "%s\n", error_msg(error)); - error = i915_capture_gpu_state(i915); - if (IS_ERR(error)) + if (error->simulated || + cmpxchg(&i915->gpu_error.first_error, NULL, error)) return; - dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg)); - - if (!error->simulated) { - spin_lock_irqsave(&i915->gpu_error.lock, flags); - if (!i915->gpu_error.first_error) { - i915->gpu_error.first_error = error; - error = NULL; - } - spin_unlock_irqrestore(&i915->gpu_error.lock, flags); - } - - if (error) { - __i915_gpu_state_free(&error->ref); - return; - } + i915_gpu_coredump_get(error); if (!xchg(&warned, true) && ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { @@ -1841,15 +1860,38 @@ void i915_capture_error_state(struct drm_i915_private *i915, } } -struct i915_gpu_state * +/** + * i915_capture_error_state - capture an error record for later analysis + * @i915: i915 device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. + */ +void i915_capture_error_state(struct drm_i915_private *i915) +{ + struct i915_gpu_coredump *error; + + error = i915_gpu_coredump(i915); + if (IS_ERR(error)) { + cmpxchg(&i915->gpu_error.first_error, NULL, error); + return; + } + + i915_error_state_store(error); + i915_gpu_coredump_put(error); +} + +struct i915_gpu_coredump * i915_first_error_state(struct drm_i915_private *i915) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; if (!IS_ERR_OR_NULL(error)) - i915_gpu_state_get(error); + i915_gpu_coredump_get(error); spin_unlock_irq(&i915->gpu_error.lock); return error; @@ -1857,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915) void i915_reset_error_state(struct drm_i915_private *i915) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; @@ -1866,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915) spin_unlock_irq(&i915->gpu_error.lock); if (!IS_ERR_OR_NULL(error)) - i915_gpu_state_put(error); + i915_gpu_coredump_put(error); } void i915_disable_error_state(struct drm_i915_private *i915, int err) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 5d2c3372ff99..9109004956bd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -25,43 +25,100 @@ #include "i915_scheduler.h" struct drm_i915_private; +struct i915_vma_compress; +struct intel_engine_capture_vma; struct intel_overlay_error_state; struct intel_display_error_state; -struct i915_gpu_state { - struct kref ref; - ktime_t time; - ktime_t boottime; - ktime_t uptime; - unsigned long capture; +struct i915_vma_coredump { + struct i915_vma_coredump *next; - struct drm_i915_private *i915; + char name[20]; + + u64 gtt_offset; + u64 gtt_size; + u32 
gtt_page_sizes; + + int num_pages; + int page_count; + int unused; + u32 *pages[0]; +}; + +struct i915_request_coredump { + unsigned long flags; + pid_t pid; + u32 context; + u32 seqno; + u32 start; + u32 head; + u32 tail; + struct i915_sched_attr sched_attr; +}; + +struct intel_engine_coredump { + const struct intel_engine_cs *engine; - char error_msg[128]; bool simulated; - bool awake; - bool wakelock; - bool suspended; - int iommu; u32 reset_count; - u32 suspend_count; - struct intel_device_info device_info; - struct intel_runtime_info runtime_info; - struct intel_driver_caps driver_caps; - struct i915_params params; - struct i915_error_uc { - struct intel_uc_fw guc_fw; - struct intel_uc_fw huc_fw; - struct drm_i915_error_object *guc_log; - } uc; + /* position of active request inside the ring */ + u32 rq_head, rq_post, rq_tail; + + /* Register state */ + u32 ccid; + u32 start; + u32 tail; + u32 head; + u32 ctl; + u32 mode; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 bbstate; + u32 instpm; + u32 instps; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u64 faddr; + u32 rc_psmi; /* sleep state */ + struct intel_instdone instdone; + + struct i915_gem_context_coredump { + char comm[TASK_COMM_LEN]; + pid_t pid; + int active; + int guilty; + struct i915_sched_attr sched_attr; + } context; + + struct i915_vma_coredump *vma; + + struct i915_request_coredump execlist[EXECLIST_MAX_PORTS]; + unsigned int num_ports; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + + struct intel_engine_coredump *next; +}; + +struct intel_gt_coredump { + const struct intel_gt *_gt; + bool awake; + bool simulated; /* Generic register state */ u32 eir; u32 pgtbl_er; u32 ier; u32 gtier[6], ngtier; - u32 ccid; u32 derrmr; u32 forcewake; u32 error; /* gen6+ */ @@ -80,91 +137,45 @@ struct i915_gpu_state { u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; + + struct intel_engine_coredump *engine; + + struct intel_uc_coredump { + struct intel_uc_fw guc_fw; + struct intel_uc_fw huc_fw; + struct i915_vma_coredump *guc_log; + } *uc; + + struct intel_gt_coredump *next; +}; + +struct i915_gpu_coredump { + struct kref ref; + ktime_t time; + ktime_t boottime; + ktime_t uptime; + unsigned long capture; + + struct drm_i915_private *i915; + + struct intel_gt_coredump *gt; + + char error_msg[128]; + bool simulated; + bool wakelock; + bool suspended; + int iommu; + u32 reset_count; + u32 suspend_count; + + struct intel_device_info device_info; + struct intel_runtime_info runtime_info; + struct intel_driver_caps driver_caps; + struct i915_params params; + struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; - struct drm_i915_error_engine { - const struct intel_engine_cs *engine; - - /* Software tracked state */ - bool idle; - int num_requests; - u32 reset_count; - - /* position of active request inside the ring */ - u32 rq_head, rq_post, rq_tail; - - /* our own tracking of ring head and tail */ - u32 cpu_ring_head; - u32 cpu_ring_tail; - - /* Register state */ - u32 start; - u32 tail; - u32 head; - u32 ctl; - u32 mode; - u32 hws; - u32 ipeir; - u32 ipehr; - u32 bbstate; - u32 instpm; - u32 instps; - u64 bbaddr; - u64 acthd; - u32 fault_reg; - u64 faddr; - u32 rc_psmi; /* sleep state */ - struct intel_instdone instdone; - - struct drm_i915_error_context { - char comm[TASK_COMM_LEN]; - pid_t pid; - int active; - int guilty; - struct i915_sched_attr sched_attr; - } context; - - struct drm_i915_error_object { - u64 gtt_offset; - u64 gtt_size; - u32 gtt_page_sizes; - int 
num_pages; - int page_count; - int unused; - u32 *pages[0]; - } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; - - struct drm_i915_error_object **user_bo; - long user_bo_count; - - struct drm_i915_error_object *wa_ctx; - struct drm_i915_error_object *default_state; - - struct drm_i915_error_request { - unsigned long flags; - long jiffies; - pid_t pid; - u32 context; - u32 seqno; - u32 start; - u32 head; - u32 tail; - struct i915_sched_attr sched_attr; - } *requests, execlist[EXECLIST_MAX_PORTS]; - unsigned int num_ports; - - struct { - u32 gfx_mode; - union { - u64 pdp[4]; - u32 pp_dir_base; - }; - } vm_info; - - struct drm_i915_error_engine *next; - } *engine; - struct scatterlist *sgl, *fit; }; @@ -172,7 +183,7 @@ struct i915_gpu_error { /* For reset and error_state handling. */ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ - struct i915_gpu_state *first_error; + struct i915_gpu_coredump *first_error; atomic_t pending_fb_pin; @@ -200,41 +211,115 @@ struct drm_i915_error_state_buf { __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); -void i915_capture_error_state(struct drm_i915_private *dev_priv, - intel_engine_mask_t engine_mask, - const char *error_msg); +struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915); +void i915_capture_error_state(struct drm_i915_private *i915); + +struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp); + +struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp); + +struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp); + +struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp); -static inline struct i915_gpu_state * -i915_gpu_state_get(struct i915_gpu_state *gpu) +void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress); + +struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt); + +void i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress); + +void i915_error_state_store(struct i915_gpu_coredump *error); + +static inline struct i915_gpu_coredump * +i915_gpu_coredump_get(struct i915_gpu_coredump *gpu) { kref_get(&gpu->ref); return gpu; } -ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, - char *buf, loff_t offset, size_t count); +ssize_t +i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, + char *buf, loff_t offset, size_t count); -void __i915_gpu_state_free(struct kref *kref); -static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) +void __i915_gpu_coredump_free(struct kref *kref); +static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu) { if (gpu) - kref_put(&gpu->ref, __i915_gpu_state_free); + kref_put(&gpu->ref, __i915_gpu_coredump_free); } -struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); +struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915); void i915_reset_error_state(struct drm_i915_private *i915); void i915_disable_error_state(struct drm_i915_private *i915, int err); #else -static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, - u32 engine_mask, - const char *error_msg) +static 
inline void i915_capture_error_state(struct drm_i915_private *i915) +{ +} + +static inline struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp) +{ + return NULL; +} + +static inline void +intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress) +{ +} + +static inline struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt) +{ + return NULL; +} + +static inline void +i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) +{ +} + +static inline void +i915_error_state_store(struct drm_i915_private *i915, + struct i915_gpu_coredump *error) { } -static inline struct i915_gpu_state * +static inline struct i915_gpu_coredump * i915_first_error_state(struct drm_i915_private *i915) { return ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 42b79f577500..afc6aad9bf8c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -893,7 +893,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) } /** - * ivybridge_parity_work - Workqueue called when a parity error interrupt + * ivb_parity_work - Workqueue called when a parity error interrupt * occurred. * @work: workqueue struct * @@ -901,7 +901,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) * this event, userspace should try to remap the bad rows since statistically * it is likely the same row is more likely to go bad again. */ -static void ivybridge_parity_work(struct work_struct *work) +static void ivb_parity_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), l3_parity.error_work); @@ -2031,7 +2031,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, * 4 - Process the interrupt(s) that had bits set in the IIRs. * 5 - Re-enable Master Interrupt Control. 
*/ -static irqreturn_t ironlake_irq_handler(int irq, void *arg) +static irqreturn_t ilk_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; u32 de_iir, gt_iir, de_ier, sde_ier = 0; @@ -2742,7 +2742,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) /* drm_dma.h hooks */ -static void ironlake_irq_reset(struct drm_i915_private *dev_priv) +static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3225,7 +3225,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) spt_hpd_detection_setup(dev_priv); } -static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) +static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 display_mask, extra_mask; @@ -3899,7 +3899,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) intel_hpd_init_work(dev_priv); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work); for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; @@ -3980,7 +3980,7 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) return gen8_irq_handler; else - return ironlake_irq_handler; + return ilk_irq_handler; } } @@ -4003,7 +4003,7 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) gen8_irq_reset(dev_priv); else - ironlake_irq_reset(dev_priv); + ilk_irq_reset(dev_priv); } } @@ -4026,7 +4026,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) gen8_irq_postinstall(dev_priv); else - ironlake_irq_postinstall(dev_priv); + ilk_irq_postinstall(dev_priv); } } diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 318562ce64c0..b6376b25ef63 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -33,6 +33,9 @@ struct remap_pfn { struct mm_struct *mm; unsigned long pfn; pgprot_t prot; + + struct sgt_iter sgt; + resource_size_t iobase; }; static int remap_pfn(pte_t *pte, unsigned long addr, void *data) @@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data) return 0; } +#define use_dma(io) ((io) != -1) + +static inline unsigned long sgt_pfn(const struct remap_pfn *r) +{ + if (use_dma(r->iobase)) + return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT; + else + return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT); +} + +static int remap_sg(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + if (GEM_WARN_ON(!r->sgt.pfn)) + return -EINVAL; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, + pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot))); + r->pfn++; /* track insertions in case we need to unwind later */ + + r->sgt.curr += PAGE_SIZE; + if (r->sgt.curr >= r->sgt.max) + r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase)); + + return 0; +} + /** * remap_io_mapping - remap an IO mapping to userspace * @vma: user vma to map to @@ -80,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma, return 0; } + +/** + * remap_io_sg - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @size: size of map area + * @sgl: Start sg entry + * @iobase: Use stored dma address offset by this address or pfn if -1 + * + * Note: this is only 
safe if the mm semaphore is held when called. + */ +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase) +{ + struct remap_pfn r = { + .mm = vma->vm_mm, + .prot = vma->vm_page_prot, + .sgt = __sgt_iter(sgl, use_dma(iobase)), + .iobase = iobase, + }; + int err; + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); + + if (!use_dma(iobase)) + flush_cache_range(vma, addr, size); + + err = apply_to_page_range(r.mm, addr, size, remap_sg, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 9571611b4b16..83f01401b8b5 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -193,23 +193,23 @@ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i830_info = { +static const struct intel_device_info i830_info = { I830_FEATURES, PLATFORM(INTEL_I830), }; -static const struct intel_device_info intel_i845g_info = { +static const struct intel_device_info i845g_info = { I845_FEATURES, PLATFORM(INTEL_I845G), }; -static const struct intel_device_info intel_i85x_info = { +static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), .display.has_fbc = 1, }; -static const struct intel_device_info intel_i865g_info = { +static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), }; @@ -228,7 +228,7 @@ static const struct intel_device_info intel_i865g_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i915g_info = { +static const struct intel_device_info i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), .has_coherent_ggtt = false, @@ -239,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i915gm_info = { +static const struct intel_device_info i915gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I915GM), .is_mobile = 1, @@ -252,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945g_info = { +static const struct intel_device_info i945g_info = { GEN3_FEATURES, PLATFORM(INTEL_I945G), .display.has_hotplug = 1, @@ -263,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945gm_info = { +static const struct intel_device_info i945gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I945GM), .is_mobile = 1, @@ -277,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_g33_info = { +static const struct intel_device_info g33_info = { GEN3_FEATURES, PLATFORM(INTEL_G33), .display.has_hotplug = 1, .display.has_overlay = 1, }; -static const struct intel_device_info intel_pineview_g_info = { +static const struct intel_device_info pnv_g_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .display.has_hotplug = 1, .display.has_overlay = 1, }; -static const struct intel_device_info intel_pineview_m_info = { +static const struct intel_device_info pnv_m_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .is_mobile = 1, @@ -314,7 +314,7 @@ static const struct intel_device_info 
intel_pineview_m_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i965g_info = { +static const struct intel_device_info i965g_info = { GEN4_FEATURES, PLATFORM(INTEL_I965G), .display.has_overlay = 1, @@ -322,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = { .has_snoop = false, }; -static const struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, @@ -333,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = { .has_snoop = false, }; -static const struct intel_device_info intel_g45_info = { +static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), .engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; -static const struct intel_device_info intel_gm45_info = { +static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, @@ -365,12 +365,12 @@ static const struct intel_device_info intel_gm45_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info ilk_d_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), }; -static const struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info ilk_m_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, @@ -400,12 +400,12 @@ static const struct intel_device_info intel_ironlake_m_info = { GEN6_FEATURES, \ PLATFORM(INTEL_SANDYBRIDGE) -static const struct intel_device_info intel_sandybridge_d_gt1_info = { +static const struct intel_device_info snb_d_gt1_info = { SNB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_d_gt2_info = { +static const struct intel_device_info snb_d_gt2_info = { SNB_D_PLATFORM, .gt = 2, }; @@ -416,12 +416,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = { .is_mobile = 1 -static const struct intel_device_info intel_sandybridge_m_gt1_info = { +static const struct intel_device_info snb_m_gt1_info = { SNB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_gt2_info = { +static const struct intel_device_info snb_m_gt2_info = { SNB_M_PLATFORM, .gt = 2, }; @@ -450,12 +450,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { PLATFORM(INTEL_IVYBRIDGE), \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_d_gt1_info = { +static const struct intel_device_info ivb_d_gt1_info = { IVB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_d_gt2_info = { +static const struct intel_device_info ivb_d_gt2_info = { IVB_D_PLATFORM, .gt = 2, }; @@ -466,17 +466,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = { .is_mobile = 1, \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_m_gt1_info = { +static const struct intel_device_info ivb_m_gt1_info = { IVB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_gt2_info = { +static const struct intel_device_info ivb_m_gt2_info = { IVB_M_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info = { +static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, @@ -484,7 +484,7 @@ static const struct intel_device_info intel_ivybridge_q_info = { .has_l3_dpf = 1, }; -static const struct intel_device_info 
intel_valleyview_info = { +static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, @@ -523,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = { PLATFORM(INTEL_HASWELL), \ .has_l3_dpf = 1 -static const struct intel_device_info intel_haswell_gt1_info = { +static const struct intel_device_info hsw_gt1_info = { HSW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_haswell_gt2_info = { +static const struct intel_device_info hsw_gt2_info = { HSW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_haswell_gt3_info = { +static const struct intel_device_info hsw_gt3_info = { HSW_PLATFORM, .gt = 3, }; @@ -551,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = { GEN8_FEATURES, \ PLATFORM(INTEL_BROADWELL) -static const struct intel_device_info intel_broadwell_gt1_info = { +static const struct intel_device_info bdw_gt1_info = { BDW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_broadwell_gt2_info = { +static const struct intel_device_info bdw_gt2_info = { BDW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_broadwell_rsvd_info = { +static const struct intel_device_info bdw_rsvd_info = { BDW_PLATFORM, .gt = 3, /* According to the device ID those devices are GT3, they were @@ -569,14 +569,14 @@ static const struct intel_device_info intel_broadwell_rsvd_info = { */ }; -static const struct intel_device_info intel_broadwell_gt3_info = { +static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; -static const struct intel_device_info intel_cherryview_info = { +static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), @@ -621,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = { GEN9_FEATURES, \ PLATFORM(INTEL_SKYLAKE) -static const struct intel_device_info intel_skylake_gt1_info = { +static const struct intel_device_info skl_gt1_info = { SKL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_skylake_gt2_info = { +static const struct intel_device_info skl_gt2_info = { SKL_PLATFORM, .gt = 2, }; @@ -637,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = { BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info skl_gt3_info = { SKL_GT3_PLUS_PLATFORM, .gt = 3, }; -static const struct intel_device_info intel_skylake_gt4_info = { +static const struct intel_device_info skl_gt4_info = { SKL_GT3_PLUS_PLATFORM, .gt = 4, }; @@ -679,13 +679,13 @@ static const struct intel_device_info intel_skylake_gt4_info = { GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_broxton_info = { +static const struct intel_device_info bxt_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_BROXTON), .ddb_size = 512, }; -static const struct intel_device_info intel_geminilake_info = { +static const struct intel_device_info glk_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_GEMINILAKE), .ddb_size = 1024, @@ -696,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = { GEN9_FEATURES, \ PLATFORM(INTEL_KABYLAKE) -static const struct intel_device_info intel_kabylake_gt1_info = { +static const struct intel_device_info kbl_gt1_info = { KBL_PLATFORM, .gt = 1, }; -static const struct 
intel_device_info intel_kabylake_gt2_info = { +static const struct intel_device_info kbl_gt2_info = { KBL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_kabylake_gt3_info = { +static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, .engine_mask = @@ -717,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = { GEN9_FEATURES, \ PLATFORM(INTEL_COFFEELAKE) -static const struct intel_device_info intel_coffeelake_gt1_info = { +static const struct intel_device_info cfl_gt1_info = { CFL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_coffeelake_gt2_info = { +static const struct intel_device_info cfl_gt2_info = { CFL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info = { +static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, .engine_mask = @@ -742,7 +742,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { .has_coherent_ggtt = false, \ GLK_COLORS -static const struct intel_device_info intel_cannonlake_info = { +static const struct intel_device_info cnl_info = { GEN10_FEATURES, PLATFORM(INTEL_CANNONLAKE), .gt = 2, @@ -777,14 +777,14 @@ static const struct intel_device_info intel_cannonlake_info = { .has_logical_ring_elsq = 1, \ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 } -static const struct intel_device_info intel_icelake_11_info = { +static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; -static const struct intel_device_info intel_elkhartlake_info = { +static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, @@ -815,7 +815,7 @@ static const struct intel_device_info intel_elkhartlake_info = { .has_global_mocs = 1, \ .display.has_dsb = 1 -static const struct intel_device_info intel_tigerlake_12_info = { +static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), @@ -840,70 +840,70 @@ static const struct intel_device_info intel_tigerlake_12_info = { * PCI ID matches, otherwise we'll use the wrong info struct above. 
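
Editorial note, looking back at the remap_io_sg() helper added in i915_mm.c above: a minimal, hypothetical caller from a fault handler could look like the sketch below. The iobase convention is taken from the kernel-doc (-1 selects struct-page pfns, otherwise the sg DMA addresses are offset by iobase); the obj->mm.pages->sgl field and the mapping of errors to fault codes are assumptions, not taken from this patch.

static vm_fault_t sketch_fault(struct vm_area_struct *area,
			       struct drm_i915_gem_object *obj,
			       resource_size_t iomap)	/* -1 => use struct-page pfns */
{
	int err;

	/* mmap_sem is held by the fault path, as remap_io_sg() requires. */
	err = remap_io_sg(area, area->vm_start,
			  area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}
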
*/ static const struct pci_device_id pciidlist[] = { - INTEL_I830_IDS(&intel_i830_info), - INTEL_I845G_IDS(&intel_i845g_info), - INTEL_I85X_IDS(&intel_i85x_info), - INTEL_I865G_IDS(&intel_i865g_info), - INTEL_I915G_IDS(&intel_i915g_info), - INTEL_I915GM_IDS(&intel_i915gm_info), - INTEL_I945G_IDS(&intel_i945g_info), - INTEL_I945GM_IDS(&intel_i945gm_info), - INTEL_I965G_IDS(&intel_i965g_info), - INTEL_G33_IDS(&intel_g33_info), - INTEL_I965GM_IDS(&intel_i965gm_info), - INTEL_GM45_IDS(&intel_gm45_info), - INTEL_G45_IDS(&intel_g45_info), - INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info), - INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info), - INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), - INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), - INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), - INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), - INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), - INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), - INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), - INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), - INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), - INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), - INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), - INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), - INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), - INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), - INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), - INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), - INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), - INTEL_BXT_IDS(&intel_broxton_info), - INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), - INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), - INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info), - INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), - INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), - INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CML_U_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CML_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CNL_IDS(&intel_cannonlake_info), - INTEL_ICL_11_IDS(&intel_icelake_11_info), - INTEL_EHL_IDS(&intel_elkhartlake_info), - INTEL_TGL_12_IDS(&intel_tigerlake_12_info), + INTEL_I830_IDS(&i830_info), + INTEL_I845G_IDS(&i845g_info), + INTEL_I85X_IDS(&i85x_info), + INTEL_I865G_IDS(&i865g_info), + INTEL_I915G_IDS(&i915g_info), + INTEL_I915GM_IDS(&i915gm_info), + INTEL_I945G_IDS(&i945g_info), + INTEL_I945GM_IDS(&i945gm_info), + INTEL_I965G_IDS(&i965g_info), + INTEL_G33_IDS(&g33_info), + INTEL_I965GM_IDS(&i965gm_info), + INTEL_GM45_IDS(&gm45_info), + INTEL_G45_IDS(&g45_info), + INTEL_PINEVIEW_G_IDS(&pnv_g_info), + INTEL_PINEVIEW_M_IDS(&pnv_m_info), + INTEL_IRONLAKE_D_IDS(&ilk_d_info), + 
INTEL_IRONLAKE_M_IDS(&ilk_m_info), + INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info), + INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */ + INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info), + INTEL_HSW_GT1_IDS(&hsw_gt1_info), + INTEL_HSW_GT2_IDS(&hsw_gt2_info), + INTEL_HSW_GT3_IDS(&hsw_gt3_info), + INTEL_VLV_IDS(&vlv_info), + INTEL_BDW_GT1_IDS(&bdw_gt1_info), + INTEL_BDW_GT2_IDS(&bdw_gt2_info), + INTEL_BDW_GT3_IDS(&bdw_gt3_info), + INTEL_BDW_RSVD_IDS(&bdw_rsvd_info), + INTEL_CHV_IDS(&chv_info), + INTEL_SKL_GT1_IDS(&skl_gt1_info), + INTEL_SKL_GT2_IDS(&skl_gt2_info), + INTEL_SKL_GT3_IDS(&skl_gt3_info), + INTEL_SKL_GT4_IDS(&skl_gt4_info), + INTEL_BXT_IDS(&bxt_info), + INTEL_GLK_IDS(&glk_info), + INTEL_KBL_GT1_IDS(&kbl_gt1_info), + INTEL_KBL_GT2_IDS(&kbl_gt2_info), + INTEL_KBL_GT3_IDS(&kbl_gt3_info), + INTEL_KBL_GT4_IDS(&kbl_gt3_info), + INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info), + INTEL_CFL_S_GT1_IDS(&cfl_gt1_info), + INTEL_CFL_S_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_H_GT1_IDS(&cfl_gt1_info), + INTEL_CFL_H_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_U_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_U_GT3_IDS(&cfl_gt3_info), + INTEL_WHL_U_GT1_IDS(&cfl_gt1_info), + INTEL_WHL_U_GT2_IDS(&cfl_gt2_info), + INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info), + INTEL_WHL_U_GT3_IDS(&cfl_gt3_info), + INTEL_CML_GT1_IDS(&cfl_gt1_info), + INTEL_CML_GT2_IDS(&cfl_gt2_info), + INTEL_CML_U_GT1_IDS(&cfl_gt1_info), + INTEL_CML_U_GT2_IDS(&cfl_gt2_info), + INTEL_CNL_IDS(&cnl_info), + INTEL_ICL_11_IDS(&icl_info), + INTEL_EHL_IDS(&ehl_info), + INTEL_TGL_12_IDS(&tgl_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 84350c7bc711..0f556d80ba36 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2159,8 +2159,6 @@ static int gen8_modify_context(struct intel_context *ce, struct i915_request *rq; int err; - lockdep_assert_held(&ce->pin_mutex); - rq = intel_engine_create_kernel_request(ce->engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -2203,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx, if (ce->engine->class != RENDER_CLASS) continue; - err = intel_context_lock_pinned(ce); - if (err) - break; + /* Otherwise OA settings will be set upon first use */ + if (!intel_context_pin_if_active(ce)) + continue; flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); + err = gen8_modify_context(ce, flex, count); - /* Otherwise OA settings will be set upon first use */ - if (intel_context_is_pinned(ce)) - err = gen8_modify_context(ce, flex, count); - - intel_context_unlock_pinned(ce); + intel_context_unpin(ce); if (err) break; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index f3ef6700a5f2..28a82c849bac 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1117,12 +1117,17 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; - if (!is_igp(i915)) + if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, - "i915-%s", + "i915_%s", dev_name(i915->drm.dev)); - else + if (pmu->name) { + /* tools/perf reserves colons as special. 
*/ + strreplace((char *)pmu->name, ':', '_'); + } + } else { pmu->name = "i915"; + } if (!pmu->name) goto err; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bbfedeb00b7f..6cc55c103f67 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2244,26 +2244,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) -#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5) -#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4) -#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3) -#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2) -#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1) - -#define MG_MISC_SUS0_PORT1 0x168814 -#define MG_MISC_SUS0_PORT2 0x169814 -#define MG_MISC_SUS0_PORT3 0x16A814 -#define MG_MISC_SUS0_PORT4 0x16B814 -#define MG_MISC_SUS0(tc_port) \ - _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2)) -#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14) -#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14) -#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12) -#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11) -#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10) -#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7) -#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6) -#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. @@ -4177,7 +4157,13 @@ enum { #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS (1 << 20) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) #define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) #define CGPSF_CLKGATE_DIS (1 << 3) @@ -6808,6 +6794,7 @@ enum { #define PLANE_CTL_TILED_Y (4 << 10) #define PLANE_CTL_TILED_YF (5 << 10) #define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) +#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */ #define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ #define PLANE_CTL_ALPHA_DISABLE (0 << 4) #define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 44a0d1a950c5..be185886e4fc 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -658,7 +658,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->engine = ce->engine; rq->ring = ce->ring; rq->execution_mask = ce->engine->mask; - rq->flags = 0; RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 565322640378..031433691a06 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -51,7 +51,7 @@ struct i915_capture_list { #define RQ_TRACE(rq, fmt, ...) do { \ const struct i915_request *rq__ = (rq); \ - ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \ + ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \ rq__->fence.context, rq__->fence.seqno, \ hwsp_seqno(rq__), ##__VA_ARGS__); \ } while (0) @@ -77,6 +77,38 @@ enum { * a request is on the various signal_list. 
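
Editorial note on the i915_pmu.c naming change above, with a made-up PCI address: a card at 0000:03:00.0 was previously registered as "i915-0000:03:00.0" and now becomes "i915_0000_03_00.0", since tools/perf treats ':' as a separator.

static void sketch_pmu_name(void)
{
	/* kasprintf("i915_%s", dev_name(dev)), address purely illustrative */
	char name[] = "i915_0000:03:00.0";

	strreplace(name, ':', '_');	/* -> "i915_0000_03_00.0" */
}
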
*/ I915_FENCE_FLAG_SIGNAL, + + /* + * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted + * + * The execution of some requests should not be interrupted. This is + * a sensitive operation as it makes the request super important, + * blocking other higher priority work. Abuse of this flag will + * lead to quality of service issues. + */ + I915_FENCE_FLAG_NOPREEMPT, + + /* + * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue + * + * A high priority sentinel request may be submitted to clear the + * submission queue. As it will be the only request in-flight, upon + * execution all other active requests will have been preempted and + * unsubmitted. This preemptive pulse is used to re-evaluate the + * in-flight requests, particularly in cases where an active context + * is banned and those active requests need to be cancelled. + */ + I915_FENCE_FLAG_SENTINEL, + + /* + * I915_FENCE_FLAG_BOOST - upclock the gpu for this request + * + * Some requests are more important than others! In particular, a + * request that the user is waiting on is typically required for + * interactive latency, for which we want to minimise by upclocking + * the GPU. Here we track such boost requests on a per-request basis. + */ + I915_FENCE_FLAG_BOOST, }; /** @@ -225,11 +257,6 @@ struct i915_request { /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; - unsigned long flags; -#define I915_REQUEST_WAITBOOST BIT(0) -#define I915_REQUEST_NOPREEMPT BIT(1) -#define I915_REQUEST_SENTINEL BIT(2) - /** timeline->request entry for this request */ struct list_head link; @@ -442,18 +469,18 @@ static inline void i915_request_mark_complete(struct i915_request *rq) static inline bool i915_request_has_waitboost(const struct i915_request *rq) { - return rq->flags & I915_REQUEST_WAITBOOST; + return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); } static inline bool i915_request_has_nopreempt(const struct i915_request *rq) { /* Preemption should only be disabled very rarely */ - return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); + return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags)); } static inline bool i915_request_has_sentinel(const struct i915_request *rq) { - return unlikely(rq->flags & I915_REQUEST_SENTINEL); + return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags)); } static inline struct intel_timeline * diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index ad2b1b833d7b..0cef3130db05 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -498,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device *kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct i915_gpu_state *gpu; + struct i915_gpu_coredump *gpu; ssize_t ret; gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { ret = PTR_ERR(gpu); } else if (gpu) { - ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); - i915_gpu_state_put(gpu); + ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count); + i915_gpu_coredump_put(gpu); } else { const char *str = "No error state collected\n"; size_t len = strlen(str); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index cbd783c31adb..17d7c525ea5c 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -423,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *ptr; int err; - /* 
Access through the GTT requires the device to be awake. */ - assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm); if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { err = -ENODEV; goto err; @@ -456,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) goto err_unpin; i915_vma_set_ggtt_write(vma); + + /* NB Access through the GTT requires the device to be awake. */ return ptr; err_unpin: @@ -858,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma) int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; + intel_wakeref_t wakeref = 0; unsigned int bound; int err; @@ -883,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } } + if (flags & PIN_GLOBAL) + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + /* No more allocations allowed once we hold vm->mutex */ err = mutex_lock_interruptible(&vma->vm->mutex); if (err) @@ -946,6 +950,8 @@ err_unlock: err_fence: if (work) dma_fence_work_commit(&work->base); + if (wakeref) + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); err_pages: vma_put_pages(vma); return err; @@ -1246,11 +1252,16 @@ int __i915_vma_unbind(struct i915_vma *vma) int i915_vma_unbind(struct i915_vma *vma) { struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref = 0; int err; if (!drm_mm_node_allocated(&vma->node)) return 0; + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + err = mutex_lock_interruptible(&vm->mutex); if (err) return err; @@ -1258,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma) err = __i915_vma_unbind(vma); mutex_unlock(&vm->mutex); + if (wakeref) + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return err; } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 5fffa3c58908..02b31a62951e 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -30,148 +30,14 @@ #include <drm/drm_mm.h> +#include "gem/i915_gem_object.h" + #include "i915_gem_gtt.h" #include "i915_gem_fence_reg.h" -#include "gem/i915_gem_object.h" #include "i915_active.h" #include "i915_request.h" - -enum i915_cache_level; - -/** - * DOC: Virtual Memory Address - * - * A VMA represents a GEM BO that is bound into an address space. Therefore, a - * VMA's presence cannot be guaranteed before binding, or after unbinding the - * object into/from the address space. - * - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime - * will always be <= an objects lifetime. So object refcounting should cover us. - */ -struct i915_vma { - struct drm_mm_node node; - - struct i915_address_space *vm; - const struct i915_vma_ops *ops; - - struct drm_i915_gem_object *obj; - struct dma_resv *resv; /** Alias of obj->resv */ - - struct sg_table *pages; - void __iomem *iomap; - void *private; /* owned by creator */ - - struct i915_fence_reg *fence; - - u64 size; - u64 display_alignment; - struct i915_page_sizes page_sizes; - - /* mmap-offset associated with fencing for this vma */ - struct i915_mmap_offset *mmo; - - u32 fence_size; - u32 fence_alignment; - - /** - * Count of the number of times this vma has been opened by different - * handles (but same file) for execbuf, i.e. the number of aliases - * that exist in the ctx->handle_vmas LUT for this vma. 
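
Editorial note, stepping back to the i915_request.h change above: the per-request flags now live in rq->fence.flags, so they are set and queried with the ordinary bitops rather than a private bitmask. The setter below is not part of the hunks shown and is assumed from the accessors.

static void sketch_mark_nopreempt(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
}

static bool sketch_is_nopreempt(const struct i915_request *rq)
{
	return i915_request_has_nopreempt(rq);	/* test_bit() on rq->fence.flags */
}
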
- */ - struct kref ref; - atomic_t open_count; - atomic_t flags; - /** - * How many users have pinned this object in GTT space. - * - * This is a tightly bound, fairly small number of users, so we - * stuff inside the flags field so that we can both check for overflow - * and detect a no-op i915_vma_pin() in a single check, while also - * pinning the vma. - * - * The worst case display setup would have the same vma pinned for - * use on each plane on each crtc, while also building the next atomic - * state and holding a pin for the length of the cleanup queue. In the - * future, the flip queue may be increased from 1. - * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84 - * - * For GEM, the number of concurrent users for pwrite/pread is - * unbounded. For execbuffer, it is currently one but will in future - * be extended to allow multiple clients to pin vma concurrently. - * - * We also use suballocated pages, with each suballocation claiming - * its own pin on the shared vma. At present, this is limited to - * exclusive cachelines of a single page, so a maximum of 64 possible - * users. - */ -#define I915_VMA_PIN_MASK 0x3ff -#define I915_VMA_OVERFLOW 0x200 - - /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND_BIT 10 -#define I915_VMA_LOCAL_BIND_BIT 11 - -#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) -#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) - -#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) - -#define I915_VMA_ALLOC_BIT 12 -#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) - -#define I915_VMA_ERROR_BIT 13 -#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) - -#define I915_VMA_GGTT_BIT 14 -#define I915_VMA_CAN_FENCE_BIT 15 -#define I915_VMA_USERFAULT_BIT 16 -#define I915_VMA_GGTT_WRITE_BIT 17 - -#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) -#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) -#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) -#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) - - struct i915_active active; - -#define I915_VMA_PAGES_BIAS 24 -#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) - atomic_t pages_count; /* number of active binds to the pages */ - struct mutex pages_mutex; /* protect acquire/release of backing pages */ - - /** - * Support different GGTT views into the same object. - * This means there can be multiple VMA mappings per object and per VM. - * i915_ggtt_view_type is used to distinguish between those entries. - * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also - * assumed in GEM functions which take no ggtt view parameter. - */ - struct i915_ggtt_view ggtt_view; - - /** This object's place on the active/inactive lists */ - struct list_head vm_link; - - struct list_head obj_link; /* Link in the object's VMA list */ - struct rb_node obj_node; - struct hlist_node obj_hash; - - /** This vma's place in the execbuf reservation list */ - struct list_head exec_link; - struct list_head reloc_link; - - /** This vma's place in the eviction list */ - struct list_head evict_link; - - struct list_head closed_link; - - /** - * Used for performing relocations during execbuffer insertion. 
- */ - unsigned int *exec_flags; - struct hlist_node exec_node; - u32 exec_handle; -}; +#include "i915_vma_types.h" struct i915_vma * i915_vma_instance(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h new file mode 100644 index 000000000000..e0942efd5236 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_VMA_TYPES_H__ +#define __I915_VMA_TYPES_H__ + +#include <linux/rbtree.h> + +#include <drm/drm_mm.h> + +#include "gem/i915_gem_object_types.h" + +enum i915_cache_level; + +/** + * DOC: Global GTT views + * + * Background and previous state + * + * Historically objects could exists (be bound) in global GTT space only as + * singular instances with a view representing all of the object's backing pages + * in a linear fashion. This view will be called a normal view. + * + * To support multiple views of the same object, where the number of mapped + * pages is not equal to the backing store, or where the layout of the pages + * is not linear, concept of a GGTT view was added. + * + * One example of an alternative view is a stereo display driven by a single + * image. In this case we would have a framebuffer looking like this + * (2x2 pages): + * + * 12 + * 34 + * + * Above would represent a normal GGTT view as normally mapped for GPU or CPU + * rendering. In contrast, fed to the display engine would be an alternative + * view which could look something like this: + * + * 1212 + * 3434 + * + * In this example both the size and layout of pages in the alternative view is + * different from the normal view. + * + * Implementation and usage + * + * GGTT views are implemented using VMAs and are distinguished via enum + * i915_ggtt_view_type and struct i915_ggtt_view. + * + * A new flavour of core GEM functions which work with GGTT bound objects were + * added with the _ggtt_ infix, and sometimes with _view postfix to avoid + * renaming in large amounts of code. They take the struct i915_ggtt_view + * parameter encapsulating all metadata required to implement a view. + * + * As a helper for callers which are only interested in the normal view, + * globally const i915_ggtt_view_normal singleton instance exists. 
All old core + * GEM API functions, the ones not taking the view parameter, are operating on, + * or with the normal GGTT view. + * + * Code wanting to add or use a new GGTT view needs to: + * + * 1. Add a new enum with a suitable name. + * 2. Extend the metadata in the i915_ggtt_view structure if required. + * 3. Add support to i915_get_vma_pages(). + * + * New views are required to build a scatter-gather table from within the + * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and + * exists for the lifetime of an VMA. + * + * Core API is designed to have copy semantics which means that passed in + * struct i915_ggtt_view does not need to be persistent (left around after + * calling the core API functions). + * + */ + +struct intel_remapped_plane_info { + /* in gtt pages */ + unsigned int width, height, stride, offset; +} __packed; + +struct intel_remapped_info { + struct intel_remapped_plane_info plane[2]; + unsigned int unused_mbz; +} __packed; + +struct intel_rotation_info { + struct intel_remapped_plane_info plane[2]; +} __packed; + +struct intel_partial_info { + u64 offset; + unsigned int size; +} __packed; + +enum i915_ggtt_view_type { + I915_GGTT_VIEW_NORMAL = 0, + I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), + I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), + I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info), +}; + +static inline void assert_i915_gem_gtt_types(void) +{ + BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int)); + + /* Check that rotation/remapped shares offsets for simplicity */ + BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) != + offsetof(struct intel_rotation_info, plane[0])); + BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) != + offsetofend(struct intel_rotation_info, plane[1])); + + /* As we encode the size of each branch inside the union into its type, + * we have to be careful that each branch has a unique size. + */ + switch ((enum i915_ggtt_view_type)0) { + case I915_GGTT_VIEW_NORMAL: + case I915_GGTT_VIEW_PARTIAL: + case I915_GGTT_VIEW_ROTATED: + case I915_GGTT_VIEW_REMAPPED: + /* gcc complains if these are identical cases */ + break; + } +} + +struct i915_ggtt_view { + enum i915_ggtt_view_type type; + union { + /* Members need to contain no holes/padding */ + struct intel_partial_info partial; + struct intel_rotation_info rotated; + struct intel_remapped_info remapped; + }; +}; + +/** + * DOC: Virtual Memory Address + * + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (ie. no refcounting), a VMA's lifetime + * will always be <= an objects lifetime. So object refcounting should cover us. 
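
Editorial sketch to make the i915_vma_types.h definitions above a little more concrete: describing a partial GGTT view and handing it to i915_vma_instance(), whose declaration remains in i915_vma.h. The page counts are arbitrary and the obj/ggtt parameters are assumed to come from the caller.

static struct i915_vma *sketch_partial_view(struct drm_i915_gem_object *obj,
					    struct i915_ggtt *ggtt)
{
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
		.partial = {
			.offset = 0,	/* start of the object, in GTT pages */
			.size = 16,	/* number of pages to map */
		},
	};

	return i915_vma_instance(obj, &ggtt->vm, &view);
}
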
+ */ +struct i915_vma { + struct drm_mm_node node; + + struct i915_address_space *vm; + const struct i915_vma_ops *ops; + + struct drm_i915_gem_object *obj; + struct dma_resv *resv; /** Alias of obj->resv */ + + struct sg_table *pages; + void __iomem *iomap; + void *private; /* owned by creator */ + + struct i915_fence_reg *fence; + + u64 size; + u64 display_alignment; + struct i915_page_sizes page_sizes; + + /* mmap-offset associated with fencing for this vma */ + struct i915_mmap_offset *mmo; + + u32 fence_size; + u32 fence_alignment; + + /** + * Count of the number of times this vma has been opened by different + * handles (but same file) for execbuf, i.e. the number of aliases + * that exist in the ctx->handle_vmas LUT for this vma. + */ + struct kref ref; + atomic_t open_count; + atomic_t flags; + /** + * How many users have pinned this object in GTT space. + * + * This is a tightly bound, fairly small number of users, so we + * stuff inside the flags field so that we can both check for overflow + * and detect a no-op i915_vma_pin() in a single check, while also + * pinning the vma. + * + * The worst case display setup would have the same vma pinned for + * use on each plane on each crtc, while also building the next atomic + * state and holding a pin for the length of the cleanup queue. In the + * future, the flip queue may be increased from 1. + * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84 + * + * For GEM, the number of concurrent users for pwrite/pread is + * unbounded. For execbuffer, it is currently one but will in future + * be extended to allow multiple clients to pin vma concurrently. + * + * We also use suballocated pages, with each suballocation claiming + * its own pin on the shared vma. At present, this is limited to + * exclusive cachelines of a single page, so a maximum of 64 possible + * users. + */ +#define I915_VMA_PIN_MASK 0x3ff +#define I915_VMA_OVERFLOW 0x200 + + /** Flags and address space this VMA is bound to */ +#define I915_VMA_GLOBAL_BIND_BIT 10 +#define I915_VMA_LOCAL_BIND_BIT 11 + +#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) +#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) + +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) + +#define I915_VMA_ALLOC_BIT 12 +#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) + +#define I915_VMA_ERROR_BIT 13 +#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) + +#define I915_VMA_GGTT_BIT 14 +#define I915_VMA_CAN_FENCE_BIT 15 +#define I915_VMA_USERFAULT_BIT 16 +#define I915_VMA_GGTT_WRITE_BIT 17 + +#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) +#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) +#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) +#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) + + struct i915_active active; + +#define I915_VMA_PAGES_BIAS 24 +#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) + atomic_t pages_count; /* number of active binds to the pages */ + struct mutex pages_mutex; /* protect acquire/release of backing pages */ + + /** + * Support different GGTT views into the same object. + * This means there can be multiple VMA mappings per object and per VM. + * i915_ggtt_view_type is used to distinguish between those entries. + * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also + * assumed in GEM functions which take no ggtt view parameter. 
+ */ + struct i915_ggtt_view ggtt_view; + + /** This object's place on the active/inactive lists */ + struct list_head vm_link; + + struct list_head obj_link; /* Link in the object's VMA list */ + struct rb_node obj_node; + struct hlist_node obj_hash; + + /** This vma's place in the execbuf reservation list */ + struct list_head exec_link; + struct list_head reloc_link; + + /** This vma's place in the eviction list */ + struct list_head evict_link; + + struct list_head closed_link; + + /** + * Used for performing relocations during execbuffer insertion. + */ + unsigned int *exec_flags; + struct hlist_node exec_node; + u32 exec_handle; +}; + +#endif + diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 1acb5db77431..6670a0763be2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -519,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) } } -static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) +static void bdw_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int s, ss; @@ -600,7 +600,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->has_eu_pg = 0; } -static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) +static void hsw_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; @@ -1021,11 +1021,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) /* Initialize slice/subslice/EU info */ if (IS_HASWELL(dev_priv)) - haswell_sseu_info_init(dev_priv); + hsw_sseu_info_init(dev_priv); else if (IS_CHERRYVIEW(dev_priv)) cherryview_sseu_info_init(dev_priv); else if (IS_BROADWELL(dev_priv)) - broadwell_sseu_info_init(dev_priv); + bdw_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 9)) gen9_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 10)) @@ -1093,7 +1093,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) * hooked up to an SFC (Scaler & Format Converter) unit. * In TGL each VDBOX has access to an SFC. 
*/ - if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0) + if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0) RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n", diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index e24c280e5930..d0d038b3cd79 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -16,6 +16,20 @@ const u32 intel_region_map[] = { [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0), }; +struct intel_memory_region * +intel_memory_region_by_type(struct drm_i915_private *i915, + enum intel_memory_type mem_type) +{ + struct intel_memory_region *mr; + int id; + + for_each_memory_region(mr, i915, id) + if (mr->type == mem_type) + return mr; + + return NULL; +} + static u64 intel_memory_region_free_pages(struct intel_memory_region *mem, struct list_head *blocks) @@ -37,7 +51,7 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, struct list_head *blocks) { mutex_lock(&mem->mm_lock); - intel_memory_region_free_pages(mem, blocks); + mem->avail += intel_memory_region_free_pages(mem, blocks); mutex_unlock(&mem->mm_lock); } @@ -106,6 +120,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, break; } while (1); + mem->avail -= size; mutex_unlock(&mem->mm_lock); return 0; @@ -164,6 +179,8 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->io_start = io_start; mem->min_page_size = min_page_size; mem->ops = ops; + mem->total = size; + mem->avail = mem->total; mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); @@ -185,6 +202,16 @@ err_free: return ERR_PTR(err); } +void intel_memory_region_set_name(struct intel_memory_region *mem, + const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vsnprintf(mem->name, sizeof(mem->name), fmt, ap); + va_end(ap); +} + static void __intel_memory_region_destroy(struct kref *kref) { struct intel_memory_region *mem = diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 238722009677..232490d89a83 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -47,6 +47,10 @@ enum intel_region_id { #define I915_ALLOC_MIN_PAGE_SIZE BIT(0) #define I915_ALLOC_CONTIGUOUS BIT(1) +#define for_each_memory_region(mr, i915, id) \ + for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \ + for_each_if((mr) = (i915)->mm.regions[id]) + /** * Memory regions encoded as type | instance */ @@ -82,10 +86,13 @@ struct intel_memory_region { resource_size_t io_start; resource_size_t min_page_size; + resource_size_t total; + resource_size_t avail; unsigned int type; unsigned int instance; unsigned int id; + char name[8]; dma_addr_t remap_addr; @@ -125,5 +132,12 @@ void intel_memory_region_put(struct intel_memory_region *mem); int intel_memory_regions_hw_probe(struct drm_i915_private *i915); void intel_memory_regions_driver_release(struct drm_i915_private *i915); +struct intel_memory_region * +intel_memory_region_by_type(struct drm_i915_private *i915, + enum intel_memory_type mem_type); + +__printf(2, 3) void +intel_memory_region_set_name(struct intel_memory_region *mem, + const char *fmt, ...); #endif diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 43b68b5fc562..4ed60e1f01db 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -12,90 +12,91 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) { switch (id) { case INTEL_PCH_IBX_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n"); WARN_ON(!IS_GEN(dev_priv, 5)); return PCH_IBX; case INTEL_PCH_CPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found CougarPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n"); WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); return PCH_CPT; case INTEL_PCH_PPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found PantherPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n"); WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); /* PantherPoint is CPT compatible */ return PCH_CPT; case INTEL_PCH_LPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found LynxPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_WPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); /* WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); /* 
WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_SPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); /* KBP is SPT compatible */ return PCH_SPT; case INTEL_PCH_CNP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); + drm_dbg_kms(&dev_priv->drm, + "Found Cannon Lake LP PCH (CNP-LP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CMP_DEVICE_ID_TYPE: case INTEL_PCH_CMP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n"); WARN_ON(!IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ return PCH_CNP; case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n"); WARN_ON(!IS_COFFEELAKE(dev_priv)); /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Ice Lake PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; case INTEL_PCH_TGP_DEVICE_ID_TYPE: case INTEL_PCH_TGP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); WARN_ON(!IS_TIGERLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: case INTEL_PCH_JSP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Jasper Lake PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_JSP; default: @@ -145,9 +146,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) id = INTEL_PCH_IBX_DEVICE_ID_TYPE; if (id) - DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); + drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id); else - DRM_DEBUG_KMS("Assuming no PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n"); return id; } @@ -201,13 +202,14 @@ void intel_detect_pch(struct drm_i915_private *dev_priv) * display. 
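
Editorial note, returning to the intel_memory_region changes above: the new iterator, type lookup and naming helpers are intended to compose roughly as sketched below. The debug message, the choice of the stolen region and the name string are illustrative only.

static void sketch_regions(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;
	int id;

	/* Walk every probed region and report the new total/avail counters. */
	for_each_memory_region(mr, i915, id)
		drm_dbg(&i915->drm, "%s: total %pa, avail %pa\n",
			mr->name, &mr->total, &mr->avail);

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_STOLEN);
	if (mr)
		intel_memory_region_set_name(mr, "stolen");
}
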
*/ if (pch && !HAS_DISPLAY(dev_priv)) { - DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n"); + drm_dbg_kms(&dev_priv->drm, + "Display disabled, reverting to NOP PCH\n"); dev_priv->pch_type = PCH_NOP; dev_priv->pch_id = 0; } if (!pch) - DRM_DEBUG_KMS("No PCH found.\n"); + drm_dbg_kms(&dev_priv->drm, "No PCH found.\n"); pci_dev_put(pch); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 31ec82337e4f..bd2d30ecc030 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -140,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) } -static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) +static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) { u32 tmp; @@ -178,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; } -static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) +static void ilk_get_mem_freq(struct drm_i915_private *dev_priv) { u16 ddrpll, csipll; @@ -199,8 +199,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->mem_freq = 1600; break; default: - DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", - ddrpll & 0xff); + drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", + ddrpll & 0xff); dev_priv->mem_freq = 0; break; } @@ -228,8 +228,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->fsb_freq = 6400; break; default: - DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", - csipll & 0x3ff); + drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); dev_priv->fsb_freq = 0; break; } @@ -314,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) - DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); + drm_err(&dev_priv->drm, + "timed out waiting for Punit DDR DVFS request\n"); vlv_punit_put(dev_priv); } @@ -383,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl trace_intel_memory_cxsr(dev_priv, was_enabled, enable); - DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n", - enableddisabled(enable), - enableddisabled(was_enabled)); + drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", + enableddisabled(enable), + enableddisabled(was_enabled)); return was_enabled; } @@ -510,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, if (i9xx_plane == PLANE_B) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } @@ -527,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } @@ -542,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv, size = dsparb & 0x7f; size >>= 2; /* Convert to cachelines */ - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + 
drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } /* Pineview has different values for various configs */ -static const struct intel_watermark_params pineview_display_wm = { +static const struct intel_watermark_params pnv_display_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_display_hplloff_wm = { + +static const struct intel_watermark_params pnv_display_hplloff_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_HPLLOFF_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_cursor_wm = { + +static const struct intel_watermark_params pnv_cursor_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_cursor_hplloff_wm = { + +static const struct intel_watermark_params pnv_cursor_hplloff_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i965_cursor_wm_info = { .fifo_size = I965_CURSOR_FIFO, .max_wm = I965_CURSOR_MAX_WM, @@ -584,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i945_wm_info = { .fifo_size = I945_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -591,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i915_wm_info = { .fifo_size = I915_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -598,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i830_a_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -605,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = { .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i830_bc_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM/2, @@ -612,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = { .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i845_wm_info = { .fifo_size = I830_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -848,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) return enabled; } -static void pineview_update_wm(struct intel_crtc *unused_crtc) +static void pnv_update_wm(struct intel_crtc *unused_crtc) { struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; @@ -861,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + drm_dbg_kms(&dev_priv->drm, + "Unknown FSB/MEM found, disable CxSR\n"); intel_set_memory_cxsr(dev_priv, false); return; } @@ 
-876,18 +887,18 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; /* Display SR */ - wm = intel_calculate_wm(clock, &pineview_display_wm, - pineview_display_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_display_wm, + pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); I915_WRITE(DSPFW1, reg); - DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_wm, - pineview_display_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_cursor_wm, + pnv_display_wm.fifo_size, 4, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; @@ -895,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; @@ -904,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); I915_WRITE(DSPFW3, reg); - DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); intel_set_memory_cxsr(dev_priv, true); } else { @@ -1202,6 +1213,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); enum plane_id plane_id = plane->id; bool dirty = false; @@ -1254,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) { - DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n", - plane->base.name, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", + plane->base.name, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); if (plane_id == PLANE_PRIMARY) - DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n", - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); + drm_dbg_kms(&dev_priv->drm, + "FBC watermarks: SR=%d, HPLL=%d\n", + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); } return dirty; @@ -1781,6 +1795,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = 
to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); int level; @@ -1808,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) - DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", - plane->base.name, - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", + plane->base.name, + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); return dirty; } @@ -2227,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) if (srwm < 0) srwm = 1; srwm &= 0x1ff; - DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", - entries, srwm); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d, wm: %d\n", + entries, srwm); entries = intel_wm_method2(clock, htotal, crtc->base.cursor->state->crtc_w, 4, @@ -2241,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); + drm_dbg_kms(&dev_priv->drm, + "self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); cxsr_enabled = true; } else { @@ -2251,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) intel_set_memory_cxsr(dev_priv, false); } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); /* 965 has limitations... 
*/ I915_WRITE(DSPFW1, FW_WM(srwm, SR) | @@ -2342,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) planeb_wm = wm_info->max_wm; } - DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + drm_dbg_kms(&dev_priv->drm, + "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); if (IS_I915GM(dev_priv) && enabled) { struct drm_i915_gem_object *obj; @@ -2384,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) entries = intel_wm_method2(clock, htotal, hdisplay, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; if (srwm < 0) srwm = 1; @@ -2396,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(FW_BLC_SELF, srwm & 0x3f); } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); fwater_hi = (cwm & 0x1f); @@ -2433,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) fwater_lo = I915_READ(FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d\n", planea_wm); I915_WRITE(FW_BLC, fwater_lo); } @@ -2832,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, &val, NULL); if (ret) { - DRM_ERROR("SKL Mailbox read error = %d\n", ret); + drm_err(&dev_priv->drm, + "SKL Mailbox read error = %d\n", ret); return; } @@ -2850,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { - DRM_ERROR("SKL Mailbox read error = %d\n", ret); + drm_err(&dev_priv->drm, + "SKL Mailbox read error = %d\n", ret); return; } @@ -2968,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_DEBUG_KMS("%s WM%d latency not provided\n", - name, level); + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency not provided\n", + name, level); continue; } @@ -2982,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, else if (level > 0) latency *= 5; - DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", - name, level, wm[level], - latency / 10, latency % 10); + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency %u (%u.%u usec)\n", name, level, + wm[level], latency / 10, latency % 10); } } @@ -3018,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) if (!changed) return; - DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + drm_dbg_kms(&dev_priv->drm, + "WM latency values increased to avoid potential underruns\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); @@ -3046,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) dev_priv->wm.spr_latency[3] = 0; dev_priv->wm.cur_latency[3] = 0; - DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); + drm_dbg_kms(&dev_priv->drm, + "LP3 watermarks disabled due 
to potential for lost interrupts\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); @@ -3096,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, /* At least LP0 must be valid */ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { - DRM_DEBUG_KMS("LP0 watermark invalid\n"); + drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); return false; } @@ -3673,7 +3701,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) return; } - DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n"); + drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n"); } else if (IS_GEN(dev_priv, 11)) { dev_priv->sagv_block_time_us = 10; return; @@ -3713,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_ENABLED) return 0; - DRM_DEBUG_KMS("Enabling SAGV\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); @@ -3724,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) * don't actually have SAGV. */ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); + drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to enable SAGV\n"); + drm_err(&dev_priv->drm, "Failed to enable SAGV\n"); return ret; } @@ -3747,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_DISABLED) return 0; - DRM_DEBUG_KMS("Disabling SAGV\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n"); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, @@ -3758,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) * don't actually have SAGV. 
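Both SAGV control paths above share one error policy around the pcode mailbox; a condensed sketch of it, using the function and constant names from the hunks (the wrapper itself is illustrative):

/*
 * Sketch of the SAGV error policy: on Skylake, -ENXIO from the pcode
 * mailbox means the firmware simply has no SAGV, which is recorded and
 * treated as success; any other negative return is a real failure.
 */
static int sagv_pcode_result(struct drm_i915_private *dev_priv, int ret)
{
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	}

	return ret < 0 ? ret : 0;
}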
*/ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); + drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to disable SAGV (%d)\n", ret); + drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret); return ret; } @@ -4331,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, } if (level < 0) { - DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations"); - DRM_DEBUG_KMS("minimum required %d/%d\n", blocks, - alloc_size); + drm_dbg_kms(&dev_priv->drm, + "Requested display configuration exceeds system DDB limitations"); + drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", + blocks, alloc_size); return -EINVAL; } @@ -4561,7 +4590,8 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, /* only planar format has two planes */ if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(format, modifier)) { - DRM_DEBUG_KMS("Non planar format have single plane\n"); + drm_dbg_kms(&dev_priv->drm, + "Non planar format have single plane\n"); return -EINVAL; } @@ -5260,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", - plane->base.base.id, plane->base.name, - old->start, old->end, new->start, new->end, - skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); } for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { @@ -5276,70 +5307,74 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), - enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), - enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), - enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), - enast(old_wm->trans_wm.plane_en), - enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), - enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), - enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), - enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), - enast(new_wm->trans_wm.plane_en)); - - DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), + enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), + enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), + enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), + enast(old_wm->trans_wm.plane_en), + enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), + enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), + enast(new_wm->wm[4].plane_en), 
enast(new_wm->wm[5].plane_en), + enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), + enast(new_wm->trans_wm.plane_en)); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, - enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, - - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); - - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, - old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, - old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, - old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, - old_wm->trans_wm.plane_res_b, - new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, - new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, - new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, - new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, - new_wm->trans_wm.plane_res_b); - - DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, - old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, - old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, - old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, - old_wm->trans_wm.min_ddb_alloc, - new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, - new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, - new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, - new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, - new_wm->trans_wm.min_ddb_alloc); + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, + + 
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, + old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, + old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, + old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, + old_wm->trans_wm.plane_res_b, + new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, + new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, + new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, + new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, + new_wm->trans_wm.plane_res_b); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc); } } } @@ -5931,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) crtc_state->wm.g4x.optimal = *active; crtc_state->wm.g4x.intermediate = *active; - DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0]); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0]); } - DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", - wm->sr.plane, wm->sr.cursor, wm->sr.fbc); - DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", - wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); - DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n", - yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); + drm_dbg_kms(&dev_priv->drm, + "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", + wm->sr.plane, wm->sr.cursor, wm->sr.fbc); + drm_dbg_kms(&dev_priv->drm, + "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", + wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); + drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", + yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); } void g4x_wm_sanitize(struct drm_i915_private *dev_priv) @@ -6035,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) if (wait_for((vlv_punit_read(dev_priv, 
PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { - DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " - "assuming DDR DVFS is disabled\n"); + drm_dbg_kms(&dev_priv->drm, + "Punit not acking DDR DVFS request, " + "assuming DDR DVFS is disabled\n"); dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); @@ -6087,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) crtc_state->wm.vlv.optimal = *active; crtc_state->wm.vlv.intermediate = *active; - DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0], - wm->pipe[pipe].plane[PLANE_SPRITE1]); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0], + wm->pipe[pipe].plane[PLANE_SPRITE1]); } - DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", - wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", + wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); } void vlv_wm_sanitize(struct drm_i915_private *dev_priv) @@ -6412,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) tmp = I915_READ(MCH_SSKPD); if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) - DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", - tmp); + drm_dbg_kms(&dev_priv->drm, + "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", + tmp); } static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) @@ -6590,6 +6632,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaEnable32PlaneMode:icl */ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, + 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, PSDUNIT_CLKGATE_DIS); } static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -6597,6 +6650,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) u32 vd_pg_enable = 0; unsigned int i; + /* Wa_1408615072:tgl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, VSUNIT_CLKGATE_DIS_TGL); + /* This is not a WA. 
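The new Icelake/Elkhart Lake and Tiger Lake workaround bits above are applied with intel_uncore_rmw(). Assuming the (uncore, reg, clear, set) convention visible in those calls, it stands for the usual read-modify-write below — a sketch, not the driver's actual implementation:

/*
 * Roughly what intel_uncore_rmw(uncore, reg, clear, set) stands for:
 * read the register, drop the 'clear' bits, OR in the 'set' bits, and
 * write it back.  The workarounds above pass clear == 0, so they only
 * set clock-gating disable bits.
 */
static void rmw_sketch(struct intel_uncore *uncore, i915_reg_t reg,
		       u32 clear, u32 set)
{
	u32 val = intel_uncore_read(uncore, reg);

	val &= ~clear;
	val |= set;
	intel_uncore_write(uncore, reg, val);
}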
Enable VD HCP & MFX_ENC powergate */ for (i = 0; i < I915_MAX_VCS; i++) { if (HAS_ENGINE(dev_priv, _VCS(i))) @@ -7113,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv) static void nop_init_clock_gating(struct drm_i915_private *dev_priv) { - DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n"); + drm_dbg_kms(&dev_priv->drm, + "No clock gating settings or workarounds applied.\n"); } /** @@ -7180,9 +7238,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) { /* For cxsr */ if (IS_PINEVIEW(dev_priv)) - i915_pineview_get_mem_freq(dev_priv); + pnv_get_mem_freq(dev_priv); else if (IS_GEN(dev_priv, 5)) - i915_ironlake_get_mem_freq(dev_priv); + ilk_get_mem_freq(dev_priv); if (intel_has_sagv(dev_priv)) skl_setup_sagv_block_time(dev_priv); @@ -7208,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.optimize_watermarks = ilk_optimize_watermarks; } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); + drm_dbg_kms(&dev_priv->drm, + "Failed to read display plane latency. " + "Disable CxSR\n"); } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); @@ -7229,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq)) { - DRM_INFO("failed to find known CxSR latency " + drm_info(&dev_priv->drm, + "failed to find known CxSR latency " "(found ddr%s fsb freq %d, mem freq %d), " "disabling CxSR\n", (dev_priv->is_ddr3 == 1) ? "3" : "2", @@ -7238,7 +7298,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) intel_set_memory_cxsr(dev_priv, false); dev_priv->display.update_wm = NULL; } else - dev_priv->display.update_wm = pineview_update_wm; + dev_priv->display.update_wm = pnv_update_wm; } else if (IS_GEN(dev_priv, 4)) { dev_priv->display.update_wm = i965_update_wm; } else if (IS_GEN(dev_priv, 3)) { @@ -7253,7 +7313,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.get_fifo_size = i830_get_fifo_size; } } else { - DRM_ERROR("unexpected fall-through in intel_init_pm\n"); + drm_err(&dev_priv->drm, + "unexpected fall-through in %s\n", __func__); } } diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c index eddb392917aa..14b59b899c9b 100644 --- a/drivers/gpu/drm/i915/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/intel_region_lmem.c @@ -90,6 +90,8 @@ region_lmem_init(struct intel_memory_region *mem) if (ret) io_mapping_fini(&mem->iomap); + intel_memory_region_set_name(mem, "local"); + return ret; } @@ -123,10 +125,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915) io_start, &intel_region_lmem_ops); if (!IS_ERR(mem)) { - DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region); - DRM_INFO("Intel graphics fake LMEM IO start: %llx\n", - (u64)mem->io_start); - DRM_INFO("Intel graphics fake LMEM size: %llx\n", + drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n", + &mem->region); + drm_info(&i915->drm, + "Intel graphics fake LMEM IO start: %llx\n", + (u64)mem->io_start); + drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n", (u64)resource_size(&mem->region)); } diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index e06b35b844a0..cbfb7171d62d 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -105,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915, if (intel_wait_for_register(uncore, VLV_IOSF_DOORBELL_REQ, 
IOSF_SB_BUSY, 0, 5)) { - DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", - is_read ? "read" : "write"); + drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n", + is_read ? "read" : "write"); return -EAGAIN; } @@ -129,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915, *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA); err = 0; } else { - DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", - is_read ? "read" : "write"); + drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n", + is_read ? "read" : "write"); err = -ETIMEDOUT; } @@ -283,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, if (intel_wait_for_register_fw(uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { - DRM_ERROR("timeout waiting for SBI to become ready\n"); + drm_err(&i915->drm, + "timeout waiting for SBI to become ready\n"); return -EBUSY; } @@ -301,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, if (__intel_wait_for_register_fw(uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100, 100, &cmd)) { - DRM_ERROR("timeout waiting for SBI to complete read\n"); + drm_err(&i915->drm, + "timeout waiting for SBI to complete read\n"); return -ETIMEDOUT; } if (cmd & SBI_RESPONSE_FAIL) { - DRM_ERROR("error during SBI read of reg %x\n", reg); + drm_err(&i915->drm, "error during SBI read of reg %x\n", reg); return -ENXIO; } @@ -426,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, mutex_unlock(&i915->sb_lock); if (err) { - DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", - mbox, __builtin_return_address(0), err); + drm_dbg(&i915->drm, + "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", + mbox, __builtin_return_address(0), err); } return err; @@ -447,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, mutex_unlock(&i915->sb_lock); if (err) { - DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", - val, mbox, __builtin_return_address(0), err); + drm_dbg(&i915->drm, + "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", + val, mbox, __builtin_return_address(0), err); } return err; @@ -519,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, * requests, and for any quirks of the PCODE firmware that delays * the request completion. 
*/ - DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); + drm_dbg_kms(&i915->drm, + "PCODE timeout, retrying with preemption disabled\n"); WARN_ON_ONCE(timeout_base_ms > 3); preempt_disable(); ret = wait_for_atomic(COND, 50); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 94a97bf8c021..5f2cf6f43b8b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -359,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) if (wait_for_atomic((n = fifo_free_entries(uncore)) > GT_FIFO_NUM_RESERVED_ENTRIES, GT_FIFO_TIMEOUT_MS)) { - DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); + drm_dbg(&uncore->i915->drm, + "GT_FIFO timeout, entries: %u\n", n); return; } } @@ -432,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore) break; if (--retry_count == 0) { - DRM_ERROR("Timed out waiting for forcewake timers to finish\n"); + drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n"); break; } @@ -490,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore) fifodbg = __raw_uncore_read32(uncore, GTFIFODBG); if (unlikely(fifodbg)) { - DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg); + drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg); __raw_uncore_write32(uncore, GTFIFODBG, fifodbg); } @@ -562,7 +563,7 @@ void intel_uncore_resume_early(struct intel_uncore *uncore) unsigned int restore_forcewake; if (intel_uncore_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on resume, clearing\n"); + drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n"); if (!intel_uncore_has_forcewake(uncore)) return; @@ -1595,8 +1596,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) spin_unlock_irq(&uncore->lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); - DRM_INFO("when using vblank-synced partial screen updates.\n"); + drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n"); + drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n"); fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); @@ -1683,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) mmio_size = 2 * 1024 * 1024; uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); if (uncore->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - + drm_err(&i915->drm, "failed to map registers\n"); return -EIO; } @@ -1807,7 +1807,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) /* clear out unclaimed reg detection bit */ if (intel_uncore_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); + drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); return 0; @@ -2072,9 +2072,10 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) if (unlikely(check_for_unclaimed_mmio(uncore))) { if (!i915_modparams.mmio_debug) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. " - "Please use i915.mmio_debug=N for more information.\n"); + drm_dbg(&uncore->i915->drm, + "Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. 
" + "Please use i915.mmio_debug=N for more information.\n"); i915_modparams.mmio_debug++; } uncore->debug->unclaimed_mmio_check--; diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 59aa1b6f1827..8fbf6f4d3f26 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -95,16 +95,17 @@ static void __intel_wakeref_put_work(struct work_struct *wrk) void __intel_wakeref_init(struct intel_wakeref *wf, struct intel_runtime_pm *rpm, const struct intel_wakeref_ops *ops, - struct lock_class_key *key) + struct intel_wakeref_lockclass *key) { wf->rpm = rpm; wf->ops = ops; - __mutex_init(&wf->mutex, "wakeref", key); + __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex); atomic_set(&wf->count, 0); wf->wakeref = 0; INIT_WORK(&wf->work, __intel_wakeref_put_work); + lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0); } int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 8d945db94b7a..7d1e676b71ef 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -44,12 +44,17 @@ struct intel_wakeref { struct work_struct work; }; +struct intel_wakeref_lockclass { + struct lock_class_key mutex; + struct lock_class_key work; +}; + void __intel_wakeref_init(struct intel_wakeref *wf, struct intel_runtime_pm *rpm, const struct intel_wakeref_ops *ops, - struct lock_class_key *key); + struct intel_wakeref_lockclass *key); #define intel_wakeref_init(wf, rpm, ops) do { \ - static struct lock_class_key __key; \ + static struct intel_wakeref_lockclass __key; \ \ __intel_wakeref_init((wf), (rpm), (ops), &__key); \ } while (0) diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile deleted file mode 100644 index df028e2b0d64..000000000000 --- a/drivers/gpu/drm/i915/oa/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: MIT - -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index b37fc53973cc..78f36faf2bbe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -124,8 +124,6 @@ static void pm_resume(struct drm_i915_private *i915) * that runtime-pm just works. 
*/ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - intel_gt_sanitize(&i915->gt, false); - i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(&i915->ggtt); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 80cde5bda922..b342bef5e7c9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -34,6 +34,7 @@ #include "mock_drm.h" #include "mock_gem_device.h" +#include "mock_gtt.h" #include "igt_flush_test.h" static void cleanup_freed_objects(struct drm_i915_private *i915) @@ -151,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = __ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -206,8 +207,7 @@ err_ppgtt_cleanup: return err; } -static int lowlevel_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int lowlevel_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -256,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, * memory. We expect to hit -ENOMEM. */ - obj = fake_dma_object(i915, BIT_ULL(size)); + obj = fake_dma_object(vm->i915, BIT_ULL(size)); if (IS_ERR(obj)) { kfree(order); break; @@ -291,7 +291,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma->node.size = BIT_ULL(size); mock_vma->node.start = addr; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) vm->insert_entries(vm, mock_vma, I915_CACHE_NONE, 0); } @@ -303,7 +303,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) vm->clear_range(vm, addr, BIT_ULL(size)); } @@ -312,7 +312,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, kfree(order); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } kfree(mock_vma); @@ -340,8 +340,7 @@ static void close_object_list(struct list_head *objects, } } -static int fill_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int fill_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -374,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915, { } }, *p; - obj = fake_dma_object(i915, full_size); + obj = fake_dma_object(vm->i915, full_size); if (IS_ERR(obj)) break; @@ -542,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; @@ -552,8 +551,7 @@ err: return err; } -static int walk_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int walk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -575,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915, u64 addr; int err = 0; - obj = fake_dma_object(i915, size << PAGE_SHIFT); + obj = fake_dma_object(vm->i915, size << PAGE_SHIFT); if (IS_ERR(obj)) break; @@ -630,14 +628,13 @@ err_put: if (err) return err; - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; } -static int pot_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int pot_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ 
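The reworked hole exercisers above derive everything from the address space itself, including the runtime-PM reference (vm->gt->uncore->rpm) that previously came from the separately passed i915. A minimal sketch of the scoped-wakeref pattern they rely on; the wrapper function is illustrative:

/*
 * Scoped runtime-PM pattern used by the selftests: the wakeref is taken
 * before the guarded statement and released right after it, so the GTT
 * update runs with the device awake.
 */
static void clear_range_awake(struct i915_address_space *vm,
			      u64 start, u64 length)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
		vm->clear_range(vm, start, length);
}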
-651,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915, if (i915_is_ggtt(vm)) flags |= PIN_GLOBAL; - obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -712,8 +709,7 @@ err_obj: return err; } -static int drunk_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int drunk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -758,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915, * memory. We expect to hit -ENOMEM. */ - obj = fake_dma_object(i915, BIT_ULL(size)); + obj = fake_dma_object(vm->i915, BIT_ULL(size)); if (IS_ERR(obj)) { kfree(order); break; @@ -816,14 +812,13 @@ err_obj: if (err) return err; - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; } -static int __shrink_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int __shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -840,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915, u64 size = BIT_ULL(order++); size = min(size, hole_end - addr); - obj = fake_dma_object(i915, size); + obj = fake_dma_object(vm->i915, size); if (IS_ERR(obj)) { err = PTR_ERR(obj); break; @@ -894,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); return err; } -static int shrink_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -911,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915, for_each_prime_number_from(prime, 0, ULONG_MAX - 1) { vm->fault_attr.interval = prime; - err = __shrink_hole(i915, vm, hole_start, hole_end, end_time); + err = __shrink_hole(vm, hole_start, hole_end, end_time); if (err) break; } @@ -921,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915, return err; } -static int shrink_boom(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int shrink_boom(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -944,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915, unsigned int size = sizes[i]; struct i915_vma *vma; - purge = fake_dma_object(i915, size); + purge = fake_dma_object(vm->i915, size); if (IS_ERR(purge)) return PTR_ERR(purge); @@ -961,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915, /* Should now be ripe for purging */ i915_vma_unpin(vma); - explode = fake_dma_object(i915, size); + explode = fake_dma_object(vm->i915, size); if (IS_ERR(explode)) { err = PTR_ERR(explode); goto err_purge; @@ -987,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915, i915_gem_object_put(explode); memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; @@ -1001,8 +994,7 @@ err_purge: } static int exercise_ppgtt(struct drm_i915_private *dev_priv, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1018,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - ppgtt = 
i915_ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_free; @@ -1026,7 +1018,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); GEM_BUG_ON(!atomic_read(&ppgtt->vm.open)); - err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); + err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time); i915_vm_put(&ppgtt->vm); @@ -1082,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B) } static int exercise_ggtt(struct drm_i915_private *i915, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1105,7 +1096,7 @@ restart: if (hole_start >= hole_end) continue; - err = func(i915, &ggtt->vm, hole_start, hole_end, end_time); + err = func(&ggtt->vm, hole_start, hole_end, end_time); if (err) break; @@ -1252,8 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma) } static int exercise_mock(struct drm_i915_private *i915, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1268,7 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915, return -ENOMEM; vm = i915_gem_context_get_vm_rcu(ctx); - err = func(i915, vm, 0, min(vm->total, limit), end_time); + err = func(vm, 0, min(vm->total, limit), end_time); i915_vm_put(vm); mock_context_close(ctx); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 476fba2ed8bb..34138c7bdd15 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index aa5a0e7f5d9e..5b39bab4da1d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h index f7129a243daa..5a577a1332f5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. 
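The three selftest list headers above now provide a no-op selftest() fallback, so each header also compiles when included on its own; real consumers keep overriding the macro before the include. A hedged sketch of that X-macro pattern — the expansions below illustrate what the header comment describes and are not copied from the driver:

/*
 * Illustrative consumer of a selftest(name, func) list header: define the
 * macro, include the list, and it expands once per entry.  The fallback
 * definition added above keeps the header compilable when nothing has
 * defined selftest() yet.  Paths and expansions are examples only.
 */
struct drm_i915_private;

#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "selftests/i915_live_selftests.h"
#undef selftest

enum i915_live_subtest {
#define selftest(name, func) live_##name,
#include "selftests/i915_live_selftests.h"
#undef selftest
	live_subtest_count,
};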
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 35cc69a3a1b9..05364eca20f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -25,6 +25,7 @@ #ifndef __I915_SELFTESTS_RANDOM_H__ #define __I915_SELFTESTS_RANDOM_H__ +#include <linux/math64.h> #include <linux/random.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c new file mode 100644 index 000000000000..fb506b699095 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include <linux/preempt.h> +#include <linux/bottom_half.h> +#include <linux/irqflags.h> + +#include "igt_atomic.h" + +static void __preempt_begin(void) +{ + preempt_disable(); +} + +static void __preempt_end(void) +{ + preempt_enable(); +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +const struct igt_atomic_section igt_atomic_phases[] = { + { "preempt", __preempt_begin, __preempt_end }, + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } +}; diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h index 93ec89f487ec..1991798abf4b 100644 --- a/drivers/gpu/drm/i915/selftests/igt_atomic.h +++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h @@ -6,51 +6,12 @@ #ifndef IGT_ATOMIC_H #define IGT_ATOMIC_H -#include <linux/preempt.h> -#include <linux/bottom_half.h> -#include <linux/irqflags.h> - -static void __preempt_begin(void) -{ - preempt_disable(); -} - -static void __preempt_end(void) -{ - preempt_enable(); -} - -static void __softirq_begin(void) -{ - local_bh_disable(); -} - -static void __softirq_end(void) -{ - local_bh_enable(); -} - -static void __hardirq_begin(void) -{ - local_irq_disable(); -} - -static void __hardirq_end(void) -{ - local_irq_enable(); -} - struct igt_atomic_section { const char *name; void (*critical_section_begin)(void); void (*critical_section_end)(void); }; -static const struct igt_atomic_section igt_atomic_phases[] = { - { "preempt", __preempt_begin, __preempt_end }, - { "softirq", __softirq_begin, __softirq_end }, - { "hardirq", __hardirq_begin, __hardirq_end }, - { } -}; +extern const struct igt_atomic_section igt_atomic_phases[]; #endif /* IGT_ATOMIC_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h index c0e9f99d50de..36ed42736c52 100644 --- a/drivers/gpu/drm/i915/selftests/igt_live_test.h +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h @@ -7,7 +7,7 @@ #ifndef IGT_LIVE_TEST_H #define IGT_LIVE_TEST_H -#include "../i915_gem.h" +#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */ struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 04d0aa7b349e..3ef3620e0da5 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce, static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) { - unsigned long n; + unsigned 
long n = obj->base.size >> PAGE_SHIFT; + u32 *ptr; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - return err; - - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) return err; - for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 __iomem *base; - u32 read_val; - - base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); - read_val = ioread32(base + dword); - io_mapping_unmap_atomic(base); - if (read_val != val) { - pr_err("n=%lu base[%u]=%u, val=%u\n", - n, dword, read_val, val); + ptr += dword; + while (n--) { + if (*ptr != val) { + pr_err("base[%u]=%08x, val=%08x\n", + dword, *ptr, val); err = -EINVAL; break; } + + ptr += PAGE_SIZE / sizeof(*ptr); } - i915_gem_object_unpin_pages(obj); + i915_gem_object_unpin_map(obj); return err; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index ac641f5360e1..3b8986983afc 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -58,6 +58,8 @@ static void mock_device_release(struct drm_device *dev) mock_device_flush(i915); intel_gt_driver_remove(&i915->gt); + i915_gem_driver_release__contexts(i915); + i915_gem_drain_workqueue(i915); i915_gem_drain_freed_objects(i915); @@ -184,6 +186,7 @@ struct drm_i915_private *mock_gem_device(void) if (mock_engine_init(i915->engine[RCS0])) goto err_context; + __clear_bit(I915_WEDGED, &i915->gt.reset.flags); intel_engines_driver_register(i915); return i915; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 20ac3844edec..edc5e3dda8ca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm) { } +static void mock_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) { struct i915_ppgtt *ppgtt; @@ -70,7 +75,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - ppgtt->vm.clear_range = nop_clear_range; + ppgtt->vm.clear_range = mock_clear_range; ppgtt->vm.insert_page = mock_insert_page; ppgtt->vm.insert_entries = mock_insert_entries; ppgtt->vm.cleanup = mock_cleanup; @@ -107,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->mappable_end = resource_size(&ggtt->gmadr); ggtt->vm.total = 4096 * PAGE_SIZE; - ggtt->vm.clear_range = nop_clear_range; + ggtt->vm.clear_range = mock_clear_range; ggtt->vm.insert_page = mock_insert_page; ggtt->vm.insert_entries = mock_insert_entries; ggtt->vm.cleanup = mock_cleanup; diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index 47188df3080d..ddd64f9e3341 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -9,8 +9,6 @@ */ #include <linux/acpi.h> -#include <linux/gpio/consumer.h> -#include <linux/gpio/machine.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -25,17 +23,6 @@ #define BYT_CRC_HRV 2 #define CHT_CRC_HRV 3 -/* Lookup table for the Panel Enable/Disable line as GPIO signals */ -static struct gpiod_lookup_table 
panel_gpio_table = { - /* Intel GFX is consumer */ - .dev_id = "0000:00:02.0", - .table = { - /* Panel EN/DISABLE */ - GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), - { }, - }, -}; - /* PWM consumed by the Intel GFX */ static struct pwm_lookup crc_pwm_lookup[] = { PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL), @@ -96,9 +83,6 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, if (ret) dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret); - /* Add lookup table binding for Panel Control to the GPIO Chip */ - gpiod_add_lookup_table(&panel_gpio_table); - /* Add lookup table for crc-pwm */ pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); @@ -121,9 +105,6 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c) regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); - /* Remove lookup table for Panel Control from the GPIO Chip */ - gpiod_remove_lookup_table(&panel_gpio_table); - /* remove crc-pwm lookup table */ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..b0eea728455d 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p) } EXPORT_SYMBOL_GPL(devm_pinctrl_put); -int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, - bool dup) +/** + * pinctrl_register_mappings() - register a set of pin controller mappings + * @maps: the pincontrol mappings table to register. Note the pinctrl-core + * keeps a reference to the passed in maps, so they should _not_ be + * marked with __initdata. + * @num_maps: the number of maps in the mapping table + */ +int pinctrl_register_mappings(const struct pinctrl_map *maps, + unsigned num_maps) { int i, ret; struct pinctrl_maps *maps_node; @@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, if (!maps_node) return -ENOMEM; + maps_node->maps = maps; maps_node->num_maps = num_maps; - if (dup) { - maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps, - GFP_KERNEL); - if (!maps_node->maps) { - kfree(maps_node); - return -ENOMEM; - } - } else { - maps_node->maps = maps; - } mutex_lock(&pinctrl_maps_mutex); list_add_tail(&maps_node->node, &pinctrl_maps); @@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, return 0; } +EXPORT_SYMBOL_GPL(pinctrl_register_mappings); /** - * pinctrl_register_mappings() - register a set of pin controller mappings - * @maps: the pincontrol mappings table to register. This should probably be - * marked with __initdata so it can be discarded after boot. This - * function will perform a shallow copy for the mapping entries. - * @num_maps: the number of maps in the mapping table + * pinctrl_unregister_mappings() - unregister a set of pin controller mappings + * @maps: the pincontrol mappings table passed to pinctrl_register_mappings() + * when registering the mappings. 
*/ -int pinctrl_register_mappings(const struct pinctrl_map *maps, - unsigned num_maps) -{ - return pinctrl_register_map(maps, num_maps, true); -} -EXPORT_SYMBOL_GPL(pinctrl_register_mappings); - -void pinctrl_unregister_map(const struct pinctrl_map *map) +void pinctrl_unregister_mappings(const struct pinctrl_map *map) { struct pinctrl_maps *maps_node; @@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map) } mutex_unlock(&pinctrl_maps_mutex); } +EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings); /** * pinctrl_force_sleep() - turn a given controller device into sleep state diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 7f34167a0405..840103c40c14 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range * pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev, unsigned int pin); -int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, - bool dup); -void pinctrl_unregister_map(const struct pinctrl_map *map); - extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); extern int pinctrl_force_default(struct pinctrl_dev *pctldev); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 674920daac26..9357f7c46cf3 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p) struct pinctrl_dt_map *dt_map, *n1; list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) { - pinctrl_unregister_map(dt_map->map); + pinctrl_unregister_mappings(dt_map->map); list_del(&dt_map->node); dt_free_map(dt_map->pctldev, dt_map->map, dt_map->num_maps); @@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, dt_map->num_maps = num_maps; list_add_tail(&dt_map->node, &p->dt_maps); - return pinctrl_register_map(map, num_maps, false); + return pinctrl_register_mappings(map, num_maps); err_free_map: dt_free_map(pctldev, map, num_maps); diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 306d1efeb5e0..156b122c0ad5 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -78,7 +78,7 @@ struct drm_format_info { * triplet @char_per_block, @block_w, @block_h for better * describing the pixel format. */ - u8 cpp[3]; + u8 cpp[4]; /** * @char_per_block: @@ -104,7 +104,7 @@ struct drm_format_info { * information from their drm_mode_config.get_format_info hook * if they want the core to be validating the pitch. 
*/ - u8 char_per_block[3]; + u8 char_per_block[4]; }; /** @@ -113,7 +113,7 @@ struct drm_format_info { * Block width in pixels, this is intended to be accessed through * drm_format_info_block_width() */ - u8 block_w[3]; + u8 block_w[4]; /** * @block_h: @@ -121,7 +121,7 @@ struct drm_format_info { * Block height in pixels, this is intended to be accessed through * drm_format_info_block_height() */ - u8 block_h[3]; + u8 block_h[4]; /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index ddd1b2773431..e987dc9fd2af 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -153,6 +153,7 @@ struct pinctrl_map { extern int pinctrl_register_mappings(const struct pinctrl_map *map, unsigned num_maps); +extern void pinctrl_unregister_mappings(const struct pinctrl_map *map); extern void pinctrl_provide_dummies(void); #else @@ -162,6 +163,10 @@ static inline int pinctrl_register_mappings(const struct pinctrl_map *map, return 0; } +static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map) +{ +} + static inline void pinctrl_provide_dummies(void) { } diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 5ba481f49931..8bc0b31597d8 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -422,6 +422,19 @@ extern "C" { #define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6) /* + * Intel color control surfaces (CCS) for Gen-12 media compression + * + * The main surface is Y-tiled and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7) + +/* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * * Macroblocks are laid in a Z-shape, and each pixel data is following the |
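Moving the igt_atomic critical-section helpers out of igt_atomic.h into the new igt_atomic.c leaves the header with only the struct definition and an extern declaration, so the phase table is no longer instantiated in every including translation unit. A hedged sketch of how a caller might walk the table; the helper below is illustrative, not a function added by this series:

#include <linux/printk.h>

#include "igt_atomic.h"

/*
 * Run op() once inside each kind of atomic context. The empty
 * terminating entry in igt_atomic_phases[] (NULL name) ends the loop.
 */
static int run_in_each_atomic_phase(int (*op)(void))
{
	const struct igt_atomic_section *p;

	for (p = igt_atomic_phases; p->name; p++) {
		int err;

		p->critical_section_begin();
		err = op();
		p->critical_section_end();

		if (err) {
			pr_err("op failed in %s context: %d\n", p->name, err);
			return err;
		}
	}

	return 0;
}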
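With pinctrl_register_mappings() no longer making its own copy of the table and pinctrl_unregister_mappings() now exported, board code registers a long-lived const table and later passes the same pointer back to drop it. A minimal sketch of that usage; the device, controller and function names are placeholders, not taken from this patch:

#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map board_pinmux_map[] = {
	/* Placeholder consumer/controller/function names. */
	PIN_MAP_MUX_GROUP_DEFAULT("foo-uart.0", "pinctrl-foo", NULL, "uart0"),
};

static int board_pinmux_setup(void)
{
	/* The core keeps this pointer, so the table must not be __initdata. */
	return pinctrl_register_mappings(board_pinmux_map,
					 ARRAY_SIZE(board_pinmux_map));
}

static void board_pinmux_teardown(void)
{
	/* Pass the same table that was registered. */
	pinctrl_unregister_mappings(board_pinmux_map);
}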
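The comment for the new I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS modifier fixes the geometry: a 64-byte CCS cache line covers a 4x1 area of Y-tiles (4 KiB each) of main surface, i.e. one CCS byte per 256 main-surface bytes, and the drm_format_info arrays grow from three to four entries so NV12 can carry its two extra CCS planes. A back-of-the-envelope helper that only restates that ratio; it is an illustration of the arithmetic, not a function from the driver, and it ignores the extra alignment a real allocator would apply:

#include <linux/types.h>

/*
 * 64 B of CCS covers 4 Y-tiles x 4096 B = 16384 B of main surface,
 * i.e. 256 main-surface bytes per CCS byte (equivalently, 4 CCS bits
 * per 128 B cache-line pair of main surface).
 */
#define GEN12_CCS_RATIO		256

/* Main-surface pitch must be a multiple of four Y-tile widths (4 * 128 B). */
#define GEN12_CCS_PITCH_ALIGN	512

static u32 gen12_ccs_plane_size(u32 main_pitch, u32 main_height)
{
	/* Assumes main_pitch already honours GEN12_CCS_PITCH_ALIGN. */
	return (main_pitch * main_height) / GEN12_CCS_RATIO;
}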