Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 127
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 757
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 103
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 71
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 70
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 116
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 298
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 350
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 99
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 47
53 files changed, 2469 insertions(+), 1058 deletions(-)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index af10f8571d87..92be50c39ffd 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1711,7 +1711,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
@@ -2223,7 +2225,7 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
-
+// used by both SetVoltageTable v1.3 and v1.4
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2290,15 +2292,36 @@ typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
-// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
-#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 Regulator Info
+// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
-// undefined power state
+
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+ ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
+
+// New Added from CI Hawaii for EVV feature
+typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+ USHORT usVoltageLevel; // real voltage level in unit of mv
+ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
+ ULONG ulReseved;
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
@@ -3864,6 +3887,8 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
#define PP_AC_DC_SWITCH_GPIO_PINID 60
//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
#define VDDC_VRHOT_GPIO_PINID 61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID 62
typedef struct _ATOM_GPIO_PIN_LUT
{
@@ -4169,10 +4194,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
-
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_BRACKET_LAYOUT_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -4397,6 +4422,31 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
+{
+ USHORT usConnectorObjectId;
+ UCHAR ucConnectorType;
+ UCHAR ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D 1
+#define CONNECTOR_TYPE_DVI_I 2
+#define CONNECTOR_TYPE_VGA 3
+#define CONNECTOR_TYPE_HDMI 4
+#define CONNECTOR_TYPE_DISPLAY_PORT 5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6
+
+typedef struct _ATOM_BRACKET_LAYOUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucLength;
+ UCHAR ucWidth;
+ UCHAR ucConnNum;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
/****************************************************************************/
// ASIC voltage data table
/****************************************************************************/
@@ -4524,8 +4574,9 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV 8
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
typedef struct _VOLTAGE_LUT_ENTRY_V2
@@ -4552,6 +4603,10 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE 0
+#define VOLTAGE_DATA_TWO_BYTE 1
+
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
@@ -4584,7 +4639,8 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
// 1:0 – offset trim,
USHORT usLoadLine_PSI;
// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
- UCHAR ucReserved[2];
+ UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
+ UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31
ULONG ulReserved;
}ATOM_SVID2_VOLTAGE_OBJECT_V3;
@@ -4637,6 +4693,49 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
}ATOM_ASIC_PROFILING_INFO_V2_1;
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulEvvDerateTdp;
+ ULONG ulEvvDerateTdc;
+ ULONG ulBoardCoreTemp;
+ ULONG ulMaxVddc;
+ ULONG ulMinVddc;
+ ULONG ulLoadLineSlop;
+ ULONG ulLeakageTemp;
+ ULONG ulLeakageVoltage;
+ ULONG ulCACmEncodeRange;
+ ULONG ulCACmEncodeAverage;
+ ULONG ulCACbEncodeRange;
+ ULONG ulCACbEncodeAverage;
+ ULONG ulKt_bEncodeRange;
+ ULONG ulKt_bEncodeAverage;
+ ULONG ulKv_mEncodeRange;
+ ULONG ulKv_mEncodeAverage;
+ ULONG ulKv_bEncodeRange;
+ ULONG ulKv_bEncodeAverage;
+ ULONG ulLkgEncodeLn_MaxDivMin;
+ ULONG ulLkgEncodeMin;
+ ULONG ulEfuseLogisticAlpha;
+ USHORT usPowerDpm0;
+ USHORT usCurrentDpm0;
+ USHORT usPowerDpm1;
+ USHORT usCurrentDpm1;
+ USHORT usPowerDpm2;
+ USHORT usCurrentDpm2;
+ USHORT usPowerDpm3;
+ USHORT usCurrentDpm3;
+ USHORT usPowerDpm4;
+ USHORT usCurrentDpm4;
+ USHORT usPowerDpm5;
+ USHORT usCurrentDpm5;
+ USHORT usPowerDpm6;
+ USHORT usCurrentDpm6;
+ USHORT usPowerDpm7;
+ USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
@@ -5808,6 +5907,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
@@ -6242,6 +6343,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
+#define _512Mx8 0x71
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -6987,9 +7089,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
UCHAR ucMaxDispEngineNum;
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
- UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
- UCHAR ucReserved[3];
- ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
+ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
+ UCHAR ucDispCaps;
+ UCHAR ucReserved[2];
+ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
}ATOM_DISP_OUT_INFO_V3;
//ucDispCaps
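
Editor's note on the PIXEL_CLOCK_V6_MISC_HDMI_*BPP_V6 defines added in the first atombios.h hunk above: for the v6 pixel-clock table the 30 bpp and 36 bpp encodings are swapped relative to the legacy names, so deep-color selection should use the new *_V6 values. A minimal standalone sketch of that mapping, using only the values from the hunk (the helper name is illustrative and not part of the patch):

#include <stdint.h>

#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP     0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6  0x04  /* v6 encoding: 1 = 30bpp (5:4) */
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6  0x08  /* v6 encoding: 2 = 36bpp (2:1) */
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP     0x0c

/* hypothetical helper: ucMiscInfo deep-color bits for a
 * PIXEL_CLOCK_PARAMETERS_V6 call, from the monitor's bits per color */
static uint8_t v6_hdmi_deep_color_bits(int bpc)
{
	switch (bpc) {
	case 10: return PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
	case 12: return PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
	case 16: return PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
	case 8:
	default: return PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
	}
}
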
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d435f8..80a20120e625 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else {
+ } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
@@ -1910,6 +1910,21 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ int r;
+ struct radeon_framebuffer *radeon_fb;
+ struct radeon_bo *rbo;
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ }
/* disable the GRPH */
if (ASIC_IS_DCE4(rdev))
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
@@ -1940,7 +1955,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
break;
case ATOM_PPLL0:
/* disable the ppll */
- if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
+ if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_BONAIRE) ||
+ (rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417ffff..fb3ae07a1469 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
- if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
- dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b226acf..a42d61571f49 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some dce3.x boards have a bug in their transmitter control table.
- * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
- * does the same thing and more.
- */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
- (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- } else if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
@@ -2410,6 +2397,15 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
+ /* set up the FMT blocks */
+ if (ASIC_IS_DCE8(rdev))
+ dce8_program_fmt(encoder);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_program_fmt(encoder);
+ else if (ASIC_IS_DCE3(rdev))
+ dce3_program_fmt(encoder);
+ else if (ASIC_IS_AVIVO(rdev))
+ avivo_program_fmt(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a97edf..1ed479976358 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -40,6 +40,20 @@
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
+ { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
+ { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+};
+
static const struct ci_pt_defaults defaults_bonaire_xt =
{
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
@@ -187,22 +201,38 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
switch (rdev->pdev->device) {
- case 0x6650:
- case 0x6658:
- case 0x665C:
- default:
+ case 0x6650:
+ case 0x6658:
+ case 0x665C:
+ default:
pi->powertune_defaults = &defaults_bonaire_xt;
break;
- case 0x6651:
- case 0x665D:
+ case 0x6651:
+ case 0x665D:
pi->powertune_defaults = &defaults_bonaire_pro;
break;
- case 0x6640:
+ case 0x6640:
pi->powertune_defaults = &defaults_saturn_xt;
break;
- case 0x6641:
+ case 0x6641:
pi->powertune_defaults = &defaults_saturn_pro;
break;
+ case 0x67B8:
+ case 0x67B0:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
+ pi->powertune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x67BA:
+ case 0x67B1:
+ pi->powertune_defaults = &defaults_hawaii_pro;
+ break;
}
pi->dte_tj_offset = 0;
@@ -5142,9 +5172,15 @@ int ci_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
- pi->thermal_temp_setting.temperature_low = 99500;
- pi->thermal_temp_setting.temperature_high = 100000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
+ if (rdev->family == CHIP_HAWAII) {
+ pi->thermal_temp_setting.temperature_low = 94500;
+ pi->thermal_temp_setting.temperature_high = 95000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ }
pi->uvd_enabled = false;
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 252e10a41cf5..9c745dd22438 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -217,6 +217,10 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_start_address = BONAIRE_SMC_UCODE_START;
ucode_size = BONAIRE_SMC_UCODE_SIZE;
break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
default:
DRM_ERROR("unknown asic in smc ucode loader\n");
BUG();
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc989ac7..ae92aa041c6a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -41,6 +41,14 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
+MODULE_FIRMWARE("radeon/HAWAII_me.bin");
+MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
+MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
+MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
+MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -67,11 +75,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
@@ -1302,6 +1305,171 @@ static const u32 kalindi_mgcg_cgcg_init[] =
0xd80c, 0xff000ff0, 0x00000100
};
+static const u32 hawaii_golden_spm_registers[] =
+{
+ 0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x28350, 0xffffffff, 0x3a00161a,
+ 0x28354, 0xffffffff, 0x0000002e,
+ 0x9a10, 0xffffffff, 0x00018208,
+ 0x98f8, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+ 0x3354, 0x00000333, 0x00000333,
+ 0x9a10, 0x00010000, 0x00058208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0xf0311fff, 0x80300000,
+ 0x350c, 0x00810000, 0x408af000,
+ 0x7030, 0x31000111, 0x00000011,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2120, 0x0000007f, 0x0000001b,
+ 0x21dc, 0x00007fb6, 0x00002191,
+ 0x3628, 0x0000003f, 0x0000000a,
+ 0x362c, 0x0000003f, 0x0000000a,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8bf0, 0x00002001, 0x00000001,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x30a04, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x3e78, 0x00000001, 0x00000002,
+ 0xc768, 0x00000008, 0x00000008,
+ 0xc770, 0x00000f00, 0x00000800,
+ 0xc774, 0x00000f00, 0x00000800,
+ 0xc798, 0x00ffffff, 0x00ff7fbf,
+ 0xc79c, 0x00ffffff, 0x00ff7faf,
+ 0x8c00, 0x000000ff, 0x00000800,
+ 0xe40, 0x00001fff, 0x00001fff,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xae00, 0x00100000, 0x000ff07c,
+ 0xac14, 0x000003ff, 0x0000000f,
+ 0xac10, 0xffffffff, 0x7564fdec,
+ 0xac0c, 0xffffffff, 0x3120b9a8,
+ 0xac08, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+ 0xc420, 0xffffffff, 0xfffffffd,
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x3c2a0, 0xffffffff, 0x00000100,
+ 0x3c208, 0xffffffff, 0x00000100,
+ 0x3c2c0, 0xffffffff, 0x00000100,
+ 0x3c2c8, 0xffffffff, 0x00000100,
+ 0x3c2c4, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00200100,
+ 0x3c280, 0xffffffff, 0x00000100,
+ 0x3c214, 0xffffffff, 0x06000100,
+ 0x3c220, 0xffffffff, 0x00000100,
+ 0x3c218, 0xffffffff, 0x06000100,
+ 0x3c204, 0xffffffff, 0x00000100,
+ 0x3c2e0, 0xffffffff, 0x00000100,
+ 0x3c224, 0xffffffff, 0x00000100,
+ 0x3c200, 0xffffffff, 0x00000100,
+ 0x3c230, 0xffffffff, 0x00000100,
+ 0x3c234, 0xffffffff, 0x00000100,
+ 0x3c250, 0xffffffff, 0x00000100,
+ 0x3c254, 0xffffffff, 0x00000100,
+ 0x3c258, 0xffffffff, 0x00000100,
+ 0x3c25c, 0xffffffff, 0x00000100,
+ 0x3c260, 0xffffffff, 0x00000100,
+ 0x3c27c, 0xffffffff, 0x00000100,
+ 0x3c278, 0xffffffff, 0x00000100,
+ 0x3c210, 0xffffffff, 0x06000100,
+ 0x3c290, 0xffffffff, 0x00000100,
+ 0x3c274, 0xffffffff, 0x00000100,
+ 0x3c2b4, 0xffffffff, 0x00000100,
+ 0x3c2b0, 0xffffffff, 0x00000100,
+ 0x3c270, 0xffffffff, 0x00000100,
+ 0x30800, 0xffffffff, 0xe0000000,
+ 0x3c020, 0xffffffff, 0x00010000,
+ 0x3c024, 0xffffffff, 0x00030002,
+ 0x3c028, 0xffffffff, 0x00040007,
+ 0x3c02c, 0xffffffff, 0x00060005,
+ 0x3c030, 0xffffffff, 0x00090008,
+ 0x3c034, 0xffffffff, 0x00010000,
+ 0x3c038, 0xffffffff, 0x00030002,
+ 0x3c03c, 0xffffffff, 0x00040007,
+ 0x3c040, 0xffffffff, 0x00060005,
+ 0x3c044, 0xffffffff, 0x00090008,
+ 0x3c048, 0xffffffff, 0x00010000,
+ 0x3c04c, 0xffffffff, 0x00030002,
+ 0x3c050, 0xffffffff, 0x00040007,
+ 0x3c054, 0xffffffff, 0x00060005,
+ 0x3c058, 0xffffffff, 0x00090008,
+ 0x3c05c, 0xffffffff, 0x00010000,
+ 0x3c060, 0xffffffff, 0x00030002,
+ 0x3c064, 0xffffffff, 0x00040007,
+ 0x3c068, 0xffffffff, 0x00060005,
+ 0x3c06c, 0xffffffff, 0x00090008,
+ 0x3c070, 0xffffffff, 0x00010000,
+ 0x3c074, 0xffffffff, 0x00030002,
+ 0x3c078, 0xffffffff, 0x00040007,
+ 0x3c07c, 0xffffffff, 0x00060005,
+ 0x3c080, 0xffffffff, 0x00090008,
+ 0x3c084, 0xffffffff, 0x00010000,
+ 0x3c088, 0xffffffff, 0x00030002,
+ 0x3c08c, 0xffffffff, 0x00040007,
+ 0x3c090, 0xffffffff, 0x00060005,
+ 0x3c094, 0xffffffff, 0x00090008,
+ 0x3c098, 0xffffffff, 0x00010000,
+ 0x3c09c, 0xffffffff, 0x00030002,
+ 0x3c0a0, 0xffffffff, 0x00040007,
+ 0x3c0a4, 0xffffffff, 0x00060005,
+ 0x3c0a8, 0xffffffff, 0x00090008,
+ 0x3c0ac, 0xffffffff, 0x00010000,
+ 0x3c0b0, 0xffffffff, 0x00030002,
+ 0x3c0b4, 0xffffffff, 0x00040007,
+ 0x3c0b8, 0xffffffff, 0x00060005,
+ 0x3c0bc, 0xffffffff, 0x00090008,
+ 0x3c0c0, 0xffffffff, 0x00010000,
+ 0x3c0c4, 0xffffffff, 0x00030002,
+ 0x3c0c8, 0xffffffff, 0x00040007,
+ 0x3c0cc, 0xffffffff, 0x00060005,
+ 0x3c0d0, 0xffffffff, 0x00090008,
+ 0x3c0d4, 0xffffffff, 0x00010000,
+ 0x3c0d8, 0xffffffff, 0x00030002,
+ 0x3c0dc, 0xffffffff, 0x00040007,
+ 0x3c0e0, 0xffffffff, 0x00060005,
+ 0x3c0e4, 0xffffffff, 0x00090008,
+ 0x3c0e8, 0xffffffff, 0x00010000,
+ 0x3c0ec, 0xffffffff, 0x00030002,
+ 0x3c0f0, 0xffffffff, 0x00040007,
+ 0x3c0f4, 0xffffffff, 0x00060005,
+ 0x3c0f8, 0xffffffff, 0x00090008,
+ 0xc318, 0xffffffff, 0x00020200,
+ 0x3350, 0xffffffff, 0x00000200,
+ 0x15c0, 0xffffffff, 0x00000400,
+ 0x55e8, 0xffffffff, 0x00000000,
+ 0x2f50, 0xffffffff, 0x00000902,
+ 0x3c000, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc424, 0xffffffff, 0x0020003f,
+ 0x38, 0xffffffff, 0x0140001c,
+ 0x3c, 0x000f0000, 0x000f0000,
+ 0x220, 0xffffffff, 0xc060000c,
+ 0x224, 0xc0000fff, 0x00000100,
+ 0xf90, 0xffffffff, 0x00000100,
+ 0xf98, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd00c, 0xff000ff0, 0x00000100,
+ 0xd80c, 0xff000ff0, 0x00000100
+};
+
static void cik_init_golden_registers(struct radeon_device *rdev)
{
switch (rdev->family) {
@@ -1347,6 +1515,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
spectre_golden_spm_registers,
(const u32)ARRAY_SIZE(spectre_golden_spm_registers));
break;
+ case CHIP_HAWAII:
+ radeon_program_register_sequence(rdev,
+ hawaii_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_registers));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_common_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+ radeon_program_register_sequence(rdev,
+ hawaii_golden_spm_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+ break;
default:
break;
}
@@ -1454,6 +1636,35 @@ static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{0x0000009f, 0x00b48000}
};
+#define HAWAII_IO_MC_REGS_SIZE 22
+
+static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
+{
+ {0x0000007d, 0x40000000},
+ {0x0000007e, 0x40180304},
+ {0x0000007f, 0x0000ff00},
+ {0x00000081, 0x00000000},
+ {0x00000083, 0x00000800},
+ {0x00000086, 0x00000000},
+ {0x00000087, 0x00000100},
+ {0x00000088, 0x00020100},
+ {0x00000089, 0x00000000},
+ {0x0000008b, 0x00040000},
+ {0x0000008c, 0x00000100},
+ {0x0000008e, 0xff010000},
+ {0x00000090, 0xffffefff},
+ {0x00000091, 0xfff3efff},
+ {0x00000092, 0xfff3efbf},
+ {0x00000093, 0xf7ffffff},
+ {0x00000094, 0xffffff7f},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x0000009f, 0x00c79000}
+};
+
+
/**
* cik_srbm_select - select specific register instances
*
@@ -1498,11 +1709,17 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BONAIRE:
- default:
io_mc_regs = (u32 *)&bonaire_io_mc_regs;
ucode_size = CIK_MC_UCODE_SIZE;
regs_size = BONAIRE_IO_MC_REGS_SIZE;
break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ ucode_size = HAWAII_MC_UCODE_SIZE;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1564,8 +1781,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
- mec_req_size, rlc_req_size, mc_req_size,
- sdma_req_size, smc_req_size;
+ mec_req_size, rlc_req_size, mc_req_size = 0,
+ sdma_req_size, smc_req_size = 0;
char fw_name[30];
int err;
@@ -1583,6 +1800,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
break;
+ case CHIP_HAWAII:
+ chip_name = "HAWAII";
+ pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+ me_req_size = CIK_ME_UCODE_SIZE * 4;
+ ce_req_size = CIK_CE_UCODE_SIZE * 4;
+ mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+ rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
+ mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
+ sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ break;
case CHIP_KAVERI:
chip_name = "KAVERI";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
@@ -1763,9 +1991,227 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
num_pipe_configs = rdev->config.cik.max_tile_pipes;
if (num_pipe_configs > 8)
- num_pipe_configs = 8; /* ??? */
+ num_pipe_configs = 16;
- if (num_pipe_configs == 8) {
+ if (num_pipe_configs == 16) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 30:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ } else if (num_pipe_configs == 8) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
@@ -2650,7 +3096,10 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+ if (rdev->family == CHIP_HAWAII)
+ disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
+ else
+ disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
@@ -2667,6 +3116,12 @@ static void cik_setup_rb(struct radeon_device *rdev,
data = 0;
for (j = 0; j < sh_per_se; j++) {
switch (enabled_rbs & 3) {
+ case 0:
+ if (j == 0)
+ data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
+ else
+ data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
+ break;
case 1:
data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
break;
@@ -2719,6 +3174,23 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_HAWAII:
+ rdev->config.cik.max_shader_engines = 4;
+ rdev->config.cik.max_tile_pipes = 16;
+ rdev->config.cik.max_cu_per_sh = 11;
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_backends_per_se = 4;
+ rdev->config.cik.max_texture_channel_caches = 16;
+ rdev->config.cik.max_gprs = 256;
+ rdev->config.cik.max_gs_threads = 32;
+ rdev->config.cik.max_hw_contexts = 8;
+
+ rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+ break;
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
@@ -3097,6 +3569,85 @@ void cik_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
}
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.blit_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes, control;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ control = 0;
+ if (size_in_bytes == 0)
+ control |= PACKET3_DMA_DATA_CP_SYNC;
+ radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+ radeon_ring_write(ring, control);
+ radeon_ring_write(ring, lower_32_bits(src_offset));
+ radeon_ring_write(ring, upper_32_bits(src_offset));
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
+ radeon_ring_write(ring, upper_32_bits(dst_offset));
+ radeon_ring_write(ring, cur_size_in_bytes);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
/*
* IB stuff
*/
@@ -3403,7 +3954,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
int r;
WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ if (rdev->family != CHIP_HAWAII)
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -4740,12 +5292,17 @@ void cik_vm_fini(struct radeon_device *rdev)
static void cik_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr, u32 mc_client)
{
- u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ u32 mc_id;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+ if (rdev->family == CHIP_HAWAII)
+ mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ else
+ mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+
printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
@@ -4834,62 +5391,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
}
}
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- /* CP */
- while (count) {
- ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
- ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1));
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- /* DMA */
- cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
/*
* RLC
* The RLC is a multi-purpose microengine that handles a
@@ -5058,6 +5559,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BONAIRE:
+ case CHIP_HAWAII:
default:
size = BONAIRE_RLC_UCODE_SIZE;
break;
@@ -5556,7 +6058,7 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
}
for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+ dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
}
bo_offset += CP_ME_TABLE_SIZE;
}
@@ -5778,52 +6280,57 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
if (buffer == NULL)
return;
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
- buffer[count++] = 0x80000000;
- buffer[count++] = 0x80000000;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
- buffer[count++] = ext->reg_index - 0xa000;
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = ext->extent[i];
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
- buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (rdev->family) {
case CHIP_BONAIRE:
- buffer[count++] = 0x16000012;
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x16000012);
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_KAVERI:
- buffer[count++] = 0x00000000; /* XXX */
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_KABINI:
- buffer[count++] = 0x00000000; /* XXX */
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_HAWAII:
+ buffer[count++] = 0x3a00161a;
+ buffer[count++] = 0x0000002e;
break;
default:
- buffer[count++] = 0x00000000;
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
}
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
- buffer[count++] = 0;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
}
static void cik_init_pg(struct radeon_device *rdev)
@@ -7118,7 +7625,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- RADEON_CP_PACKET2);
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7428,6 +7935,70 @@ void cik_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS/eDP FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+ break;
+ case 10:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+ break;
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
/* display watermark setup */
/**
* dce8_line_buffer_adjust - Set up the line buffer
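
Editor's note: the cik_copy_cpdma() addition above splits a copy into PACKET3_DMA_DATA packets of at most 0x1fffff bytes (the 21-bit BYTE_COUNT field) and sets CP_SYNC only on the final packet. A standalone sketch of just that chunking, independent of the radeon ring helpers (constants mirror the ones used in the function; the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT   12        /* 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT) */
#define CPDMA_MAX_BYTES  0x1fffff  /* 21-bit BYTE_COUNT of PACKET3_DMA_DATA */

/* illustrative only: show how a copy is split and where CP_SYNC lands */
static void sketch_cpdma_split(uint64_t src, uint64_t dst, unsigned int num_gpu_pages)
{
	uint64_t left = (uint64_t)num_gpu_pages << GPU_PAGE_SHIFT;

	while (left) {
		uint32_t cur = (left > CPDMA_MAX_BYTES) ? CPDMA_MAX_BYTES : (uint32_t)left;

		left -= cur;
		/* the real code emits 7 dwords per chunk: header, CONTROL,
		 * src lo/hi, dst lo/hi, byte count */
		printf("DMA_DATA %7u bytes 0x%llx -> 0x%llx%s\n", cur,
		       (unsigned long long)src, (unsigned long long)dst,
		       left ? "" : "  [CP_SYNC]");
		src += cur;
		dst += cur;
	}
}

int main(void)
{
	sketch_cpdma_split(0x100000000ULL, 0x200000000ULL, 1024); /* 4 MiB copy */
	return 0;
}
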
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b6286068e111..9c9529de20ee 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "cikd.h"
/* sdma */
@@ -101,14 +102,6 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (fence->ring == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
/* write the fence */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
@@ -118,12 +111,12 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+ /* We should be using the new POLL_REG_MEM special op packet here
+ * but it causes sDMA to hang sometimes
+ */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
}
/**
@@ -653,11 +646,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -669,16 +663,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -689,7 +677,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0x7FFFF)
ndw = 0x7FFFF;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -697,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +712,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
if (vm == NULL)
return;
- if (ridx == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm->id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +750,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+ /* We should be using the new POLL_REG_MEM special op packet here
+ * but it causes sDMA to hang sometimes
+ */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a09a1f5..5964af5e5b2d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -25,8 +25,10 @@
#define CIK_H
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
@@ -499,6 +501,7 @@
* bit 4: write
*/
#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define HAWAII_MEMORY_CLIENT_ID_MASK (0x1ff << 12)
#define MEMORY_CLIENT_ID_SHIFT 12
#define MEMORY_CLIENT_RW_MASK (1 << 24)
#define MEMORY_CLIENT_RW_SHIFT 24
@@ -906,6 +909,39 @@
#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
# define STUTTER_ENABLE (1 << 0)
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+ /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_MODE (1 << 1)
+# define FMT_TRUNCATE_DEPTH(x) ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH(x) ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH(x) ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1129,6 +1165,8 @@
# define ADDR_SURF_P8_32x32_16x16 12
# define ADDR_SURF_P8_32x32_16x32 13
# define ADDR_SURF_P8_32x64_32x32 14
+# define ADDR_SURF_P16_32x32_8x16 16
+# define ADDR_SURF_P16_32x32_16x16 17
# define TILE_SPLIT(x) ((x) << 11)
# define ADDR_SURF_TILE_SPLIT_64B 0
# define ADDR_SURF_TILE_SPLIT_128B 1
@@ -1422,6 +1460,7 @@
# define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3
+#define PKR_MAP(x) ((x) << 8)
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
@@ -1714,6 +1753,68 @@
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_DMA_DATA 0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+ /* 0 - LRU
+ * 1 - Stream
+ * 2 - Bypass
+ */
+# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR using DAS
+ * 1 - GDS
+ * 3 - DST_ADDR using L2
+ */
+# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+ /* 0 - LRU
+ * 1 - Stream
+ * 2 - Bypass
+ */
+# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR using SAS
+ * 1 - GDS
+ * 2 - DATA
+ * 3 - SRC_ADDR using L2
+ */
+# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
+# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
+# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
+# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
#define PACKET3_AQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
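[Editor's note] The PACKET3_DMA_DATA layout and field encodings added above describe a seven-dword CP packet. For orientation only (this is not part of the patch), a packet built from these defines might be emitted roughly as follows; the PACKET3() header macro and lower_32_bits()/upper_32_bits() helpers are the ones used elsewhere in the driver, while src_addr, dst_addr and byte_count are placeholders.

/* Hypothetical emission of one PACKET3_DMA_DATA packet (header + 6 dwords),
 * following the layout documented above; COMMAND flags are left at 0.
 */
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DMA_DATA, 5);
ib->ptr[ib->length_dw++] = PACKET3_DMA_DATA_ENGINE(0) |	/* ME */
			   PACKET3_DMA_DATA_SRC_SEL(0) |	/* SRC_ADDR using SAS */
			   PACKET3_DMA_DATA_DST_SEL(0);		/* DST_ADDR using DAS */
ib->ptr[ib->length_dw++] = lower_32_bits(src_addr);		/* SRC_ADDR_LO */
ib->ptr[ib->length_dw++] = upper_32_bits(src_addr);		/* SRC_ADDR_HI */
ib->ptr[ib->length_dw++] = lower_32_bits(dst_addr);		/* DST_ADDR_LO */
ib->ptr[ib->length_dw++] = upper_32_bits(dst_addr);		/* DST_ADDR_HI [7:0] */
ib->ptr[ib->length_dw++] = byte_count & 0x1fffff;		/* COMMAND | BYTE_COUNT [20:0] */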
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338c0fcf..009f46e0ce72 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp = 0, offset;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (connector->latency_present[1])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+ AUDIO_LIPSYNC(connector->audio_latency[1]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ } else {
+ if (connector->latency_present[0])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+ AUDIO_LIPSYNC(connector->audio_latency[0]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ }
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -113,9 +156,6 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
if (!dig->afmt->pin)
return;
@@ -201,20 +241,30 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 56f6bec34af5..9702e55e924e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1186,6 +1186,62 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
pcie_set_readrq(rdev->pdev, 512);
}
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS/eDP FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN);
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
@@ -3956,7 +4012,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->family >= CHIP_TAHITI) {
/* SI */
for (i = 0; i < rdev->rlc.reg_list_size; i++)
- dst_ptr[i] = src_ptr[i];
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
} else {
/* ON/LN/TN */
/* format:
@@ -3970,10 +4026,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (i < dws)
data |= (src_ptr[i] >> 2) << 16;
j = (((i - 1) * 3) / 2);
- dst_ptr[j] = data;
+ dst_ptr[j] = cpu_to_le32(data);
}
j = ((i * 3) / 2);
- dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+ dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4035,40 +4091,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
cik_get_csb_buffer(rdev, dst_ptr);
} else if (rdev->family >= CHIP_TAHITI) {
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
- dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
- dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
- dst_ptr[2] = rdev->rlc.clear_state_size;
+ dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+ dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+ dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
} else {
reg_list_hdr_blk_index = 0;
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
for (i = 0; cs_data[i].section != NULL; i++) {
for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
reg_num = cs_data[i].section[j].reg_count;
data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = data;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
for (k = 0; k < reg_num; k++) {
data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
}
reg_list_mc_addr += reg_num * 4;
reg_list_blk_index += reg_num;
}
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+ dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
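[Editor's note] The sumo_rlc_init() hunks above wrap every CPU store into the RLC save/restore and clear-state buffers in cpu_to_le32(), since the GPU parses those dwords as little-endian regardless of host byte order. A minimal sketch of the pattern, not taken from the patch:

/* Sketch only: GPU-visible dwords are stored little-endian, so writes go
 * through cpu_to_le32() (a no-op on little-endian hosts) and any CPU
 * read-back would use le32_to_cpu(). 'gpu_buf' stands for a kmapped BO.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_write_gpu_dword(u32 *gpu_buf, unsigned int idx, u32 val)
{
	gpu_buf[idx] = cpu_to_le32(val);
}

static u32 example_read_gpu_dword(const u32 *gpu_buf, unsigned int idx)
{
	return le32_to_cpu(gpu_buf[idx]);
}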
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 57fcc4b16a52..aa695c4feb3d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
/*
* update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp = 0;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (connector->latency_present[1])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+ AUDIO_LIPSYNC(connector->audio_latency[1]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ } else {
+ if (connector->latency_present[0])
+ tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+ AUDIO_LIPSYNC(connector->audio_latency[0]);
+ else
+ tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ }
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -67,12 +105,11 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -124,8 +161,10 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
};
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -142,20 +181,30 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32(eld_reg_to_type[i][0], value);
}
@@ -324,8 +373,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
if (ASIC_IS_DCE6(rdev)) {
dce6_afmt_select_pin(encoder);
dce6_afmt_write_sad_regs(encoder);
+ dce6_afmt_write_latency_fields(encoder, mode);
} else {
evergreen_hdmi_write_sad_regs(encoder);
+ dce4_afmt_write_latency_fields(encoder, mode);
}
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d2962767d..17f990798992 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
* bit6 = 192 kHz
*/
+#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
+# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
+# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0 = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY_TYPE_NONE 0
+# define DISPLAY_TYPE_HDMI 1
+# define DISPLAY_TYPE_DP 2
+# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
+# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
+# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
+# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
+# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
+# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
+
#define AZ_HOT_PLUG_CONTROL 0x5e78
# define AZ_FORCE_CODEC_WAKE (1 << 0)
# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
@@ -1312,6 +1350,38 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+ /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
/* ASYNC DMA */
#define DMA_RB_RPTR 0xd008
#define DMA_RB_WPTR 0xd00c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866d79da..11aab2ab54ce 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-#define R600_ENTRY_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
- uint32_t r600_flags = 0;
- r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- r600_flags |= R600_PTE_SYSTEM;
- r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- while (count) {
- ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
/**
* cayman_vm_flush - vm flush using the CP
*
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e9688fbef..bdeb65ed3658 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "nid.h"
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags
+ * @flags: hw access flags
*
* Update the page tables using the DMA (cayman/TN).
*/
@@ -256,11 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
+ } else if (flags & R600_PTE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -291,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -299,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d71333033b2b..784983d78158 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1434,7 +1434,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be22062df1..4e609e8a8d2b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return 0;
}
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= FMT_SPATIAL_DITHER_EN;
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec83f284..5dceea6f71ae 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
kfree(parser->relocs);
- for (i = 0; i < parser->nchunks; i++) {
- kfree(parser->chunks[i].kdata);
- if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
- }
- }
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
}
@@ -2391,13 +2386,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
- r = r600_cs_parse(&parser);
- if (r) {
- DRM_ERROR("Invalid command stream !\n");
+ if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+ r = -EFAULT;
r600_cs_parser_fini(&parser, r);
return r;
}
- r = radeon_cs_finish_pages(&parser);
+ r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3b9c3b..4b89262f3f0e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
*/
#include <linux/hdmi.h>
+#include <linux/gcd.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
- { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
+ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
- { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
- { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
- { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
};
+
/*
- * calculate CTS value if it's not found in the table
+ * calculate CTS and N values if they are not found in the table
*/
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
{
- u64 n;
- u32 d;
-
- if (*CTS == 0) {
- n = (u64)clock * (u64)N * 1000ULL;
- d = 128 * freq;
- do_div(n, d);
- *CTS = n;
- }
- DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
- N, *CTS, freq);
+ int n, cts;
+ unsigned long div, mul;
+
+ /* Safe, but overly large values */
+ n = 128 * freq;
+ cts = clock * 1000;
+
+ /* Smallest valid fraction */
+ div = gcd(n, cts);
+
+ n /= div;
+ cts /= div;
+
+ /*
+ * The optimal N is 128*freq/1000. Calculate the closest larger
+ * value that doesn't truncate any bits.
+ */
+ mul = ((128*freq/1000) + (n-1))/n;
+
+ n *= mul;
+ cts *= mul;
+
+ /* Check that we are in spec (not always possible) */
+ if (n < (128*freq/1500))
+ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+ if (n > (128*freq/300))
+ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+ *N = n;
+ *CTS = cts;
+
+ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+ *N, *CTS, freq);
}
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
struct radeon_hdmi_acr res;
u8 i;
- for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
- r600_hdmi_predefined_acr[i].clock != 0; i++)
- ;
- res = r600_hdmi_predefined_acr[i];
+ /* Precalculated values for common clocks */
+ for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+ if (r600_hdmi_predefined_acr[i].clock == clock)
+ return r600_hdmi_predefined_acr[i];
+ }
- /* In case some CTS are missing */
- r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
- r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
- r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+ /* And odd clocks get manually calculated */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
return res;
}
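[Editor's note] The reworked r600_hdmi_calc_cts() above derives both N and CTS from the exact fraction 128*freq / (clock*1000): it reduces that fraction by its GCD and then scales it back up to the closest multiple at or above the recommended N of 128*freq/1000. A standalone worked example (userspace C, not driver code; gcd() is reimplemented locally since linux/gcd.h is not available here) for the 74.25/1.001 MHz pixel clock at 32 kHz audio, which reproduces the table entry N=4096, CTS=74176:

/* Standalone illustration of the N/CTS derivation used above. */
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned int clock = 74176;		/* pixel clock in kHz */
	unsigned int freq = 32000;		/* audio rate in Hz */
	unsigned long n = 128UL * freq;		/* overly large but exact */
	unsigned long cts = clock * 1000UL;
	unsigned long div = gcd(n, cts);	/* 64000 */
	unsigned long mul;

	n /= div;				/* 64 */
	cts /= div;				/* 1159 */

	/* scale up to the closest value >= the optimal N of 128*freq/1000 = 4096 */
	mul = ((128UL * freq / 1000) + (n - 1)) / n;	/* 64 */
	n *= mul;				/* 4096 */
	cts *= mul;				/* 74176 */

	printf("N=%lu CTS=%lu\n", n, cts);	/* matches the predefined table entry */
	return 0;
}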
@@ -313,8 +337,10 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -366,8 +392,10 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
};
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -384,20 +412,30 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
- break;
+ stereo_freqs |= sad->freq;
+ else
+ break;
}
}
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
WREG32(eld_reg_to_type[i][0], value);
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5932c5..ebe38724a976 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+/* DCE3 FMT blocks */
+#define FMT_CONTROL 0x6700
+# define FMT_PIXEL_ENCODING (1 << 16)
+ /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6710
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x672c
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
/* Power management */
#define CG_SPLL_FUNC_CNTL 0x600
# define SPLL_RESET (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 24f4960f59ee..b9ee99258602 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@ extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
+extern int radeon_runtime_pm;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@ struct radeon_fence_driver {
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
- unsigned long last_activity;
bool initialized;
};
@@ -832,6 +832,12 @@ struct radeon_mec {
#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
struct radeon_vm {
struct list_head list;
struct list_head va;
@@ -967,12 +973,8 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
uint32_t *kdata;
void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
};
struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@ struct radeon_cs_parser {
struct ww_acquire_ctx ticket;
};
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+ if (ibc->kdata)
+ return ibc->kdata[idx];
+ return p->ib.ptr[idx];
+}
+
struct radeon_cs_packet {
unsigned idx;
@@ -1675,8 +1684,6 @@ struct radeon_asic {
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
-
- u32 pt_ring_index;
void (*set_page)(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
@@ -2170,6 +2177,7 @@ struct radeon_device {
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
+ bool needs_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@ struct radeon_device {
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
+
+ struct dev_pm_domain vga_pm_domain;
+ bool have_disp_power_ref;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@ extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e04538fd6..50853c0cb49d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cayman_vm_set_page,
+ .set_page = &cayman_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cayman_vm_set_page,
+ .set_page = &cayman_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &si_vm_set_page,
+ .set_page = &si_dma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@ static struct radeon_asic si_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &si_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cik_vm_set_page,
+ .set_page = &cik_sdma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
- .set_page = &cik_vm_set_page,
+ .set_page = &cik_sdma_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2442,27 +2437,48 @@ int radeon_asic_init(struct radeon_device *rdev)
}
break;
case CHIP_BONAIRE:
+ case CHIP_HAWAII:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
rdev->has_uvd = true;
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CGTS_LS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
+ if (rdev->family == CHIP_BONAIRE) {
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ } else {
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ }
break;
case CHIP_KAVERI:
case CHIP_KABINI:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5e080d..f2833ee3a613 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -705,6 +706,10 @@ int cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+int cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -731,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070bf8388..6153ec18943a 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,6 +59,10 @@ struct atpx_mux {
u16 mux;
} __packed;
+bool radeon_is_px(void) {
+ return radeon_atpx_priv.atpx_detected;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0c..c155d6f3fa68 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 64565732cb98..20a768ac89a8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/pm_runtime.h>
+
extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
@@ -411,6 +413,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
+ if (property == rdev->mode_info.dither_property) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_connector->dither != val) {
+ radeon_connector->dither = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -750,6 +774,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -790,9 +819,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
- return connector->status;
- else
- return ret;
+ ret = connector->status;
+ goto out;
}
if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -873,10 +906,15 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
+ int r;
if (!radeon_connector->dac_load_detect)
return ret;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
@@ -887,6 +925,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -954,12 +994,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_mode_object *obj;
- int i;
+ int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+ goto exit;
+ }
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
+
+exit:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -1377,9 +1428,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ int r;
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+ goto out;
+ }
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
+out:
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
return ret;
}
@@ -1658,12 +1720,16 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+
if (radeon_audio != 0)
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1760,9 +1826,12 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1807,9 +1876,12 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1853,9 +1925,13 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
- (radeon_audio == 1) ?
- RADEON_AUDIO_AUTO :
- RADEON_AUDIO_DISABLE);
+ RADEON_AUDIO_AUTO);
+ }
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+
}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e35bc65..26ca223d12d6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -212,9 +212,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -237,25 +235,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
- (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
- size = p->chunks[i].length_dw * sizeof(uint32_t);
- p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
- p->chunks[i].user_ptr, size)) {
- return -EFAULT;
- }
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->cs_flags = p->chunks[i].kdata[0];
- if (p->chunks[i].length_dw > 1)
- ring = p->chunks[i].kdata[1];
- if (p->chunks[i].length_dw > 2)
- priority = (s32)p->chunks[i].kdata[2];
- }
+ size = p->chunks[i].length_dw;
+ cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ p->chunks[i].user_ptr = cdata;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+ continue;
+
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+ continue;
+ }
+
+ p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ size *= sizeof(uint32_t);
+ if (p->chunks[i].kdata == NULL) {
+ return -ENOMEM;
+ }
+ if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+ return -EFAULT;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
}
}
@@ -278,34 +282,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
}
}
- /* deal with non-vm */
- if ((p->chunk_ib_idx != -1) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
- (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
- return -EINVAL;
- }
- if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
- kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
- p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
- p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
- return -ENOMEM;
- }
- }
- p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
- p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
- p->chunks[p->chunk_ib_idx].last_copied_page = -1;
- p->chunks[p->chunk_ib_idx].last_page_index =
- ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
- }
-
return 0;
}
@@ -339,13 +315,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
- for (i = 0; i < parser->nchunks; i++) {
- kfree(parser->chunks[i].kdata);
- if ((parser->rdev->flags & RADEON_IS_AGP)) {
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
- }
- }
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +326,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
- struct radeon_cs_chunk *ib_chunk;
int r;
if (parser->chunk_ib_idx == -1)
@@ -364,28 +334,11 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (parser->cs_flags & RADEON_CS_USE_VM)
return 0;
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached).
- */
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- NULL, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
return r;
}
- r = radeon_cs_finish_pages(parser);
- if (r) {
- DRM_ERROR("Invalid command stream !\n");
- return r;
- }
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
@@ -423,7 +376,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
- struct radeon_cs_chunk *ib_chunk;
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
int r;
@@ -433,49 +385,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
- if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
- ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get const ib !\n");
- return r;
- }
- parser->const_ib.is_const_ib = true;
- parser->const_ib.length_dw = ib_chunk->length_dw;
- /* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
- ib_chunk->length_dw * 4)) {
- return -EFAULT;
- }
+ if (parser->const_ib.length_dw) {
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib.length_dw = ib_chunk->length_dw;
- /* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
- ib_chunk->length_dw * 4)) {
- return -EFAULT;
- }
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
@@ -527,6 +443,62 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
return r;
}
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_vm *vm = NULL;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM) {
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ vm = &fpriv->vm;
+
+ if ((rdev->family >= CHIP_TAHITI) &&
+ (parser->chunk_const_ib_idx != -1)) {
+ ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get const ib !\n");
+ return r;
+ }
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+ ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4))
+ return -EFAULT;
+ }
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ }
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
+ if (ib_chunk->kdata)
+ memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+ else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+ return -EFAULT;
+ return 0;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +524,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
- r = radeon_cs_parser_relocs(&parser);
- if (r) {
- if (r != -ERESTARTSYS)
+
+ r = radeon_cs_ib_fill(rdev, &parser);
+ if (!r) {
+ r = radeon_cs_parser_relocs(&parser);
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
+ }
+
+ if (r) {
radeon_cs_parser_fini(&parser, r, false);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +556,6 @@ out:
return r;
}
-int radeon_cs_finish_pages(struct radeon_cs_parser *p)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- int i;
- int size = PAGE_SIZE;
-
- for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
- if (i == ibc->last_page_index) {
- size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
- }
-
- if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
- ibc->user_ptr + (i * PAGE_SIZE),
- size))
- return -EFAULT;
- }
- return 0;
-}
-
-static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
-{
- int new_page;
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- int i;
- int size = PAGE_SIZE;
- bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
- false : true;
-
- for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
- ibc->user_ptr + (i * PAGE_SIZE),
- PAGE_SIZE)) {
- p->parser_error = -EFAULT;
- return 0;
- }
- }
-
- if (pg_idx == ibc->last_page_index) {
- size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
- }
-
- new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
- if (copy1)
- ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
-
- if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
- ibc->user_ptr + (pg_idx * PAGE_SIZE),
- size)) {
- p->parser_error = -EFAULT;
- return 0;
- }
-
- /* copy to IB for non single case */
- if (!copy1)
- memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
-
- ibc->last_copied_page = pg_idx;
- ibc->kpage_idx[new_page] = pg_idx;
-
- return new_page;
-}
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
/**
* radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
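The radeon_cs.c rework above removes the old two-page "kpage" streaming of the indirect buffer and instead copies each chunk into kernel memory once in radeon_cs_parser_init(), with the new radeon_cs_ib_fill() later filling the IB either from that copy or straight from the user pointer. A compact standalone sketch of the copy policy visible in the hunks (enum names are stand-ins for the RADEON_CHUNK_ID_* values):

#include <stdbool.h>

enum chunk_id { CHUNK_RELOCS, CHUNK_IB, CHUNK_CONST_IB, CHUNK_FLAGS };

/*
 * Up-front copy decision from radeon_cs_parser_init(): the const IB is
 * never bounced through kernel memory, and the main IB is only bounced
 * on AGP boards.  Relocs and flags always get a kernel copy.
 */
static bool chunk_needs_kdata(enum chunk_id id, bool is_agp)
{
	if (id == CHUNK_CONST_IB)
		return false;
	if (id == CHUNK_IB && !is_agp)
		return false;
	return true;
}

/* radeon_cs_ib_fill() then picks the matching source for the IB body. */
static const char *ib_copy_source(bool have_kdata)
{
	return have_kdata ? "memcpy from kdata" : "copy_from_user from user_ptr";
}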
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e09be3e..b9234c43f43d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,9 +98,16 @@ static const char radeon_family_name[][16] = {
"BONAIRE",
"KAVERI",
"KABINI",
+ "HAWAII",
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_program_register_sequence - program an array of registers.
*
@@ -1076,7 +1083,10 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
+ return;
+
if (state == VGA_SWITCHEROO_ON) {
unsigned d3_delay = dev->pdev->d3_delay;
@@ -1087,7 +1097,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
dev->pdev->d3_delay = 20;
- radeon_resume_kms(dev);
+ radeon_resume_kms(dev, true, true);
dev->pdev->d3_delay = d3_delay;
@@ -1097,7 +1107,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- radeon_suspend_kms(dev, pmm);
+ radeon_suspend_kms(dev, true, true);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1147,6 +1157,7 @@ int radeon_device_init(struct radeon_device *rdev,
{
int r, i;
int dma_bits;
+ bool runtime = false;
rdev->shutdown = false;
rdev->dev = &pdev->dev;
@@ -1293,7 +1304,14 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
+
+ if (radeon_runtime_pm == 1)
+ runtime = true;
+ if ((radeon_runtime_pm == -1) && radeon_is_px())
+ runtime = true;
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ if (runtime)
+ vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
r = radeon_init(rdev);
if (r)
@@ -1383,7 +1401,7 @@ void radeon_device_fini(struct radeon_device *rdev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
@@ -1394,9 +1412,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
- if (state.event == PM_EVENT_PRETHAW) {
- return 0;
- }
+
rdev = dev->dev_private;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1471,17 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (state.event == PM_EVENT_SUSPEND) {
+ if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- console_lock();
- radeon_fbdev_set_suspend(rdev, 1);
- console_unlock();
+
+ if (fbcon) {
+ console_lock();
+ radeon_fbdev_set_suspend(rdev, 1);
+ console_unlock();
+ }
return 0;
}
@@ -1475,7 +1494,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int radeon_resume_kms(struct drm_device *dev)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1503,17 @@ int radeon_resume_kms(struct drm_device *dev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- console_lock();
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
- console_unlock();
- return -1;
+ if (fbcon) {
+ console_lock();
+ }
+ if (resume) {
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev)) {
+ if (fbcon)
+ console_unlock();
+ return -1;
+ }
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
@@ -1502,9 +1526,11 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- radeon_fbdev_set_suspend(rdev, 0);
- console_unlock();
-
+ if (fbcon) {
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
@@ -1549,6 +1575,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int resched;
down_write(&rdev->exclusive_lock);
+
+ if (!rdev->needs_reset) {
+ up_write(&rdev->exclusive_lock);
+ return 0;
+ }
+
+ rdev->needs_reset = false;
+
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
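The radeon_device.c hunks register the GPU as a runtime-managed vga_switcheroo client when runtime PM is forced on or the board is a PX (PowerXpress) dGPU, switch the suspend/resume entry points to plain bool arguments, and make radeon_gpu_reset() a no-op unless a waiter flagged a lockup. Two tiny standalone C sketches of those decisions (illustrative only):

#include <stdbool.h>

/* Register as a runtime-managed switcheroo client when runpm=1, or
 * when runpm is left at the default (-1) on a PX laptop. */
static bool use_runtime_switcheroo(int runpm, bool is_px)
{
	if (runpm == 1)
		return true;
	if (runpm == -1 && is_px)
		return true;
	return false;
}

/* Reset gate: redundant reset requests collapse into one, because the
 * flag set by the fence code is consumed by the first reset. */
static bool take_reset_request(bool *needs_reset)
{
	if (!*needs_reset)
		return false;	/* nobody asked for a reset */
	*needs_reset = false;	/* consume the request */
	return true;
}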
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa050d41d..7b253815a323 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
#include "atom.h"
#include <asm/div64.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -306,7 +307,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
- &vpos, &hpos)) &&
+ &vpos, &hpos, NULL, NULL)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
/* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@ unlock_free:
return r;
}
+static int
+radeon_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct radeon_device *rdev;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret;
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ dev = set->crtc->dev;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_crtc_helper_set_config(set);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->enabled)
+ active = true;
+
+ pm_runtime_mark_last_busy(dev->dev);
+
+ rdev = dev->dev_private;
+ /* if we have active crtcs and we don't have a power ref,
+ take the current one */
+ if (active && !rdev->have_disp_power_ref) {
+ rdev->have_disp_power_ref = true;
+ return ret;
+ }
+ /* if we have no active crtcs, then drop the power ref
+ we got before */
+ if (!active && rdev->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ rdev->have_disp_power_ref = false;
+ }
+
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip = radeon_crtc_page_flip,
};
@@ -1178,6 +1223,12 @@ static struct drm_prop_enum_list radeon_audio_enum_list[] =
{ RADEON_AUDIO_AUTO, "auto" },
};
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list radeon_dither_enum_list[] =
+{ { RADEON_FMT_DITHER_DISABLE, "off" },
+ { RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1234,6 +1285,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
"audio",
radeon_audio_enum_list, sz);
+ sz = ARRAY_SIZE(radeon_dither_enum_list);
+ rdev->mode_info.dither_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "dither",
+ radeon_dither_enum_list, sz);
+
return 0;
}
@@ -1539,12 +1596,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
/*
- * Retrieve current video scanout position of crtc on a given gpu.
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
*
* \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
struct radeon_device *rdev = dev->dev_private;
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
}
}
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
*hpos = (position >> 16) & 0x1fff;
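Two things happen in the radeon_display.c hunks: radeon_crtc_set_config() now keeps a single long-lived runtime-PM reference for as long as any CRTC is enabled, and the scanout-position query gained optional stime/etime timestamps around the register reads. A standalone model of the reference juggling in set_config (the pm_refs counter stands in for the runtime-PM usage count):

#include <stdbool.h>

struct disp_state {
	bool have_disp_power_ref;	/* long-lived ref while a crtc is lit */
	int  pm_refs;			/* stand-in for the runtime-PM usage count */
};

/*
 * The caller already took one reference (pm_runtime_get_sync) on entry.
 * Either convert it into the long-lived display reference, or drop it,
 * also dropping the display reference once no crtc is enabled anymore.
 */
static void balance_display_ref(struct disp_state *s, bool any_crtc_enabled)
{
	if (any_crtc_enabled && !s->have_disp_power_ref) {
		s->have_disp_power_ref = true;	/* keep the entry ref */
		return;
	}
	if (!any_crtc_enabled && s->have_disp_power_ref) {
		s->have_disp_power_ref = false;
		s->pm_refs--;			/* put the long-lived ref */
	}
	s->pm_refs--;				/* put the ref taken on entry */
}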
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1ba1de4..1aee32213f66 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
-
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
/*
* KMS wrapper.
* - 2.0.0 - initial interface
@@ -87,8 +88,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-int radeon_resume_kms(struct drm_device *dev);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -100,14 +101,14 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
- int *vpos, int *hpos);
+ int *vpos, int *hpos, ktime_t *stime,
+ ktime_t *etime);
extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -137,9 +138,11 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
+bool radeon_is_px(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_is_px(void) { return false; }
#endif
int radeon_no_wb;
@@ -162,6 +165,7 @@ int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -223,6 +227,9 @@ module_param_named(dpm, radeon_dpm, int, 0444);
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, radeon_aspm, int, 0444);
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -259,6 +266,7 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
+
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -353,25 +361,144 @@ radeon_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int radeon_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- return radeon_suspend_kms(dev, state);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(drm_dev, true, true);
}
-static int
-radeon_pci_resume(struct pci_dev *pdev)
+static int radeon_pmops_resume(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- return radeon_resume_kms(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(drm_dev, true, true);
+}
+
+static int radeon_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (radeon_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+ ret = radeon_suspend_kms(drm_dev, false, false);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+ return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (radeon_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = radeon_resume_kms(drm_dev, false, false);
+ drm_kms_helper_poll_enable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ return 0;
}
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_crtc *crtc;
+
+ if (radeon_runtime_pm == 0)
+ return -EBUSY;
+
+ /* are we PX enabled? */
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ return -EBUSY;
+ }
+
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
+
+long radeon_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ long ret;
+ dev = file_priv->minor->dev;
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static const struct dev_pm_ops radeon_pm_ops = {
+ .suspend = radeon_pmops_suspend,
+ .resume = radeon_pmops_resume,
+ .freeze = radeon_pmops_freeze,
+ .thaw = radeon_pmops_thaw,
+ .poweroff = radeon_pmops_freeze,
+ .restore = radeon_pmops_resume,
+ .runtime_suspend = radeon_pmops_runtime_suspend,
+ .runtime_resume = radeon_pmops_runtime_resume,
+ .runtime_idle = radeon_pmops_runtime_idle,
+};
+
static const struct file_operations radeon_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = radeon_drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
.read = drm_read,
@@ -380,6 +507,15 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ radeon_driver_unload_kms(dev);
+}
+
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -392,8 +528,6 @@ static struct drm_driver kms_driver = {
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .suspend = radeon_suspend_kms,
- .resume = radeon_resume_kms,
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms,
@@ -408,7 +542,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
@@ -451,8 +584,8 @@ static struct pci_driver radeon_kms_pci_driver = {
.id_table = pciidlist,
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
+ .driver.pm = &radeon_pm_ops,
+ .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
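radeon_drv.c replaces the legacy PCI suspend/resume callbacks with a full dev_pm_ops table (system sleep plus runtime suspend/resume/idle) and routes every ioctl through radeon_drm_ioctl(), so a powered-down dGPU is woken on demand and the autosuspend timer is rearmed afterwards. A small standalone sketch of the runtime-idle gate shown above, with the same three refusal conditions:

#include <stdbool.h>
#include <errno.h>

/*
 * Model of radeon_pmops_runtime_idle(): autosuspend is only allowed
 * when runtime PM is not disabled (runpm=0), the board is a PX dGPU
 * (unless runpm=1 forces it), and no crtc is scanning out.  A positive
 * return defers to autosuspend instead of suspending immediately.
 */
static int runtime_idle_check(int runpm, bool is_px, int enabled_crtcs)
{
	if (runpm == 0)
		return -EBUSY;		/* user disabled runtime PM */
	if (runpm == -1 && !is_px)
		return -EBUSY;		/* default mode: PX laptops only */
	if (enabled_crtcs > 0)
		return -EBUSY;		/* a display is still active */
	return 1;			/* let autosuspend fire later */
}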
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42f7de5..543dcfae7e6f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+long radeon_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
/* The rest of the file is DEPRECATED! */
#ifdef CONFIG_DRM_RADEON_UMS
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 3c8289083f9d..614ad549297f 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -96,6 +96,7 @@ enum radeon_family {
CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
+ CHIP_HAWAII,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..281d14c22a47 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
- if (wake) {
- rdev->fence_drv[ring].last_activity = jiffies;
+ if (wake)
wake_up_all(&rdev->fence_queue);
- }
}
/**
@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
}
/**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
}
/**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
*
* @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
* @intr: use interruptable sleep
* @lock_ring: whether the ring should be locked or not
*
- * Wait for the requested sequence number to be written (all asics).
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptable (true) or non-interruptable
* (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
* Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
*/
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
- unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+ bool intr, bool lock_ring)
{
- unsigned long timeout, last_activity;
- uint64_t seq;
- unsigned i;
+ uint64_t last_seq[RADEON_NUM_RINGS];
bool signaled;
- int r;
+ int i, r;
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+
+ /* Save current sequence values, used to check for GPU lockups */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
- while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
- if (!rdev->ring[ring].ready) {
- return -EBUSY;
+ last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+ radeon_irq_kms_sw_irq_get(rdev, i);
}
- timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
- if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
- /* the normal case, timeout is somewhere before last_activity */
- timeout = rdev->fence_drv[ring].last_activity - timeout;
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue, (
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+ || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
} else {
- /* either jiffies wrapped around, or no fence was signaled in the last 500ms
- * anyway we will just wait for the minimum amount and then check for a lockup
- */
- timeout = 1;
+ r = wait_event_timeout(rdev->fence_queue, (
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+ || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
}
- seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
- /* Save current last activity valuee, used to check for GPU lockups */
- last_activity = rdev->fence_drv[ring].last_activity;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
- radeon_irq_kms_sw_irq_get(rdev, ring);
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue,
- (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
- timeout);
- } else {
- r = wait_event_timeout(rdev->fence_queue,
- (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
- timeout);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
}
- radeon_irq_kms_sw_irq_put(rdev, ring);
- if (unlikely(r < 0)) {
+
+ if (unlikely(r < 0))
return r;
- }
- trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!signaled)) {
+ if (rdev->needs_reset)
+ return -EDEADLK;
+
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
- if (r) {
+ if (r)
continue;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+ break;
}
- /* check if sequence value has changed since last_activity */
- if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (i != RADEON_NUM_RINGS)
continue;
- }
- if (lock_ring) {
+ if (lock_ring)
mutex_lock(&rdev->ring_lock);
- }
- /* test if somebody else has already decided that this is a lockup */
- if (last_activity != rdev->fence_drv[ring].last_activity) {
- if (lock_ring) {
- mutex_unlock(&rdev->ring_lock);
- }
- continue;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
+
+ if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+ break;
}
- if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ if (i < RADEON_NUM_RINGS) {
/* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
- target_seq, seq);
-
- /* change last activity so nobody else think there is a lockup */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- rdev->fence_drv[i].last_activity = jiffies;
- }
-
- /* mark the ring as not ready any more */
- rdev->ring[ring].ready = false;
- if (lock_ring) {
+ dev_warn(rdev->dev, "GPU lockup (waiting for "
+ "0x%016llx last fence id 0x%016llx on"
+ " ring %d)\n",
+ target_seq[i], last_seq[i], i);
+
+ /* remember that we need a reset */
+ rdev->needs_reset = true;
+ if (lock_ring)
mutex_unlock(&rdev->ring_lock);
- }
+ wake_up_all(&rdev->fence_queue);
return -EDEADLK;
}
- if (lock_ring) {
+ if (lock_ring)
mutex_unlock(&rdev->ring_lock);
- }
}
}
return 0;
@@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
*/
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
if (fence == NULL) {
@@ -395,147 +412,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return -EINVAL;
}
- r = radeon_fence_wait_seq(fence->rdev, fence->seq,
- fence->ring, intr, true);
- if (r) {
- return r;
- }
- fence->seq = RADEON_FENCE_SIGNALED_SEQ;
- return 0;
-}
-
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
- unsigned i;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
- return true;
- }
- }
- return false;
-}
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics). Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
- u64 *target_seq, bool intr)
-{
- unsigned long timeout, last_activity, tmp;
- unsigned i, ring = RADEON_NUM_RINGS;
- bool signaled;
- int r;
-
- for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i]) {
- continue;
- }
-
- /* use the most recent one as indicator */
- if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
- last_activity = rdev->fence_drv[i].last_activity;
- }
-
- /* For lockup detection just pick the lowest ring we are
- * actively waiting for
- */
- if (i < ring) {
- ring = i;
- }
- }
-
- /* nothing to wait for ? */
- if (ring == RADEON_NUM_RINGS) {
- return -ENOENT;
- }
-
- while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
- timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
- if (time_after(last_activity, timeout)) {
- /* the normal case, timeout is somewhere before last_activity */
- timeout = last_activity - timeout;
- } else {
- /* either jiffies wrapped around, or no fence was signaled in the last 500ms
- * anyway we will just wait for the minimum amount and then check for a lockup
- */
- timeout = 1;
- }
-
- trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (target_seq[i]) {
- radeon_irq_kms_sw_irq_get(rdev, i);
- }
- }
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue,
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
- timeout);
- } else {
- r = wait_event_timeout(rdev->fence_queue,
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
- timeout);
- }
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (target_seq[i]) {
- radeon_irq_kms_sw_irq_put(rdev, i);
- }
- }
- if (unlikely(r < 0)) {
- return r;
- }
- trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
-
- if (unlikely(!signaled)) {
- /* we were interrupted for some reason and fence
- * isn't signaled yet, resume waiting */
- if (r) {
- continue;
- }
-
- mutex_lock(&rdev->ring_lock);
- for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
- if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
- tmp = rdev->fence_drv[i].last_activity;
- }
- }
- /* test if somebody else has already decided that this is a lockup */
- if (last_activity != tmp) {
- last_activity = tmp;
- mutex_unlock(&rdev->ring_lock);
- continue;
- }
-
- if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
- /* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
- target_seq[ring]);
-
- /* change last activity so nobody else think there is a lockup */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- rdev->fence_drv[i].last_activity = jiffies;
- }
+ r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+ if (r)
+ return r;
- /* mark the ring as not ready any more */
- rdev->ring[ring].ready = false;
- mutex_unlock(&rdev->ring_lock);
- return -EDEADLK;
- }
- mutex_unlock(&rdev->ring_lock);
- }
- }
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
bool intr)
{
uint64_t seq[RADEON_NUM_RINGS];
- unsigned i;
+ unsigned i, num_rings = 0;
int r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
continue;
}
- if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
- /* something was allready signaled */
- return 0;
- }
-
seq[i] = fences[i]->seq;
+ ++num_rings;
+
+ /* test if something was already signaled */
+ if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
}
- r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ /* nothing to wait for ? */
+ if (num_rings == 0)
+ return -ENOENT;
+
+ r = radeon_fence_wait_seq(rdev, seq, intr, true);
if (r) {
return r;
}
@@ -594,15 +483,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
*/
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq;
+ uint64_t seq[RADEON_NUM_RINGS] = {};
- seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
- if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+ seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emited fence */
return -ENOENT;
}
- return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ return radeon_fence_wait_seq(rdev, seq, false, false);
}
/**
@@ -617,14 +506,18 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
*/
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+ if (!seq[ring])
+ return 0;
+
+ r = radeon_fence_wait_seq(rdev, seq, false, false);
if (r) {
- if (r == -EDEADLK) {
+ if (r == -EDEADLK)
return -EDEADLK;
- }
+
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
ring, r);
}
@@ -826,7 +719,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
- rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
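The radeon_fence.c rework folds the old single-ring and any-ring wait paths into one radeon_fence_wait_seq() that takes a per-ring array of target sequence numbers, waits until any of them signals, and on a stalled timeout sets rdev->needs_reset instead of doing last_activity bookkeeping and marking rings not ready. A standalone sketch of the array convention (NUM_RINGS is a stand-in for RADEON_NUM_RINGS):

#include <stdint.h>
#include <stdbool.h>

#define NUM_RINGS 8

/*
 * Model of radeon_fence_any_seq_signaled(): target_seq[] is indexed by
 * ring id and a zero entry means "not waiting on that ring".  The wait
 * loop polls this between timeouts and, if no sequence number moved for
 * a whole timeout and a ring looks locked up, requests a reset.
 */
static bool any_seq_signaled(const uint64_t last_seq[NUM_RINGS],
			     const uint64_t target_seq[NUM_RINGS])
{
	for (unsigned i = 0; i < NUM_RINGS; ++i)
		if (target_seq[i] && last_seq[i] >= target_seq[i])
			return true;
	return false;
}

/* A single-fence wait is now just a one-entry array: */
static void fill_single_wait(uint64_t target_seq[NUM_RINGS],
			     unsigned ring, uint64_t seq)
{
	for (unsigned i = 0; i < NUM_RINGS; ++i)
		target_seq[i] = 0;
	target_seq[ring] = seq;
}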
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a2bd50..8a83b89d4709 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
- unsigned pd_size, pts_size;
- u64 *pd_addr;
+ unsigned pd_size, pd_entries, pts_size;
+ struct radeon_ib ib;
int r;
if (vm == NULL) {
@@ -619,8 +619,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
return 0;
}
-retry:
pd_size = radeon_vm_directory_size(rdev);
+ pd_entries = radeon_vm_num_pdes(rdev);
+
+retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@ retry:
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
/* Initially clear the page directory */
- pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
- memset(pd_addr, 0, pd_size);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+ NULL, pd_entries * 2 + 64);
+ if (r) {
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return r;
+ }
+
+ ib.length_dw = 0;
+
+ radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+ 0, pd_entries, 0, 0);
+
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return r;
+ }
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
+ radeon_fence_unref(&vm->last_flush);
+ /* allocate page table array */
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
@@ -914,6 +938,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
}
/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+ uint32_t hw_flags = 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ hw_flags |= R600_PTE_SYSTEM;
+ hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return hw_flags;
+}
+
+/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
@@ -974,7 +1018,11 @@ retry:
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
- RADEON_VM_PAGE_VALID);
+ R600_PTE_VALID);
+
+ count *= RADEON_VM_PTE_COUNT;
+ radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+ count, 0, 0);
}
count = 1;
@@ -987,8 +1035,11 @@ retry:
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
- incr, RADEON_VM_PAGE_VALID);
+ incr, R600_PTE_VALID);
+ count *= RADEON_VM_PTE_COUNT;
+ radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+ count, 0, 0);
}
return 0;
@@ -1082,7 +1133,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
- unsigned ridx = rdev->asic->vm.pt_ring_index;
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
+ /* reserve space for clearing new page tables */
+ ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
+
/* update too big for an IB */
if (ndw > 0xfffff)
return -ENOMEM;
- r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1218,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, bo_va->flags);
+ addr, radeon_vm_page_flags(bo_va->flags));
radeon_ib_sync_to(&ib, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
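The radeon_gart.c hunks move page-directory clearing onto the DMA ring (an IB built with radeon_asic_vm_set_page writing zero entries) and add radeon_vm_page_flags() to translate the userspace RADEON_VM_PAGE_* flags into R600_PTE_* hardware bits. A standalone version of that translation, with locally defined stand-in bit values since the real macros live in the driver headers:

#include <stdint.h>

#define VM_PAGE_VALID     (1u << 0)
#define VM_PAGE_READABLE  (1u << 1)
#define VM_PAGE_WRITEABLE (1u << 2)
#define VM_PAGE_SYSTEM    (1u << 3)
#define VM_PAGE_SNOOPED   (1u << 4)

#define PTE_VALID     (1u << 0)
#define PTE_SYSTEM    (1u << 1)
#define PTE_SNOOPED   (1u << 2)
#define PTE_READABLE  (1u << 5)
#define PTE_WRITEABLE (1u << 6)

/* Same shape as radeon_vm_page_flags(): SNOOPED only matters for
 * system pages, everything else maps one bit to one bit. */
static uint32_t vm_page_flags_to_pte(uint32_t flags)
{
	uint32_t hw = 0;

	hw |= (flags & VM_PAGE_VALID)     ? PTE_VALID     : 0;
	hw |= (flags & VM_PAGE_READABLE)  ? PTE_READABLE  : 0;
	hw |= (flags & VM_PAGE_WRITEABLE) ? PTE_WRITEABLE : 0;
	if (flags & VM_PAGE_SYSTEM) {
		hw |= PTE_SYSTEM;
		hw |= (flags & VM_PAGE_SNOOPED) ? PTE_SNOOPED : 0;
	}
	return hw;
}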
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a5835..805c5e566b9a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8e84db..bdb0f93e73bc 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = drm_ioctl(filp, cmd, arg);
+ ret = radeon_drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e8482cf30..ec6240b00469 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/pm_runtime.h>
+
#define RADEON_WAIT_IDLE_TIMEOUT 200
/**
@@ -47,8 +49,12 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
+ irqreturn_t ret;
- return radeon_irq_process(rdev);
+ ret = radeon_irq_process(rdev);
+ if (ret == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(dev->dev);
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb2..bb8710531a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
+#include <linux/pm_runtime.h>
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -50,9 +50,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+
if (rdev->rmmio == NULL)
goto done_free;
+
+ pm_runtime_get_sync(dev->dev);
+
radeon_acpi_fini(rdev);
+
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -125,9 +130,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ if (radeon_runtime_pm != 0) {
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_allow(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ }
+
out:
if (r)
radeon_driver_unload_kms(dev);
+
+
return r;
}
@@ -191,7 +207,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pci_device;
+ *value = dev->pdev->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
@@ -475,9 +491,14 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
+ int r;
file_priv->driver_priv = NULL;
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
+
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
@@ -506,6 +527,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
}
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
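radeon_kms.c turns autosuspend on at load time (with a 5 second idle delay) whenever runpm is not disabled, and open/unload now hold a runtime reference while they touch the hardware. A sketch of the enabling order shown above, with stub helpers standing in for the kernel's pm_runtime_* calls (the real ones take a struct device * and some return int):

static void pm_runtime_use_autosuspend(void *dev)              { (void)dev; }
static void pm_runtime_set_autosuspend_delay(void *dev, int ms){ (void)dev; (void)ms; }
static void pm_runtime_set_active(void *dev)                   { (void)dev; }
static void pm_runtime_allow(void *dev)                        { (void)dev; }
static void pm_runtime_mark_last_busy(void *dev)               { (void)dev; }
static void pm_runtime_put_autosuspend(void *dev)              { (void)dev; }

/* Order used at driver load: enable autosuspend, mark the device
 * active and allowed to runtime-suspend, then drop a reference so the
 * 5 second idle countdown can start. */
static void enable_runtime_pm(void *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}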
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7cb178a34a0f..0c7b8c66301b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1056,6 +1056,26 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
}
}
+static void radeon_crtc_disable(struct drm_crtc *crtc)
+{
+ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ int r;
+ struct radeon_framebuffer *radeon_fb;
+ struct radeon_bo *rbo;
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ }
+}
+
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.dpms = radeon_crtc_dpms,
.mode_fixup = radeon_crtc_mode_fixup,
@@ -1065,6 +1085,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
+ .disable = radeon_crtc_disable
};
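The legacy CRTC gains a .disable helper that turns the pipe off and unpins the scanout buffer object, so its VRAM can be reclaimed (or the GPU powered down) once the display is gone. A standalone model of the reserve/unpin/unreserve pattern, with toy buffer-object stubs in place of the radeon_bo_* helpers:

#include <stdio.h>

struct bo { int pin_count; int reserved; };

static int  bo_reserve(struct bo *bo)   { bo->reserved = 1; return 0; }
static void bo_unpin(struct bo *bo)     { if (bo->pin_count) bo->pin_count--; }
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }

/* Unpin the scanout buffer once the crtc is off: reserve, unpin,
 * unreserve, and only warn (do not bail out) if the reserve fails. */
static void crtc_disable_unpin(struct bo *scanout)
{
	if (!scanout)
		return;
	if (bo_reserve(scanout)) {
		fprintf(stderr, "failed to reserve bo before unpin\n");
		return;
	}
	bo_unpin(scanout);
	bo_unreserve(scanout);
}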
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8d..c89971d904c3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f00b2f..3f0dd664af90 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@ struct radeon_mode_info {
struct drm_property *underscan_vborder_property;
/* audio */
struct drm_property *audio_property;
+ /* FMT dithering */
+ struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@ enum radeon_connector_audio {
RADEON_AUDIO_AUTO = 2
};
+enum radeon_connector_dither {
+ RADEON_FMT_DITHER_DISABLE = 0,
+ RADEON_FMT_DITHER_ENABLE = 1,
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -498,6 +505,7 @@ struct radeon_connector {
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
enum radeon_connector_audio audio;
+ enum radeon_connector_dither dither;
};
struct radeon_framebuffer {
@@ -758,7 +766,8 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
- int *vpos, int *hpos);
+ int *vpos, int *hpos, ktime_t *stime,
+ ktime_t *etime);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
@@ -850,6 +859,12 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+/* fmt blocks */
+void avivo_program_fmt(struct drm_encoder *encoder);
+void dce3_program_fmt(struct drm_encoder *encoder);
+void dce4_program_fmt(struct drm_encoder *encoder);
+void dce8_program_fmt(struct drm_encoder *encoder);
+
/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc7ad3c..866ace070b91 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_AUTO;
} else {
- mutex_unlock(&rdev->pm.mutex);
count = -EINVAL;
goto fail;
}
if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active) {
+ count = -EINVAL;
+ goto fail;
+ }
ret = radeon_dpm_force_performance_level(rdev, level);
if (ret)
count = -EINVAL;
}
- mutex_unlock(&rdev->pm.mutex);
fail:
+ mutex_unlock(&rdev->pm.mutex);
+
return count;
}
@@ -881,11 +885,12 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
}
}
- printk("switching from power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
-
+ if (radeon_dpm == 1) {
+ printk("switching from power state:\n");
+ radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+ }
mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
if (rdev->asic->dpm.force_performance_level) {
- if (rdev->pm.dpm.thermal_active)
+ if (rdev->pm.dpm.thermal_active) {
+ enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
/* force low perf level for thermal */
radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
- else
- /* otherwise, enable auto */
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+ /* save the user's level */
+ rdev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
+ }
}
done:
@@ -1179,7 +1188,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
mutex_lock(&rdev->pm.mutex);
radeon_dpm_init(rdev);
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
- radeon_dpm_print_power_states(rdev);
+ if (radeon_dpm == 1)
+ radeon_dpm_print_power_states(rdev);
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,24 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
+ case CHIP_CAYMAN:
+ case CHIP_ARUBA:
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
+ rdev->pm.pm_method = PM_METHOD_DPM;
+ else
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
@@ -1256,16 +1284,11 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
- case CHIP_CAYMAN:
- case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- case CHIP_BONAIRE:
- case CHIP_KABINI:
- case CHIP_KAVERI:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1296,10 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if (radeon_dpm == 1)
- rdev->pm.pm_method = PM_METHOD_DPM;
- else
+ else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else
+ rdev->pm.pm_method = PM_METHOD_DPM;
break;
default:
/* default to profile method */
@@ -1468,7 +1491,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e367815964..811bca691b36 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@ TRACE_EVENT(radeon_cs,
__entry->fences)
);
+TRACE_EVENT(radeon_vm_set_page,
+ TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags),
+ TP_ARGS(pe, addr, count, incr, flags),
+ TP_STRUCT__entry(
+ __field(u64, pe)
+ __field(u64, addr)
+ __field(u32, count)
+ __field(u32, incr)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->pe = pe;
+ __entry->addr = addr;
+ __entry->count = count;
+ __entry->incr = incr;
+ __entry->flags = flags;
+ ),
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+ __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 33858364fe89..a77cd274dfc3 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -59,6 +59,7 @@
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
#define CIK_MC_UCODE_SIZE 7866
+#define HAWAII_MC_UCODE_SIZE 7933
/* SDMA */
#define CIK_SDMA_UCODE_SIZE 1050
@@ -143,4 +144,7 @@
#define BONAIRE_SMC_UCODE_START 0x20000
#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+#define HAWAII_SMC_UCODE_START 0x20000
+#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5be1b4..373d088bac66 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -97,6 +97,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
+ case CHIP_HAWAII:
fw_name = FIRMWARE_BONAIRE;
break;
@@ -240,6 +241,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
+ radeon_uvd_note_usage(rdev);
+
r = radeon_uvd_get_destroy_msg(rdev,
R600_RING_TYPE_UVD_INDEX, handle, &fence);
if (r) {
@@ -620,7 +623,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
if (r)
goto err;
- r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+ r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
if (r)
goto err;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba8017b9a..76cc8d3aafec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+ else
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1447d794c22a..1c560629575a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
- read_delay_latency.full = dfixed_div(read_delay_latency,
- rdev->pm.igp_sideport_mclk);
+ read_delay_latency.full = dfixed_const(370 * 800);
+ a.full = dfixed_const(1000);
+ b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
+ read_delay_latency.full = dfixed_div(read_delay_latency, b);
+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
} else {
if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
rdev->pm.k8_bandwidth.full)
@@ -488,14 +490,10 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -526,8 +524,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -555,8 +551,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b193b4..5d1c316115ef 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1155,14 +1155,10 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1193,8 +1189,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
- if (dfixed_trunc(priority_mark02) < 0)
- priority_mark02.full = 0;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1222,8 +1216,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
- if (dfixed_trunc(priority_mark12) < 0)
- priority_mark12.full = 0;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d277a36a..26633a025252 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@ static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device
WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
}
-static u64 rv6xx_clocks_per_unit(u32 unit)
+static u32 rv6xx_clocks_per_unit(u32 unit)
{
- u64 tmp = 1 << (2 * unit);
+ u32 tmp = 1 << (2 * unit);
return tmp;
}
@@ -417,7 +417,7 @@ static u64 rv6xx_clocks_per_unit(u32 unit)
static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
u32 unscaled_count, u32 unit)
{
- u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
+ u32 count_per_unit = rv6xx_clocks_per_unit(unit);
return (unscaled_count + count_per_unit - 1) / count_per_unit;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cbca0a1..6a64ccaa0695 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-/**
- * si_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (SI).
- */
-void si_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- uint64_t value;
- unsigned ndw;
-
- if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
- while (count) {
- ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
- ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1));
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- /* DMA */
- si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
- }
-}
-
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
@@ -5372,52 +5312,53 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
if (buffer == NULL)
return;
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
- buffer[count++] = 0x80000000;
- buffer[count++] = 0x80000000;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
- buffer[count++] = ext->reg_index - 0xa000;
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = ext->extent[i];
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
- buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (rdev->family) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- buffer[count++] = 0x2a00126a;
+ buffer[count++] = cpu_to_le32(0x2a00126a);
break;
case CHIP_VERDE:
- buffer[count++] = 0x0000124a;
+ buffer[count++] = cpu_to_le32(0x0000124a);
break;
case CHIP_OLAND:
- buffer[count++] = 0x00000082;
+ buffer[count++] = cpu_to_le32(0x00000082);
break;
case CHIP_HAINAN:
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
default:
- buffer[count++] = 0x00000000;
+ buffer[count++] = cpu_to_le32(0x00000000);
break;
}
- buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
- buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
- buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
- buffer[count++] = 0;
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
}
static void si_init_pg(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d23dfce..8e8f46133532 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
+#include "radeon_trace.h"
#include "sid.h"
u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
- value |= r600_flags;
+ value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
@@ -110,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
- if (flags & RADEON_VM_PAGE_VALID)
+ if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
@@ -118,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1bf93c..0b00c790fb77 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@ static void si_program_display_gap(struct radeon_device *rdev)
WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
}
- si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+ /* Setting this to false forces the performance state to low if the crtcs are disabled.
+ * This can be a problem on PowerXpress systems or if you want to use the card
+ * for offscreen rendering or compute if there are no crtcs enabled. Set it to
+ * true for now so that performance scales even if the displays are off.
+ */
+ si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
}
static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@ static int si_init_smc_table(struct radeon_device *rdev)
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
- table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+ table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea66a00..b322acc48097 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
#define STATE3_MASK (0x1f << 15)
#define STATE3_SHIFT 15
-#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
+#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
#define TRAIN_DONE_D1 (1 << 31)
@@ -683,6 +683,51 @@
* bit5 = 176.4 kHz
* bit6 = 192 kHz
*/
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
+# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
+# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
+# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
+# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
+# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
+# define DESCRIPTION0(x) (((x) & 0xff) << 0)
+# define DESCRIPTION1(x) (((x) & 0xff) << 8)
+# define DESCRIPTION2(x) (((x) & 0xff) << 16)
+# define DESCRIPTION3(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
+# define DESCRIPTION4(x) (((x) & 0xff) << 0)
+# define DESCRIPTION5(x) (((x) & 0xff) << 8)
+# define DESCRIPTION6(x) (((x) & 0xff) << 16)
+# define DESCRIPTION7(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
+# define DESCRIPTION8(x) (((x) & 0xff) << 0)
+# define DESCRIPTION9(x) (((x) & 0xff) << 8)
+# define DESCRIPTION10(x) (((x) & 0xff) << 16)
+# define DESCRIPTION11(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
+# define DESCRIPTION12(x) (((x) & 0xff) << 0)
+# define DESCRIPTION13(x) (((x) & 0xff) << 8)
+# define DESCRIPTION14(x) (((x) & 0xff) << 16)
+# define DESCRIPTION15(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
+# define DESCRIPTION16(x) (((x) & 0xff) << 0)
+# define DESCRIPTION17(x) (((x) & 0xff) << 8)
+
#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
# define AUDIO_ENABLED (1 << 31)