Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/amdgpu: port of DCE v6 to new headers (v3)

Port of SI DCE v6 over to new AMDGPU headers. Tested on a
Tahiti with GNOME through various hot plugs/rotations/sizes/fullscreen/windowed and
staging drm/xf86-video-amdgpu.

(v2) Refactored to remove formatting changes to si_enums.h
as well as rename various defines.
(v3) Rebase on upstream

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Tom St Denis and committed by
Alex Deucher
b00861b9 99e3820a

+350 -247
+260 -247
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 30 30 #include "atombios_encoders.h" 31 31 #include "amdgpu_pll.h" 32 32 #include "amdgpu_connectors.h" 33 - #include "si/si_reg.h" 34 - #include "si/sid.h" 33 + 34 + #include "bif/bif_3_0_d.h" 35 + #include "bif/bif_3_0_sh_mask.h" 36 + #include "oss/oss_1_0_d.h" 37 + #include "oss/oss_1_0_sh_mask.h" 38 + #include "gca/gfx_6_0_d.h" 39 + #include "gca/gfx_6_0_sh_mask.h" 40 + #include "gmc/gmc_6_0_d.h" 41 + #include "gmc/gmc_6_0_sh_mask.h" 42 + #include "dce/dce_6_0_d.h" 43 + #include "dce/dce_6_0_sh_mask.h" 44 + #include "gca/gfx_7_2_enum.h" 45 + #include "si_enums.h" 35 46 36 47 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); 37 48 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev); ··· 59 48 60 49 static const u32 hpd_offsets[] = 61 50 { 62 - DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS, 63 - DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS, 64 - DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS, 65 - DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS, 66 - DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS, 67 - DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS, 51 + mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS, 52 + mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS, 53 + mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS, 54 + mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS, 55 + mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS, 56 + mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS, 68 57 }; 69 58 70 59 static const uint32_t dig_offsets[] = { ··· 84 73 uint32_t hpd; 85 74 86 75 } interrupt_status_offsets[6] = { { 87 - .reg = DISP_INTERRUPT_STATUS, 76 + .reg = mmDISP_INTERRUPT_STATUS, 88 77 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, 89 78 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, 90 79 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 91 80 }, { 92 - .reg = DISP_INTERRUPT_STATUS_CONTINUE, 81 + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, 93 82 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, 94 83 .vline = 
DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, 95 84 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 96 85 }, { 97 - .reg = DISP_INTERRUPT_STATUS_CONTINUE2, 86 + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, 98 87 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, 99 88 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, 100 89 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 101 90 }, { 102 - .reg = DISP_INTERRUPT_STATUS_CONTINUE3, 91 + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, 103 92 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, 104 93 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, 105 94 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 106 95 }, { 107 - .reg = DISP_INTERRUPT_STATUS_CONTINUE4, 96 + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, 108 97 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, 109 98 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, 110 99 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 111 100 }, { 112 - .reg = DISP_INTERRUPT_STATUS_CONTINUE5, 101 + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, 113 102 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, 114 103 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, 115 104 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK ··· 130 119 131 120 static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc) 132 121 { 133 - if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) 122 + if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK) 134 123 return true; 135 124 else 136 125 return false; ··· 140 129 { 141 130 u32 pos1, pos2; 142 131 143 - pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); 144 - pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); 132 + pos1 = 
RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 133 + pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 145 134 146 135 if (pos1 != pos2) 147 136 return true; ··· 163 152 if (crtc >= adev->mode_info.num_crtc) 164 153 return; 165 154 166 - if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN)) 155 + if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) 167 156 return; 168 157 169 158 /* depending on when we hit vblank, we may be close to active; if so, ··· 191 180 if (crtc >= adev->mode_info.num_crtc) 192 181 return 0; 193 182 else 194 - return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 183 + return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 195 184 } 196 185 197 186 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev) ··· 231 220 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 232 221 233 222 /* flip at hsync for async, default is vsync */ 234 - WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ? 235 - EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0); 223 + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ? 
224 + GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0); 236 225 /* update the scanout addresses */ 237 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 226 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 238 227 upper_32_bits(crtc_base)); 239 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 228 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 240 229 (u32)crtc_base); 241 230 242 231 /* post the write */ 243 - RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); 232 + RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); 244 233 } 245 234 246 235 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, ··· 248 237 { 249 238 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 250 239 return -EINVAL; 251 - *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]); 252 - *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); 240 + *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); 241 + *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 253 242 254 243 return 0; 255 244 ··· 272 261 if (hpd >= adev->mode_info.num_hpd) 273 262 return connected; 274 263 275 - if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE) 264 + if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) 276 265 connected = true; 277 266 278 267 return connected; ··· 295 284 if (hpd >= adev->mode_info.num_hpd) 296 285 return; 297 286 298 - tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 287 + tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 299 288 if (connected) 300 - tmp &= ~DC_HPDx_INT_POLARITY; 289 + tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; 301 290 else 302 - tmp |= DC_HPDx_INT_POLARITY; 303 - WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 291 + tmp |= 
DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; 292 + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 304 293 } 305 294 306 295 /** ··· 323 312 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) 324 313 continue; 325 314 326 - tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 327 - tmp |= DC_HPDx_EN; 328 - WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 315 + tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 316 + tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK; 317 + WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 329 318 330 319 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 331 320 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { ··· 334 323 * https://bugzilla.redhat.com/show_bug.cgi?id=726143 335 324 * also avoid interrupt storms during dpms. 336 325 */ 337 - tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 338 - tmp &= ~DC_HPDx_INT_EN; 339 - WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 326 + tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 327 + tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 328 + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 340 329 continue; 341 330 } 342 331 ··· 366 355 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) 367 356 continue; 368 357 369 - tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 370 - tmp &= ~DC_HPDx_EN; 371 - WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0); 358 + tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 359 + tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK; 360 + WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0); 372 361 373 362 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); 374 363 } ··· 376 365 377 366 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) 378 
367 { 379 - return SI_DC_GPIO_HPD_A; 368 + return mmDC_GPIO_HPD_A; 380 369 } 381 370 382 371 static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev) ··· 391 380 if (crtc >= adev->mode_info.num_crtc) 392 381 return 0; 393 382 else 394 - return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 383 + return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 395 384 } 396 385 397 386 static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev, ··· 400 389 u32 crtc_enabled, tmp, frame_count; 401 390 int i, j; 402 391 403 - save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 404 - save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 392 + save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); 393 + save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); 405 394 406 395 /* disable VGA render */ 407 - WREG32(VGA_RENDER_CONTROL, 0); 396 + WREG32(mmVGA_RENDER_CONTROL, 0); 408 397 409 398 /* blank the display controllers */ 410 399 for (i = 0; i < adev->mode_info.num_crtc; i++) { 411 - crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; 400 + crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK; 412 401 if (crtc_enabled) { 413 402 save->crtc_enabled[i] = true; 414 - tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 403 + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); 415 404 416 - if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 405 + if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) { 417 406 dce_v6_0_vblank_wait(adev, i); 418 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 419 - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 420 - WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 421 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 407 + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 408 + tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK; 409 + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 410 + WREG32(mmCRTC_UPDATE_LOCK + 
crtc_offsets[i], 0); 422 411 } 423 412 /* wait for the next frame */ 424 413 frame_count = evergreen_get_vblank_counter(adev, i); ··· 429 418 } 430 419 431 420 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 432 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 433 - tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 434 - tmp &= ~EVERGREEN_CRTC_MASTER_EN; 435 - WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 436 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 421 + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 422 + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 423 + tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK; 424 + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); 425 + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 437 426 save->crtc_enabled[i] = false; 438 427 /* ***** */ 439 428 } else { ··· 450 439 451 440 /* update crtc base addresses */ 452 441 for (i = 0; i < adev->mode_info.num_crtc; i++) { 453 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 442 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 454 443 upper_32_bits(adev->mc.vram_start)); 455 - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 444 + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 456 445 upper_32_bits(adev->mc.vram_start)); 457 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], 446 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], 458 447 (u32)adev->mc.vram_start); 459 - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], 448 + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], 460 449 (u32)adev->mc.vram_start); 461 450 } 462 451 463 - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); 464 - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); 452 + WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); 453 + 
WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); 465 454 466 455 /* unlock regs and wait for update */ 467 456 for (i = 0; i < adev->mode_info.num_crtc; i++) { 468 457 if (save->crtc_enabled[i]) { 469 - tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); 458 + tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); 470 459 if ((tmp & 0x7) != 3) { 471 460 tmp &= ~0x7; 472 461 tmp |= 0x3; 473 - WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 462 + WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); 474 463 } 475 - tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 476 - if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) { 477 - tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; 478 - WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); 464 + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); 465 + if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) { 466 + tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; 467 + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); 479 468 } 480 - tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); 469 + tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); 481 470 if (tmp & 1) { 482 471 tmp &= ~1; 483 - WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); 472 + WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); 484 473 } 485 474 for (j = 0; j < adev->usec_timeout; j++) { 486 - tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 487 - if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0) 475 + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); 476 + if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0) 488 477 break; 489 478 udelay(1); 490 479 } ··· 492 481 } 493 482 494 483 /* Unlock vga access */ 495 - WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); 484 + WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); 496 485 mdelay(1); 497 - WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 486 + WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); 498 487 499 488 } 500 489 ··· 502 491 bool render) 503 492 { 504 493 if 
(!render) 505 - WREG32(R_000300_VGA_RENDER_CONTROL, 506 - RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); 494 + WREG32(mmVGA_RENDER_CONTROL, 495 + RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL); 507 496 508 497 } 509 498 ··· 537 526 538 527 /*Disable crtc*/ 539 528 for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) { 540 - crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & 541 - EVERGREEN_CRTC_MASTER_EN; 529 + crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & 530 + CRTC_CONTROL__CRTC_MASTER_EN_MASK; 542 531 if (crtc_enabled) { 543 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 544 - tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 545 - tmp &= ~EVERGREEN_CRTC_MASTER_EN; 546 - WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 547 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 532 + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 533 + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 534 + tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK; 535 + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); 536 + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 548 537 } 549 538 } 550 539 } ··· 580 569 case 6: 581 570 if (dither == AMDGPU_FMT_DITHER_ENABLE) 582 571 /* XXX sort out optimal dither settings */ 583 - tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | 584 - FMT_SPATIAL_DITHER_EN); 572 + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK | 573 + FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK | 574 + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK); 585 575 else 586 - tmp |= FMT_TRUNCATE_EN; 576 + tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK; 587 577 break; 588 578 case 8: 589 579 if (dither == AMDGPU_FMT_DITHER_ENABLE) 590 580 /* XXX sort out optimal dither settings */ 591 - tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | 592 - FMT_RGB_RANDOM_ENABLE | 593 - FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); 581 + tmp |= 
(FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK | 582 + FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK | 583 + FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK | 584 + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK | 585 + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK); 594 586 else 595 - tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); 587 + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK | 588 + FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK); 596 589 break; 597 590 case 10: 598 591 default: ··· 604 589 break; 605 590 } 606 591 607 - WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 592 + WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 608 593 } 609 594 610 595 /** ··· 618 603 */ 619 604 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev) 620 605 { 621 - u32 tmp = RREG32(MC_SHARED_CHMAP); 606 + u32 tmp = RREG32(mmMC_SHARED_CHMAP); 622 607 623 608 switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) { 624 609 case 0: ··· 1115 1100 } 1116 1101 1117 1102 /* select wm A */ 1118 - arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); 1103 + arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); 1119 1104 tmp = arb_control3; 1120 1105 tmp &= ~LATENCY_WATERMARK_MASK(3); 1121 1106 tmp |= LATENCY_WATERMARK_MASK(1); 1122 - WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); 1123 - WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset, 1124 - (LATENCY_LOW_WATERMARK(latency_watermark_a) | 1125 - LATENCY_HIGH_WATERMARK(line_time))); 1107 + WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); 1108 + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, 1109 + ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | 1110 + (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT))); 1126 1111 /* select wm B */ 1127 - tmp = 
RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); 1112 + tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); 1128 1113 tmp &= ~LATENCY_WATERMARK_MASK(3); 1129 1114 tmp |= LATENCY_WATERMARK_MASK(2); 1130 - WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); 1131 - WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset, 1132 - (LATENCY_LOW_WATERMARK(latency_watermark_b) | 1133 - LATENCY_HIGH_WATERMARK(line_time))); 1115 + WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); 1116 + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, 1117 + ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | 1118 + (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT))); 1134 1119 /* restore original selection */ 1135 - WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3); 1120 + WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3); 1136 1121 1137 1122 /* write the priority marks */ 1138 - WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt); 1139 - WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt); 1123 + WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt); 1124 + WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt); 1140 1125 1141 1126 /* save values for DPM */ 1142 1127 amdgpu_crtc->line_time = line_time; ··· 1154 1139 /* 1155 1140 * Line Buffer Setup 1156 1141 * There are 3 line buffers, each one shared by 2 display controllers. 1157 - * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between 1142 + * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between 1158 1143 * the display controllers. 
The paritioning is done via one of four 1159 1144 * preset allocations specified in bits 21:20: 1160 1145 * 0 - half lb ··· 1177 1162 buffer_alloc = 0; 1178 1163 } 1179 1164 1180 - WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset, 1165 + WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset, 1181 1166 DC_LB_MEMORY_CONFIG(tmp)); 1182 1167 1183 - WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, 1184 - DMIF_BUFFERS_ALLOCATED(buffer_alloc)); 1168 + WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, 1169 + (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT)); 1185 1170 for (i = 0; i < adev->usec_timeout; i++) { 1186 - if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & 1187 - DMIF_BUFFERS_ALLOCATED_COMPLETED) 1171 + if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & 1172 + PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK) 1188 1173 break; 1189 1174 udelay(1); 1190 1175 } ··· 1426 1411 1427 1412 static const u32 vga_control_regs[6] = 1428 1413 { 1429 - AVIVO_D1VGA_CONTROL, 1430 - AVIVO_D2VGA_CONTROL, 1431 - EVERGREEN_D3VGA_CONTROL, 1432 - EVERGREEN_D4VGA_CONTROL, 1433 - EVERGREEN_D5VGA_CONTROL, 1434 - EVERGREEN_D6VGA_CONTROL, 1414 + mmD1VGA_CONTROL, 1415 + mmD2VGA_CONTROL, 1416 + mmD3VGA_CONTROL, 1417 + mmD4VGA_CONTROL, 1418 + mmD5VGA_CONTROL, 1419 + mmD6VGA_CONTROL, 1435 1420 }; 1436 1421 1437 1422 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) ··· 1451 1436 struct drm_device *dev = crtc->dev; 1452 1437 struct amdgpu_device *adev = dev->dev_private; 1453 1438 1454 - WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0); 1439 + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); 1455 1440 } 1456 1441 1457 1442 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, ··· 1467 1452 struct amdgpu_bo *abo; 1468 1453 uint64_t fb_location, tiling_flags; 1469 1454 uint32_t fb_format, fb_pitch_pixels, pipe_config; 1470 - u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1455 + u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE); 1471 1456 u32 viewport_w, viewport_h; 1472 1457 int r; 1473 1458 bool bypass_lut = false; ··· 1510 1495 1511 1496 switch (target_fb->pixel_format) { 1512 1497 case DRM_FORMAT_C8: 1513 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | 1514 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); 1498 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) | 1499 + GRPH_FORMAT(GRPH_FORMAT_INDEXED)); 1515 1500 break; 1516 1501 case DRM_FORMAT_XRGB4444: 1517 1502 case DRM_FORMAT_ARGB4444: 1518 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1519 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444)); 1503 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1504 + GRPH_FORMAT(GRPH_FORMAT_ARGB4444)); 1520 1505 #ifdef __BIG_ENDIAN 1521 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1506 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1522 1507 #endif 1523 1508 break; 1524 1509 case DRM_FORMAT_XRGB1555: 1525 1510 case DRM_FORMAT_ARGB1555: 1526 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1527 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); 1511 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1512 + GRPH_FORMAT(GRPH_FORMAT_ARGB1555)); 1528 1513 #ifdef __BIG_ENDIAN 1529 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1514 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1530 1515 #endif 1531 1516 break; 1532 1517 case DRM_FORMAT_BGRX5551: 1533 1518 case DRM_FORMAT_BGRA5551: 1534 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1535 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551)); 1519 + 
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1520 + GRPH_FORMAT(GRPH_FORMAT_BGRA5551)); 1536 1521 #ifdef __BIG_ENDIAN 1537 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1522 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1538 1523 #endif 1539 1524 break; 1540 1525 case DRM_FORMAT_RGB565: 1541 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1542 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); 1526 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1527 + GRPH_FORMAT(GRPH_FORMAT_ARGB565)); 1543 1528 #ifdef __BIG_ENDIAN 1544 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1529 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1545 1530 #endif 1546 1531 break; 1547 1532 case DRM_FORMAT_XRGB8888: 1548 1533 case DRM_FORMAT_ARGB8888: 1549 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1550 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); 1534 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1535 + GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); 1551 1536 #ifdef __BIG_ENDIAN 1552 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); 1537 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1553 1538 #endif 1554 1539 break; 1555 1540 case DRM_FORMAT_XRGB2101010: 1556 1541 case DRM_FORMAT_ARGB2101010: 1557 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1558 - EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010)); 1542 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1543 + GRPH_FORMAT(GRPH_FORMAT_ARGB2101010)); 1559 1544 #ifdef __BIG_ENDIAN 1560 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); 1545 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1561 1546 #endif 1562 1547 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1563 1548 bypass_lut = true; 1564 1549 break; 1565 1550 case DRM_FORMAT_BGRX1010102: 1566 1551 case DRM_FORMAT_BGRA1010102: 1567 - fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1568 - 
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102)); 1552 + fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1553 + GRPH_FORMAT(GRPH_FORMAT_BGRA1010102)); 1569 1554 #ifdef __BIG_ENDIAN 1570 - fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); 1555 + fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1571 1556 #endif 1572 1557 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1573 1558 bypass_lut = true; ··· 1587 1572 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1588 1573 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1589 1574 1590 - fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1591 - fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1592 - fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1593 - fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1594 - fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1595 - fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); 1575 + fb_format |= GRPH_NUM_BANKS(num_banks); 1576 + fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1); 1577 + fb_format |= GRPH_TILE_SPLIT(tile_split); 1578 + fb_format |= GRPH_BANK_WIDTH(bankw); 1579 + fb_format |= GRPH_BANK_HEIGHT(bankh); 1580 + fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect); 1596 1581 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 1597 - fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1582 + fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1); 1598 1583 } 1599 1584 1600 1585 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1601 - fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config); 1586 + fb_format |= GRPH_PIPE_CONFIG(pipe_config); 1602 1587 1603 1588 dce_v6_0_vga_enable(crtc, false); 1604 1589 1605 1590 /* Make sure surface address is updated at vertical blank rather than 1606 1591 * horizontal blank 1607 1592 */ 1608 - WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); 1593 + WREG32(mmGRPH_FLIP_CONTROL + 
amdgpu_crtc->crtc_offset, 0); 1609 1594 1610 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1595 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1611 1596 upper_32_bits(fb_location)); 1612 - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1597 + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1613 1598 upper_32_bits(fb_location)); 1614 - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1615 - (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1616 - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1617 - (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1618 - WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 1619 - WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap); 1599 + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1600 + (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1601 + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1602 + (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1603 + WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 1604 + WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 1620 1605 1621 1606 /* 1622 1607 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 1623 1608 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 1624 1609 * retain the full precision throughout the pipeline. 1625 1610 */ 1626 - WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset, 1627 - (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0), 1628 - ~EVERGREEN_LUT_10BIT_BYPASS_EN); 1611 + WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, 1612 + (bypass_lut ? 
GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0), 1613 + ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK); 1629 1614 1630 1615 if (bypass_lut) 1631 1616 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 1632 1617 1633 - WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 1634 - WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 1635 - WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0); 1636 - WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 1637 - WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 1638 - WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 1618 + WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 1619 + WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 1620 + WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 1621 + WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 1622 + WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 1623 + WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 1639 1624 1640 1625 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 1641 - WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 1626 + WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 1642 1627 1643 1628 dce_v6_0_grph_enable(crtc, true); 1644 1629 1645 - WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 1630 + WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 1646 1631 target_fb->height); 1647 1632 x &= ~3; 1648 1633 y &= ~1; 1649 - WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset, 1634 + WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 1650 1635 (x << 16) | y); 1651 1636 viewport_w = crtc->mode.hdisplay; 1652 1637 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1653 1638 1654 - WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 1639 + WREG32(mmVIEWPORT_SIZE + 
amdgpu_crtc->crtc_offset, 1655 1640 (viewport_w << 16) | viewport_h); 1656 1641 1657 1642 /* set pageflip to happen anywhere in vblank interval */ 1658 - WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1643 + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1659 1644 1660 1645 if (!atomic && fb && fb != crtc->primary->fb) { 1661 1646 amdgpu_fb = to_amdgpu_framebuffer(fb); ··· 1682 1667 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1683 1668 1684 1669 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1685 - WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 1686 - EVERGREEN_INTERLEAVE_EN); 1670 + WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 1671 + INTERLEAVE_EN); 1687 1672 else 1688 - WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 1673 + WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 1689 1674 } 1690 1675 1691 1676 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) ··· 1698 1683 1699 1684 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 1700 1685 1701 - WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 1702 - (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | 1703 - NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); 1704 - WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 1705 - NI_GRPH_PRESCALE_BYPASS); 1706 - WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 1707 - NI_OVL_PRESCALE_BYPASS); 1708 - WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1709 - (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | 1710 - NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); 1686 + WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 1687 + ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 1688 + (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 1689 + WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 1690 + PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 1691 + WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 1692 + 
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 1693 + WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1694 + ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 1695 + (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 1711 1696 1697 + WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 1712 1698 1699 + WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 1700 + WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 1701 + WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 1713 1702 1714 - WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 1703 + WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 1704 + WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 1705 + WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 1715 1706 1716 - WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 1717 - WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 1718 - WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 1707 + WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 1708 + WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 1719 1709 1720 - WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 1721 - WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 1722 - WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 1723 - 1724 - WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 1725 - WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 1726 - 1727 - WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 1710 + WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 1728 1711 for (i = 0; i < 256; i++) { 1729 - WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 1712 + WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 
1730 1713 (amdgpu_crtc->lut_r[i] << 20) | 1731 1714 (amdgpu_crtc->lut_g[i] << 10) | 1732 1715 (amdgpu_crtc->lut_b[i] << 0)); 1733 1716 } 1734 1717 1735 - WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1736 - (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 1737 - NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 1738 - NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 1739 - NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); 1740 - WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 1741 - (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | 1742 - NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); 1743 - WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1744 - (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | 1745 - NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); 1746 - WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 1747 - (NI_OUTPUT_CSC_GRPH_MODE(0) | 1748 - NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); 1718 + WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1719 + ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 1720 + (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 1721 + ICON_DEGAMMA_MODE(0) | 1722 + (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 1723 + WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 1724 + ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 1725 + (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 1726 + WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 1727 + ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 1728 + (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 1729 + WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 1730 + ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 1731 + (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 1749 1732 /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ 1750 1733 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 1751 1734 ··· 1822 1809 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1823 1810 uint32_t cur_lock; 1824 1811 1825 - cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset); 1812 + cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 1826 1813 if (lock) 1827 - cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK; 1814 + cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 1828 1815 else 1829 - cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK; 1830 - WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 1816 + cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 1817 + WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 1831 1818 } 1832 1819 1833 1820 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) ··· 1835 1822 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1836 1823 struct amdgpu_device *adev = crtc->dev->dev_private; 1837 1824 1838 - WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset, 1839 - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | 1840 - EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); 1825 + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 1826 + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 1827 + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 1841 1828 1842 1829 1843 1830 } ··· 1847 1834 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1848 1835 struct amdgpu_device *adev = crtc->dev->dev_private; 1849 1836 1850 - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1837 + WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1851 1838 upper_32_bits(amdgpu_crtc->cursor_addr)); 1852 - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1839 + WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1853 1840 lower_32_bits(amdgpu_crtc->cursor_addr)); 1854 1841 1855 - WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset, 1856 - 
EVERGREEN_CURSOR_EN | 1857 - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | 1858 - EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); 1842 + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 1843 + CUR_CONTROL__CURSOR_EN_MASK | 1844 + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 1845 + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 1859 1846 1860 1847 } 1861 1848 ··· 1882 1869 y = 0; 1883 1870 } 1884 1871 1885 - WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1886 - WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1887 - WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset, 1872 + WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1873 + WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1874 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1888 1875 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 1889 1876 1890 1877 amdgpu_crtc->cursor_x = x; ··· 2490 2477 2491 2478 switch (state) { 2492 2479 case AMDGPU_IRQ_STATE_DISABLE: 2493 - interrupt_mask = RREG32(INT_MASK + reg_block); 2480 + interrupt_mask = RREG32(mmINT_MASK + reg_block); 2494 2481 interrupt_mask &= ~VBLANK_INT_MASK; 2495 - WREG32(INT_MASK + reg_block, interrupt_mask); 2482 + WREG32(mmINT_MASK + reg_block, interrupt_mask); 2496 2483 break; 2497 2484 case AMDGPU_IRQ_STATE_ENABLE: 2498 - interrupt_mask = RREG32(INT_MASK + reg_block); 2485 + interrupt_mask = RREG32(mmINT_MASK + reg_block); 2499 2486 interrupt_mask |= VBLANK_INT_MASK; 2500 - WREG32(INT_MASK + reg_block, interrupt_mask); 2487 + WREG32(mmINT_MASK + reg_block, interrupt_mask); 2501 2488 break; 2502 2489 default: 2503 2490 break; ··· 2525 2512 2526 2513 switch (state) { 2527 2514 case AMDGPU_IRQ_STATE_DISABLE: 2528 - dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); 2515 + dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2529 2516 
dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; 2530 - WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2517 + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2531 2518 break; 2532 2519 case AMDGPU_IRQ_STATE_ENABLE: 2533 - dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); 2520 + dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2534 2521 dc_hpd_int_cntl |= DC_HPDx_INT_EN; 2535 - WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2522 + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2536 2523 break; 2537 2524 default: 2538 2525 break; ··· 2600 2587 switch (entry->src_data) { 2601 2588 case 0: /* vblank */ 2602 2589 if (disp_int & interrupt_status_offsets[crtc].vblank) 2603 - WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK); 2590 + WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK); 2604 2591 else 2605 2592 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 2606 2593 ··· 2611 2598 break; 2612 2599 case 1: /* vline */ 2613 2600 if (disp_int & interrupt_status_offsets[crtc].vline) 2614 - WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK); 2601 + WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK); 2615 2602 else 2616 2603 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 2617 2604 ··· 2637 2624 return -EINVAL; 2638 2625 } 2639 2626 2640 - reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]); 2627 + reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 2641 2628 if (state == AMDGPU_IRQ_STATE_DISABLE) 2642 - WREG32(GRPH_INT_CONTROL + crtc_offsets[type], 2629 + WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 2643 2630 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 2644 2631 else 2645 - WREG32(GRPH_INT_CONTROL + crtc_offsets[type], 2632 + WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 2646 2633 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 2647 2634 2648 2635 return 0; ··· 2665 2652 return -EINVAL; 2666 2653 } 2667 2654 
2668 - if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) & 2655 + if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 2669 2656 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 2670 - WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id], 2657 + WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 2671 2658 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 2672 2659 2673 2660 /* IRQ could occur when in initial stage */ ··· 2718 2705 mask = interrupt_status_offsets[hpd].hpd; 2719 2706 2720 2707 if (disp_int & mask) { 2721 - tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 2708 + tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 2722 2709 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 2723 - WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 2710 + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 2724 2711 schedule_work(&adev->hotplug_work); 2725 2712 DRM_INFO("IH: HPD%d\n", hpd + 1); 2726 2713 }
+78
drivers/gpu/drm/amd/amdgpu/si_enums.h
··· 23 23 #ifndef SI_ENUMS_H 24 24 #define SI_ENUMS_H 25 25 26 + #define VBLANK_INT_MASK (1 << 0) 27 + #define DC_HPDx_INT_EN (1 << 16) 28 + #define VBLANK_ACK (1 << 4) 29 + #define VLINE_ACK (1 << 4) 30 + 31 + #define CURSOR_WIDTH 64 32 + #define CURSOR_HEIGHT 64 33 + 34 + #define VGA_VSTATUS_CNTL 0xFFFCFFFF 35 + #define PRIORITY_MARK_MASK 0x7fff 36 + #define PRIORITY_OFF (1 << 16) 37 + #define PRIORITY_ALWAYS_ON (1 << 20) 38 + #define INTERLEAVE_EN (1 << 0) 39 + 40 + #define LATENCY_WATERMARK_MASK(x) ((x) << 16) 41 + #define DC_LB_MEMORY_CONFIG(x) ((x) << 20) 42 + #define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8) 43 + 44 + #define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) 45 + #define GRPH_ENDIAN_NONE 0 46 + #define GRPH_ENDIAN_8IN16 1 47 + #define GRPH_ENDIAN_8IN32 2 48 + #define GRPH_ENDIAN_8IN64 3 49 + 50 + #define GRPH_DEPTH(x) (((x) & 0x3) << 0) 51 + #define GRPH_DEPTH_8BPP 0 52 + #define GRPH_DEPTH_16BPP 1 53 + #define GRPH_DEPTH_32BPP 2 54 + 55 + #define GRPH_FORMAT(x) (((x) & 0x7) << 8) 56 + #define GRPH_FORMAT_INDEXED 0 57 + #define GRPH_FORMAT_ARGB1555 0 58 + #define GRPH_FORMAT_ARGB565 1 59 + #define GRPH_FORMAT_ARGB4444 2 60 + #define GRPH_FORMAT_AI88 3 61 + #define GRPH_FORMAT_MONO16 4 62 + #define GRPH_FORMAT_BGRA5551 5 63 + #define GRPH_FORMAT_ARGB8888 0 64 + #define GRPH_FORMAT_ARGB2101010 1 65 + #define GRPH_FORMAT_32BPP_DIG 2 66 + #define GRPH_FORMAT_8B_ARGB2101010 3 67 + #define GRPH_FORMAT_BGRA1010102 4 68 + #define GRPH_FORMAT_8B_BGRA1010102 5 69 + #define GRPH_FORMAT_RGB111110 6 70 + #define GRPH_FORMAT_BGR101111 7 71 + 72 + #define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) 73 + #define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 74 + #define GRPH_ARRAY_LINEAR_GENERAL 0 75 + #define GRPH_ARRAY_LINEAR_ALIGNED 1 76 + #define GRPH_ARRAY_1D_TILED_THIN1 2 77 + #define GRPH_ARRAY_2D_TILED_THIN1 4 78 + #define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) 79 + #define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) 80 + #define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) 81 + #define 
GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) 82 + #define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 83 + #define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24) 84 + 85 + #define CURSOR_EN (1 << 0) 86 + #define CURSOR_MODE(x) (((x) & 0x3) << 8) 87 + #define CURSOR_MONO 0 88 + #define CURSOR_24_1 1 89 + #define CURSOR_24_8_PRE_MULT 2 90 + #define CURSOR_24_8_UNPRE_MULT 3 91 + #define CURSOR_2X_MAGNIFY (1 << 16) 92 + #define CURSOR_FORCE_MC_ON (1 << 20) 93 + #define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24) 94 + #define CURSOR_URGENT_ALWAYS 0 95 + #define CURSOR_URGENT_1_8 1 96 + #define CURSOR_URGENT_1_4 2 97 + #define CURSOR_URGENT_3_8 3 98 + #define CURSOR_URGENT_1_2 4 99 + #define CURSOR_UPDATE_PENDING (1 << 0) 100 + #define CURSOR_UPDATE_TAKEN (1 << 1) 101 + #define CURSOR_UPDATE_LOCK (1 << 16) 102 + #define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24) 103 + 26 104 #define AMDGPU_NUM_OF_VMIDS 8 27 105 #define SI_CRTC0_REGISTER_OFFSET 0 28 106 #define SI_CRTC1_REGISTER_OFFSET 0x300
+12
drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
··· 4442 4442 #define mmXDMA_TEST_DEBUG_DATA 0x041D 4443 4443 #define mmXDMA_TEST_DEBUG_INDEX 0x041C 4444 4444 4445 + /* Registers that spilled out of sid.h */ 4446 + #define mmDATA_FORMAT 0x1AC0 4447 + #define mmDESKTOP_HEIGHT 0x1AC1 4448 + #define mmDC_LB_MEMORY_SPLIT 0x1AC3 4449 + #define mmPRIORITY_A_CNT 0x1AC6 4450 + #define mmPRIORITY_B_CNT 0x1AC7 4451 + #define mmDPG_PIPE_ARBITRATION_CONTROL3 0x1B32 4452 + #define mmINT_MASK 0x1AD0 4453 + #define mmVLINE_STATUS 0x1AEE 4454 + #define mmVBLANK_STATUS 0x1AEF 4455 + 4456 + 4445 4457 #endif