drivers/gpu/drm/radeon/ni.c, from the Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git), at tag v3.1 (1567 lines, 45 kB).
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};
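/*
 * Each row above and below is an {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA}
 * pair that ni_mc_load_microcode() programs before streaming in the MC ucode.
 * The turks, caicos and cayman tables that follow share the barts layout and
 * differ from it only in the final 0x0000009f entry.
 */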
static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
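/*
 * ni_mc_load_microcode - upload the memory-controller ucode.
 *
 * Picks the per-family IO register table and ucode size, resets the MC
 * sequencer, programs the index/data pairs above, streams the big-endian
 * ucode words into MC_SEQ_SUP_PGM, then polls until memory training
 * completes.  Only applies to GDDR5 boards whose MC is not already running.
 */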
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
			udelay(10);

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
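/*
 * ni_init_microcode - fetch the PFP, ME, RLC and MC firmware images via
 * request_firmware() and sanity-check each blob against the expected
 * sizes above; on failure all four images are released again.
 */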
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->mc_fw->size != mc_req_size) {
		printk(KERN_ERR
		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mc_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
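/*
 * Builds the tile-pipe-to-render-backend map: the requested pipe, backend
 * and shader-engine counts are clamped to the ASIC limits, an enabled-backend
 * mask is derived, and one 4-bit backend index per pipe (optionally swizzled)
 * is packed into the returned dword.
 */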
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}


	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}
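/*
 * Widens the per-shader-engine backend disable mask to an ASIC-wide mask
 * by replicating it once per engine; only 1 or 2 engines are expected.
 */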
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}

static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = 0xff000000;
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
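	/* derive the live pipe/SIMD/backend counts from the user config
	 * registers read above */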
	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	//gb_addr_config = 0x02011003
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif
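	/* decode the final settings back out of gb_addr_config so the
	 * driver config matches the value that will be programmed below */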
	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	//gb_backend_map = 0x76541032;
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cayman.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	rdev->config.cayman.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* disable context1-7 */
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	cayman_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

/*
 * CP.
 */
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int cayman_cp_start(struct radeon_device *rdev)
{
	int r, i;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(rdev, cayman_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev);
}

int cayman_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp.wptr = 0;
	WREG32(CP_RB0_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

	rdev->cp.rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp1.wptr = 0;
	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp2.wptr = 0;
	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->cp.ready = true;
	rdev->cp1.ready = true;
	rdev->cp2.ready = true;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		rdev->cp1.ready = false;
		rdev->cp2.ready = false;
		return r;
	}

	return 0;
}

bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* XXX deal with CP0,1,2 */
	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
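	/* the read back below likely serves to post the reset write
	 * before delaying */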
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}

static int cayman_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
		r = ni_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
	r = ni_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		evergreen_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; unlike r5xx hardware, on
	 * rv770+ posting will perform the necessary tasks to bring the GPU
	 * back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	int r;

	/* FIXME: we should wait for the ring to be empty */
	cayman_cp_enable(rdev, false);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);

	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}

	return 0;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does pretty much nothing more than
 * calling ASIC-specific functions.  This should also allow us to remove a
 * bunch of callback functions like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	evergreen_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}