Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-next-3.16' of git://people.freedesktop.org/~agd5f/linux into drm-next

Highlights:
- GPUVM optimizations
- HDMI audio cleanups
- Deep color HDMI support
- more bug fixes, cleanups

* 'drm-next-3.16' of git://people.freedesktop.org/~agd5f/linux: (29 commits)
drm/edid: Add quirk for Sony PVM-2541A to get 12 bpc hdmi deep color.
drm/edid: Parse and handle HDMI deep color modes.
drm/radeon: Limit hdmi deep color bit depth to 12 bpc.
drm/radeon: Setup HDMI_CONTROL for hdmi deep color gcp's (v2)
drm/radeon: fix pll setup for hdmi deep color (v7)
drm/radeon: use hw cts/n values for deep color
drm/radeon: only apply hdmi bpc pll flags when encoder mode is hdmi
drm/radeon/atom: fix dithering on certain panels
drm/radeon: optimize CIK VM handling v2
drm/radeon: optimize SI VM handling
drm/radeon: add define for flags used in R600+ GTT
drm/radeon: rework page flip handling v3
drm/radeon: separate vblank and pflip crtc handling
drm/radeon: split page flip and pending callback
drm/radeon: remove drm_vblank_get|put from pflip handling
drm/radeon: remove (pre|post)_page_flip callbacks
drm/radeon/dp: fix lane/clock setup for dp 1.2 capable devices
drm/radeon: fix typo in radeon_connector_is_dp12_capable()
radeon: Remove useless quirk for zx1/FireGL X1 combo introduced with fdo #7770
vgaswitcheroo: switch the mux to the igp on power down when runpm is enabled
...

+1130 -602
+116 -2
drivers/gpu/drm/drm_edid.c
··· 70 70 #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) 71 71 /* Force 8bpc */ 72 72 #define EDID_QUIRK_FORCE_8BPC (1 << 8) 73 + /* Force 12bpc */ 74 + #define EDID_QUIRK_FORCE_12BPC (1 << 9) 73 75 74 76 struct detailed_mode_closure { 75 77 struct drm_connector *connector; ··· 126 124 /* Samsung SyncMaster 22[5-6]BW */ 127 125 { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, 128 126 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, 127 + 128 + /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */ 129 + { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC }, 129 130 130 131 /* ViewSonic VA2026w */ 131 132 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, ··· 3428 3423 EXPORT_SYMBOL(drm_rgb_quant_range_selectable); 3429 3424 3430 3425 /** 3426 + * drm_assign_hdmi_deep_color_info - detect whether monitor supports 3427 + * hdmi deep color modes and update drm_display_info if so. 3428 + * 3429 + * @edid: monitor EDID information 3430 + * @info: Updated with maximum supported deep color bpc and color format 3431 + * if deep color supported. 3432 + * 3433 + * Parse the CEA extension according to CEA-861-B. 3434 + * Return true if HDMI deep color supported, false if not or unknown. 3435 + */ 3436 + static bool drm_assign_hdmi_deep_color_info(struct edid *edid, 3437 + struct drm_display_info *info, 3438 + struct drm_connector *connector) 3439 + { 3440 + u8 *edid_ext, *hdmi; 3441 + int i; 3442 + int start_offset, end_offset; 3443 + unsigned int dc_bpc = 0; 3444 + 3445 + edid_ext = drm_find_cea_extension(edid); 3446 + if (!edid_ext) 3447 + return false; 3448 + 3449 + if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) 3450 + return false; 3451 + 3452 + /* 3453 + * Because HDMI identifier is in Vendor Specific Block, 3454 + * search it from all data blocks of CEA extension. 
3455 + */ 3456 + for_each_cea_db(edid_ext, i, start_offset, end_offset) { 3457 + if (cea_db_is_hdmi_vsdb(&edid_ext[i])) { 3458 + /* HDMI supports at least 8 bpc */ 3459 + info->bpc = 8; 3460 + 3461 + hdmi = &edid_ext[i]; 3462 + if (cea_db_payload_len(hdmi) < 6) 3463 + return false; 3464 + 3465 + if (hdmi[6] & DRM_EDID_HDMI_DC_30) { 3466 + dc_bpc = 10; 3467 + DRM_DEBUG("%s: HDMI sink does deep color 30.\n", 3468 + drm_get_connector_name(connector)); 3469 + } 3470 + 3471 + if (hdmi[6] & DRM_EDID_HDMI_DC_36) { 3472 + dc_bpc = 12; 3473 + DRM_DEBUG("%s: HDMI sink does deep color 36.\n", 3474 + drm_get_connector_name(connector)); 3475 + } 3476 + 3477 + if (hdmi[6] & DRM_EDID_HDMI_DC_48) { 3478 + dc_bpc = 16; 3479 + DRM_DEBUG("%s: HDMI sink does deep color 48.\n", 3480 + drm_get_connector_name(connector)); 3481 + } 3482 + 3483 + if (dc_bpc > 0) { 3484 + DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n", 3485 + drm_get_connector_name(connector), dc_bpc); 3486 + info->bpc = dc_bpc; 3487 + 3488 + /* 3489 + * Deep color support mandates RGB444 support for all video 3490 + * modes and forbids YCRCB422 support for all video modes per 3491 + * HDMI 1.3 spec. 3492 + */ 3493 + info->color_formats = DRM_COLOR_FORMAT_RGB444; 3494 + 3495 + /* YCRCB444 is optional according to spec. */ 3496 + if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) { 3497 + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; 3498 + DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n", 3499 + drm_get_connector_name(connector)); 3500 + } 3501 + 3502 + /* 3503 + * Spec says that if any deep color mode is supported at all, 3504 + * then deep color 36 bit must be supported. 
3505 + */ 3506 + if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) { 3507 + DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n", 3508 + drm_get_connector_name(connector)); 3509 + } 3510 + 3511 + return true; 3512 + } 3513 + else { 3514 + DRM_DEBUG("%s: No deep color support on this HDMI sink.\n", 3515 + drm_get_connector_name(connector)); 3516 + } 3517 + } 3518 + } 3519 + 3520 + return false; 3521 + } 3522 + 3523 + /** 3431 3524 * drm_add_display_info - pull display info out if present 3432 3525 * @edid: EDID data 3433 3526 * @info: display info (attached to connector) 3527 + * @connector: connector whose edid is used to build display info 3434 3528 * 3435 3529 * Grab any available display info and stuff it into the drm_display_info 3436 3530 * structure that's part of the connector. Useful for tracking bpp and 3437 3531 * color spaces. 3438 3532 */ 3439 3533 static void drm_add_display_info(struct edid *edid, 3440 - struct drm_display_info *info) 3534 + struct drm_display_info *info, 3535 + struct drm_connector *connector) 3441 3536 { 3442 3537 u8 *edid_ext; 3443 3538 ··· 3567 3462 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; 3568 3463 } 3569 3464 3465 + /* HDMI deep color modes supported? 
Assign to info, if so */ 3466 + drm_assign_hdmi_deep_color_info(edid, info, connector); 3467 + 3570 3468 /* Only defined for 1.4 with digital displays */ 3571 3469 if (edid->revision < 4) 3572 3470 return; ··· 3598 3490 info->bpc = 0; 3599 3491 break; 3600 3492 } 3493 + 3494 + DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", 3495 + drm_get_connector_name(connector), info->bpc); 3601 3496 3602 3497 info->color_formats |= DRM_COLOR_FORMAT_RGB444; 3603 3498 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) ··· 3660 3549 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 3661 3550 edid_fixup_preferred(connector, quirks); 3662 3551 3663 - drm_add_display_info(edid, &connector->display_info); 3552 + drm_add_display_info(edid, &connector->display_info, connector); 3664 3553 3665 3554 if (quirks & EDID_QUIRK_FORCE_8BPC) 3666 3555 connector->display_info.bpc = 8; 3556 + 3557 + if (quirks & EDID_QUIRK_FORCE_12BPC) 3558 + connector->display_info.bpc = 12; 3667 3559 3668 3560 return num_modes; 3669 3561 }
+1 -1
drivers/gpu/drm/radeon/Makefile
··· 72 72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 73 73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 74 74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ 75 - radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 75 + radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \ 76 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ 77 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 78 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+60 -29
drivers/gpu/drm/radeon/atombios_crtc.c
··· 559 559 u32 adjusted_clock = mode->clock; 560 560 int encoder_mode = atombios_get_encoder_mode(encoder); 561 561 u32 dp_clock = mode->clock; 562 + u32 clock = mode->clock; 562 563 int bpc = radeon_crtc->bpc; 563 564 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); 564 565 ··· 635 634 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 636 635 } 637 636 637 + /* adjust pll for deep color modes */ 638 + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { 639 + switch (bpc) { 640 + case 8: 641 + default: 642 + break; 643 + case 10: 644 + clock = (clock * 5) / 4; 645 + break; 646 + case 12: 647 + clock = (clock * 3) / 2; 648 + break; 649 + case 16: 650 + clock = clock * 2; 651 + break; 652 + } 653 + } 654 + 638 655 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock 639 656 * accordingly based on the encoder/transmitter to work around 640 657 * special hw requirements. ··· 674 655 switch (crev) { 675 656 case 1: 676 657 case 2: 677 - args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 658 + args.v1.usPixelClock = cpu_to_le16(clock / 10); 678 659 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 679 660 args.v1.ucEncodeMode = encoder_mode; 680 661 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) ··· 686 667 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; 687 668 break; 688 669 case 3: 689 - args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); 670 + args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10); 690 671 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 691 672 args.v3.sInput.ucEncodeMode = encoder_mode; 692 673 args.v3.sInput.ucDispPllConfig = 0; ··· 700 681 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); 701 682 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 702 683 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 703 - if (encoder_mode == ATOM_ENCODER_MODE_HDMI) 704 - /* deep color support */ 705 - args.v3.sInput.usPixelClock = 706 - 
cpu_to_le16((mode->clock * bpc / 8) / 10); 707 684 if (dig->coherent_mode) 708 685 args.v3.sInput.ucDispPllConfig |= 709 686 DISPPLL_CONFIG_COHERENT_MODE; ··· 879 864 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ 880 865 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 881 866 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; 882 - switch (bpc) { 883 - case 8: 884 - default: 885 - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; 886 - break; 887 - case 10: 888 - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; 889 - break; 867 + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { 868 + switch (bpc) { 869 + case 8: 870 + default: 871 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; 872 + break; 873 + case 10: 874 + /* yes this is correct, the atom define is wrong */ 875 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP; 876 + break; 877 + case 12: 878 + /* yes this is correct, the atom define is wrong */ 879 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; 880 + break; 881 + } 890 882 } 891 883 args.v5.ucTransmitterID = encoder_id; 892 884 args.v5.ucEncoderMode = encoder_mode; ··· 908 886 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ 909 887 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 910 888 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; 911 - switch (bpc) { 912 - case 8: 913 - default: 914 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; 915 - break; 916 - case 10: 917 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; 918 - break; 919 - case 12: 920 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; 921 - break; 922 - case 16: 923 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; 924 - break; 889 + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { 890 + switch (bpc) { 891 + case 8: 892 + default: 893 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; 894 + break; 895 + case 10: 896 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6; 897 + break; 898 + case 12: 899 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6; 900 + break; 901 + case 16: 902 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; 903 + break; 904 + } 925 905 } 926 906 args.v6.ucTransmitterID = encoder_id; 927 907 args.v6.ucEncoderMode = encoder_mode; ··· 1045 1021 struct radeon_encoder *radeon_encoder = 1046 1022 to_radeon_encoder(radeon_crtc->encoder); 1047 1023 u32 pll_clock = mode->clock; 1024 + u32 clock = mode->clock; 1048 1025 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; 1049 1026 struct radeon_pll *pll; 1050 1027 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); 1028 + 1029 + /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */ 1030 + if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) && 1031 + (encoder_mode == ATOM_ENCODER_MODE_HDMI) && 1032 + (radeon_crtc->bpc > 8)) 1033 + clock = radeon_crtc->adjusted_clock; 1051 1034 1052 1035 switch (radeon_crtc->pll_id) { 1053 1036 case ATOM_PPLL1: ··· 1090 1059 radeon_crtc->crtc_id, &radeon_crtc->ss); 1091 1060 1092 1061 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1093 - encoder_mode, radeon_encoder->encoder_id, mode->clock, 1062 + 
encoder_mode, radeon_encoder->encoder_id, clock, 1094 1063 ref_div, fb_div, frac_fb_div, post_div, 1095 1064 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss); 1096 1065
+29 -6
drivers/gpu/drm/radeon/atombios_dp.c
··· 95 95 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 96 96 unsigned char *base; 97 97 int recv_bytes; 98 + int r = 0; 98 99 99 100 memset(&args, 0, sizeof(args)); 101 + 102 + mutex_lock(&chan->mutex); 100 103 101 104 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 102 105 ··· 120 117 /* timeout */ 121 118 if (args.v1.ucReplyStatus == 1) { 122 119 DRM_DEBUG_KMS("dp_aux_ch timeout\n"); 123 - return -ETIMEDOUT; 120 + r = -ETIMEDOUT; 121 + goto done; 124 122 } 125 123 126 124 /* flags not zero */ 127 125 if (args.v1.ucReplyStatus == 2) { 128 126 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); 129 - return -EBUSY; 127 + r = -EBUSY; 128 + goto done; 130 129 } 131 130 132 131 /* error */ 133 132 if (args.v1.ucReplyStatus == 3) { 134 133 DRM_DEBUG_KMS("dp_aux_ch error\n"); 135 - return -EIO; 134 + r = -EIO; 135 + goto done; 136 136 } 137 137 138 138 recv_bytes = args.v1.ucDataOutLen; ··· 145 139 if (recv && recv_size) 146 140 radeon_atom_copy_swap(recv, base + 16, recv_bytes, false); 147 141 148 - return recv_bytes; 142 + r = recv_bytes; 143 + done: 144 + mutex_unlock(&chan->mutex); 145 + 146 + return r; 149 147 } 150 148 151 149 #define BARE_ADDRESS_SIZE 3 ··· 291 281 292 282 /***** radeon specific DP functions *****/ 293 283 284 + static int radeon_dp_get_max_link_rate(struct drm_connector *connector, 285 + u8 dpcd[DP_DPCD_SIZE]) 286 + { 287 + int max_link_rate; 288 + 289 + if (radeon_connector_is_dp12_capable(connector)) 290 + max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); 291 + else 292 + max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); 293 + 294 + return max_link_rate; 295 + } 296 + 294 297 /* First get the min lane# when low rate is used according to pixel clock 295 298 * (prefer low rate), second check max lane# supported by DP panel, 296 299 * if the max lane# < low rate lane# then use max lane# instead. 
··· 313 290 int pix_clock) 314 291 { 315 292 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); 316 - int max_link_rate = drm_dp_max_link_rate(dpcd); 293 + int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); 317 294 int max_lane_num = drm_dp_max_lane_count(dpcd); 318 295 int lane_num; 319 296 int max_dp_pix_clock; ··· 351 328 return 540000; 352 329 } 353 330 354 - return drm_dp_max_link_rate(dpcd); 331 + return radeon_dp_get_max_link_rate(connector, dpcd); 355 332 } 356 333 357 334 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+4 -1
drivers/gpu/drm/radeon/atombios_encoders.c
··· 1884 1884 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; 1885 1885 else 1886 1886 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); 1887 - } else 1887 + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1888 + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; 1889 + } else { 1888 1890 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); 1891 + } 1889 1892 switch (radeon_encoder->encoder_id) { 1890 1893 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1891 1894 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+13 -4
drivers/gpu/drm/radeon/atombios_i2c.c
··· 43 43 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); 44 44 unsigned char *base; 45 45 u16 out = cpu_to_le16(0); 46 + int r = 0; 46 47 47 48 memset(&args, 0, sizeof(args)); 49 + 50 + mutex_lock(&chan->mutex); 48 51 49 52 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 50 53 51 54 if (flags & HW_I2C_WRITE) { 52 55 if (num > ATOM_MAX_HW_I2C_WRITE) { 53 56 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); 54 - return -EINVAL; 57 + r = -EINVAL; 58 + goto done; 55 59 } 56 60 if (buf == NULL) 57 61 args.ucRegIndex = 0; ··· 69 65 } else { 70 66 if (num > ATOM_MAX_HW_I2C_READ) { 71 67 DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); 72 - return -EINVAL; 68 + r = -EINVAL; 69 + goto done; 73 70 } 74 71 args.ucRegIndex = 0; 75 72 args.lpI2CDataOut = 0; ··· 87 82 /* error */ 88 83 if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { 89 84 DRM_DEBUG_KMS("hw_i2c error\n"); 90 - return -EIO; 85 + r = -EIO; 86 + goto done; 91 87 } 92 88 93 89 if (!(flags & HW_I2C_WRITE)) 94 90 radeon_atom_copy_swap(buf, base, num, false); 95 91 96 - return 0; 92 + done: 93 + mutex_unlock(&chan->mutex); 94 + 95 + return r; 97 96 } 98 97 99 98 int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+10 -7
drivers/gpu/drm/radeon/cik.c
··· 5328 5328 WREG32(MC_VM_MX_L1_TLB_CNTL, 5329 5329 (0xA << 7) | 5330 5330 ENABLE_L1_TLB | 5331 + ENABLE_L1_FRAGMENT_PROCESSING | 5331 5332 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 5332 5333 ENABLE_ADVANCED_DRIVER_MODEL | 5333 5334 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); ··· 5341 5340 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 5342 5341 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 5343 5342 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 5344 - L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 5343 + BANK_SELECT(4) | 5344 + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 5345 5345 /* setup context0 */ 5346 5346 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 5347 5347 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); ··· 5378 5376 (u32)(rdev->dummy_page.addr >> 12)); 5379 5377 WREG32(VM_CONTEXT1_CNTL2, 4); 5380 5378 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 5379 + PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) | 5381 5380 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 5382 5381 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 5383 5382 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | ··· 7314 7311 wake_up(&rdev->irq.vblank_queue); 7315 7312 } 7316 7313 if (atomic_read(&rdev->irq.pflip[0])) 7317 - radeon_crtc_handle_flip(rdev, 0); 7314 + radeon_crtc_handle_vblank(rdev, 0); 7318 7315 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 7319 7316 DRM_DEBUG("IH: D1 vblank\n"); 7320 7317 } ··· 7340 7337 wake_up(&rdev->irq.vblank_queue); 7341 7338 } 7342 7339 if (atomic_read(&rdev->irq.pflip[1])) 7343 - radeon_crtc_handle_flip(rdev, 1); 7340 + radeon_crtc_handle_vblank(rdev, 1); 7344 7341 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 7345 7342 DRM_DEBUG("IH: D2 vblank\n"); 7346 7343 } ··· 7366 7363 wake_up(&rdev->irq.vblank_queue); 7367 7364 } 7368 7365 if (atomic_read(&rdev->irq.pflip[2])) 7369 - radeon_crtc_handle_flip(rdev, 2); 7366 + radeon_crtc_handle_vblank(rdev, 2); 7370 7367 rdev->irq.stat_regs.cik.disp_int_cont2 
&= ~LB_D3_VBLANK_INTERRUPT; 7371 7368 DRM_DEBUG("IH: D3 vblank\n"); 7372 7369 } ··· 7392 7389 wake_up(&rdev->irq.vblank_queue); 7393 7390 } 7394 7391 if (atomic_read(&rdev->irq.pflip[3])) 7395 - radeon_crtc_handle_flip(rdev, 3); 7392 + radeon_crtc_handle_vblank(rdev, 3); 7396 7393 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 7397 7394 DRM_DEBUG("IH: D4 vblank\n"); 7398 7395 } ··· 7418 7415 wake_up(&rdev->irq.vblank_queue); 7419 7416 } 7420 7417 if (atomic_read(&rdev->irq.pflip[4])) 7421 - radeon_crtc_handle_flip(rdev, 4); 7418 + radeon_crtc_handle_vblank(rdev, 4); 7422 7419 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 7423 7420 DRM_DEBUG("IH: D5 vblank\n"); 7424 7421 } ··· 7444 7441 wake_up(&rdev->irq.vblank_queue); 7445 7442 } 7446 7443 if (atomic_read(&rdev->irq.pflip[5])) 7447 - radeon_crtc_handle_flip(rdev, 5); 7444 + radeon_crtc_handle_vblank(rdev, 5); 7448 7445 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 7449 7446 DRM_DEBUG("IH: D6 vblank\n"); 7450 7447 }
+20 -1
drivers/gpu/drm/radeon/cik_sdma.c
··· 741 741 742 742 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 743 743 744 - if (flags & R600_PTE_SYSTEM) { 744 + if (flags == R600_PTE_GART) { 745 + uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; 746 + while (count) { 747 + unsigned bytes = count * 8; 748 + if (bytes > 0x1FFFF8) 749 + bytes = 0x1FFFF8; 750 + 751 + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 752 + ib->ptr[ib->length_dw++] = bytes; 753 + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 754 + ib->ptr[ib->length_dw++] = src & 0xffffffff; 755 + ib->ptr[ib->length_dw++] = upper_32_bits(src); 756 + ib->ptr[ib->length_dw++] = pe & 0xffffffff; 757 + ib->ptr[ib->length_dw++] = upper_32_bits(pe); 758 + 759 + pe += bytes; 760 + src += bytes; 761 + count -= bytes / 8; 762 + } 763 + } else if (flags & R600_PTE_SYSTEM) { 745 764 while (count) { 746 765 ndw = count * 2; 747 766 if (ndw > 0xFFFFE)
+1
drivers/gpu/drm/radeon/cikd.h
··· 482 482 #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) 483 483 #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) 484 484 #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) 485 + #define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) 485 486 #define VM_CONTEXT1_CNTL 0x1414 486 487 #define VM_CONTEXT0_CNTL2 0x1430 487 488 #define VM_CONTEXT1_CNTL2 0x1434
+4 -4
drivers/gpu/drm/radeon/clearstate_cayman.h
··· 1050 1050 {SECT_CONTEXT_def_5, 0x0000a29e, 5 }, 1051 1051 {SECT_CONTEXT_def_6, 0x0000a2a5, 56 }, 1052 1052 {SECT_CONTEXT_def_7, 0x0000a2de, 290 }, 1053 - { 0, 0, 0 } 1053 + { NULL, 0, 0 } 1054 1054 }; 1055 1055 static const u32 SECT_CLEAR_def_1[] = 1056 1056 { ··· 1061 1061 static const struct cs_extent_def SECT_CLEAR_defs[] = 1062 1062 { 1063 1063 {SECT_CLEAR_def_1, 0x0000ffc0, 3 }, 1064 - { 0, 0, 0 } 1064 + { NULL, 0, 0 } 1065 1065 }; 1066 1066 static const u32 SECT_CTRLCONST_def_1[] = 1067 1067 { ··· 1071 1071 static const struct cs_extent_def SECT_CTRLCONST_defs[] = 1072 1072 { 1073 1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, 1074 - { 0, 0, 0 } 1074 + { NULL, 0, 0 } 1075 1075 }; 1076 1076 static const struct cs_section_def cayman_cs_data[] = { 1077 1077 { SECT_CONTEXT_defs, SECT_CONTEXT }, 1078 1078 { SECT_CLEAR_defs, SECT_CLEAR }, 1079 1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST }, 1080 - { 0, SECT_NONE } 1080 + { NULL, SECT_NONE } 1081 1081 };
+2 -2
drivers/gpu/drm/radeon/clearstate_ci.h
··· 936 936 {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 }, 937 937 {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, 938 938 {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, 939 - { 0, 0, 0 } 939 + { NULL, 0, 0 } 940 940 }; 941 941 static const struct cs_section_def ci_cs_data[] = { 942 942 { ci_SECT_CONTEXT_defs, SECT_CONTEXT }, 943 - { 0, SECT_NONE } 943 + { NULL, SECT_NONE } 944 944 };
+2 -2
drivers/gpu/drm/radeon/clearstate_si.h
··· 933 933 {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 }, 934 934 {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, 935 935 {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, 936 - { 0, 0, 0 } 936 + { NULL, 0, 0 } 937 937 }; 938 938 static const struct cs_section_def si_cs_data[] = { 939 939 { si_SECT_CONTEXT_defs, SECT_CONTEXT }, 940 - { 0, SECT_NONE } 940 + { NULL, SECT_NONE } 941 941 };
+244
drivers/gpu/drm/radeon/dce3_1_afmt.c
··· 1 + /* 2 + * Copyright 2013 Advanced Micro Devices, Inc. 3 + * Copyright 2014 Rafał Miłecki 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 
22 + */ 23 + #include <linux/hdmi.h> 24 + #include <drm/drmP.h> 25 + #include "radeon.h" 26 + #include "radeon_asic.h" 27 + #include "r600d.h" 28 + 29 + static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) 30 + { 31 + struct radeon_device *rdev = encoder->dev->dev_private; 32 + struct drm_connector *connector; 33 + struct radeon_connector *radeon_connector = NULL; 34 + u32 tmp; 35 + u8 *sadb; 36 + int sad_count; 37 + 38 + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 39 + if (connector->encoder == encoder) { 40 + radeon_connector = to_radeon_connector(connector); 41 + break; 42 + } 43 + } 44 + 45 + if (!radeon_connector) { 46 + DRM_ERROR("Couldn't find encoder's connector\n"); 47 + return; 48 + } 49 + 50 + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 51 + if (sad_count < 0) { 52 + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 53 + return; 54 + } 55 + 56 + /* program the speaker allocation */ 57 + tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); 58 + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 59 + /* set HDMI mode */ 60 + tmp |= HDMI_CONNECTION; 61 + if (sad_count) 62 + tmp |= SPEAKER_ALLOCATION(sadb[0]); 63 + else 64 + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 65 + WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); 66 + 67 + kfree(sadb); 68 + } 69 + 70 + static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) 71 + { 72 + struct radeon_device *rdev = encoder->dev->dev_private; 73 + struct drm_connector *connector; 74 + struct radeon_connector *radeon_connector = NULL; 75 + struct cea_sad *sads; 76 + int i, sad_count; 77 + 78 + static const u16 eld_reg_to_type[][2] = { 79 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, 80 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, 81 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, 82 + { 
AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, 83 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, 84 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, 85 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, 86 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, 87 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, 88 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, 89 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, 90 + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 91 + }; 92 + 93 + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 94 + if (connector->encoder == encoder) { 95 + radeon_connector = to_radeon_connector(connector); 96 + break; 97 + } 98 + } 99 + 100 + if (!radeon_connector) { 101 + DRM_ERROR("Couldn't find encoder's connector\n"); 102 + return; 103 + } 104 + 105 + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 106 + if (sad_count < 0) { 107 + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 108 + return; 109 + } 110 + BUG_ON(!sads); 111 + 112 + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 113 + u32 value = 0; 114 + u8 stereo_freqs = 0; 115 + int max_channels = -1; 116 + int j; 117 + 118 + for (j = 0; j < sad_count; j++) { 119 + struct cea_sad *sad = &sads[j]; 120 + 121 + if (sad->format == eld_reg_to_type[i][1]) { 122 + if (sad->channels > max_channels) { 123 + value = MAX_CHANNELS(sad->channels) | 124 + DESCRIPTOR_BYTE_2(sad->byte2) | 125 + SUPPORTED_FREQUENCIES(sad->freq); 126 + max_channels = sad->channels; 127 + } 128 + 129 + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 130 + stereo_freqs |= sad->freq; 131 + else 132 + break; 133 + } 134 + } 135 + 136 + value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 137 + 138 + 
WREG32(eld_reg_to_type[i][0], value); 139 + } 140 + 141 + kfree(sads); 142 + } 143 + 144 + /* 145 + * update the info frames with the data from the current display mode 146 + */ 147 + void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) 148 + { 149 + struct drm_device *dev = encoder->dev; 150 + struct radeon_device *rdev = dev->dev_private; 151 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 152 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 153 + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 154 + struct hdmi_avi_infoframe frame; 155 + uint32_t offset; 156 + ssize_t err; 157 + 158 + if (!dig || !dig->afmt) 159 + return; 160 + 161 + /* Silent, r600_hdmi_enable will raise WARN for us */ 162 + if (!dig->afmt->enabled) 163 + return; 164 + offset = dig->afmt->offset; 165 + 166 + /* disable audio prior to setting up hw */ 167 + dig->afmt->pin = r600_audio_get_pin(rdev); 168 + r600_audio_enable(rdev, dig->afmt->pin, false); 169 + 170 + r600_audio_set_dto(encoder, mode->clock); 171 + 172 + WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 173 + HDMI0_NULL_SEND); /* send null packets when required */ 174 + 175 + WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); 176 + 177 + if (ASIC_IS_DCE32(rdev)) { 178 + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 179 + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 180 + HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ 181 + WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, 182 + AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ 183 + AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 184 + } else { 185 + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 186 + HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 187 + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 188 + HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all 
hblanks */ 189 + HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 190 + } 191 + 192 + if (ASIC_IS_DCE32(rdev)) { 193 + dce3_2_afmt_write_speaker_allocation(encoder); 194 + dce3_2_afmt_write_sad_regs(encoder); 195 + } 196 + 197 + WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 198 + HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ 199 + HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 200 + 201 + WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 202 + HDMI0_NULL_SEND | /* send null packets when required */ 203 + HDMI0_GC_SEND | /* send general control packets */ 204 + HDMI0_GC_CONT); /* send general control packets every frame */ 205 + 206 + /* TODO: HDMI0_AUDIO_INFO_UPDATE */ 207 + WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, 208 + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 209 + HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ 210 + HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 211 + HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */ 212 + 213 + WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, 214 + HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ 215 + HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */ 216 + 217 + WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ 218 + 219 + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 220 + if (err < 0) { 221 + DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 222 + return; 223 + } 224 + 225 + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 226 + if (err < 0) { 227 + DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 228 + return; 229 + } 230 + 231 + r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 232 + r600_hdmi_update_ACR(encoder, mode->clock); 233 + 234 + /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ 235 + WREG32(HDMI0_RAMP_CONTROL0 + offset, 
0x00FFFFFF); 236 + WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); 237 + WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); 238 + WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); 239 + 240 + r600_hdmi_audio_workaround(encoder); 241 + 242 + /* enable audio after to setting up hw */ 243 + r600_audio_enable(rdev, dig->afmt->pin, true); 244 + }
+22 -38
drivers/gpu/drm/radeon/evergreen.c
··· 1301 1301 } 1302 1302 1303 1303 /** 1304 - * radeon_irq_kms_pflip_irq_get - pre-pageflip callback. 1305 - * 1306 - * @rdev: radeon_device pointer 1307 - * @crtc: crtc to prepare for pageflip on 1308 - * 1309 - * Pre-pageflip callback (evergreen+). 1310 - * Enables the pageflip irq (vblank irq). 1311 - */ 1312 - void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 1313 - { 1314 - /* enable the pflip int */ 1315 - radeon_irq_kms_pflip_irq_get(rdev, crtc); 1316 - } 1317 - 1318 - /** 1319 - * evergreen_post_page_flip - pos-pageflip callback. 1320 - * 1321 - * @rdev: radeon_device pointer 1322 - * @crtc: crtc to cleanup pageflip on 1323 - * 1324 - * Post-pageflip callback (evergreen+). 1325 - * Disables the pageflip irq (vblank irq). 1326 - */ 1327 - void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) 1328 - { 1329 - /* disable the pflip int */ 1330 - radeon_irq_kms_pflip_irq_put(rdev, crtc); 1331 - } 1332 - 1333 - /** 1334 1304 * evergreen_page_flip - pageflip callback. 1335 1305 * 1336 1306 * @rdev: radeon_device pointer ··· 1313 1343 * double buffered update to take place. 1314 1344 * Returns the current update pending status. 1315 1345 */ 1316 - u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 1346 + void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 1317 1347 { 1318 1348 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 1319 1349 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); ··· 1345 1375 /* Unlock the lock, so double-buffering can take place inside vblank */ 1346 1376 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; 1347 1377 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 1378 + } 1379 + 1380 + /** 1381 + * evergreen_page_flip_pending - check if page flip is still pending 1382 + * 1383 + * @rdev: radeon_device pointer 1384 + * @crtc_id: crtc to check 1385 + * 1386 + * Returns the current update pending status. 
1387 + */ 1388 + bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id) 1389 + { 1390 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 1348 1391 1349 1392 /* Return current update_pending status: */ 1350 - return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; 1393 + return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & 1394 + EVERGREEN_GRPH_SURFACE_UPDATE_PENDING); 1351 1395 } 1352 1396 1353 1397 /* get temperature in millidegrees */ ··· 4789 4805 wake_up(&rdev->irq.vblank_queue); 4790 4806 } 4791 4807 if (atomic_read(&rdev->irq.pflip[0])) 4792 - radeon_crtc_handle_flip(rdev, 0); 4808 + radeon_crtc_handle_vblank(rdev, 0); 4793 4809 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 4794 4810 DRM_DEBUG("IH: D1 vblank\n"); 4795 4811 } ··· 4815 4831 wake_up(&rdev->irq.vblank_queue); 4816 4832 } 4817 4833 if (atomic_read(&rdev->irq.pflip[1])) 4818 - radeon_crtc_handle_flip(rdev, 1); 4834 + radeon_crtc_handle_vblank(rdev, 1); 4819 4835 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 4820 4836 DRM_DEBUG("IH: D2 vblank\n"); 4821 4837 } ··· 4841 4857 wake_up(&rdev->irq.vblank_queue); 4842 4858 } 4843 4859 if (atomic_read(&rdev->irq.pflip[2])) 4844 - radeon_crtc_handle_flip(rdev, 2); 4860 + radeon_crtc_handle_vblank(rdev, 2); 4845 4861 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 4846 4862 DRM_DEBUG("IH: D3 vblank\n"); 4847 4863 } ··· 4867 4883 wake_up(&rdev->irq.vblank_queue); 4868 4884 } 4869 4885 if (atomic_read(&rdev->irq.pflip[3])) 4870 - radeon_crtc_handle_flip(rdev, 3); 4886 + radeon_crtc_handle_vblank(rdev, 3); 4871 4887 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 4872 4888 DRM_DEBUG("IH: D4 vblank\n"); 4873 4889 } ··· 4893 4909 wake_up(&rdev->irq.vblank_queue); 4894 4910 } 4895 4911 if (atomic_read(&rdev->irq.pflip[4])) 4896 - radeon_crtc_handle_flip(rdev, 4); 4912 + 
radeon_crtc_handle_vblank(rdev, 4); 4897 4913 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 4898 4914 DRM_DEBUG("IH: D5 vblank\n"); 4899 4915 } ··· 4919 4935 wake_up(&rdev->irq.vblank_queue); 4920 4936 } 4921 4937 if (atomic_read(&rdev->irq.pflip[5])) 4922 - radeon_crtc_handle_flip(rdev, 5); 4938 + radeon_crtc_handle_vblank(rdev, 5); 4923 4939 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 4924 4940 DRM_DEBUG("IH: D6 vblank\n"); 4925 4941 }
+45 -3
drivers/gpu/drm/radeon/evergreen_hdmi.c
··· 293 293 struct radeon_device *rdev = dev->dev_private; 294 294 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 295 295 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 296 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 296 297 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 297 298 struct hdmi_avi_infoframe frame; 298 299 uint32_t offset; 299 300 ssize_t err; 301 + uint32_t val; 302 + int bpc = 8; 300 303 301 304 if (!dig || !dig->afmt) 302 305 return; ··· 308 305 if (!dig->afmt->enabled) 309 306 return; 310 307 offset = dig->afmt->offset; 308 + 309 + /* hdmi deep color mode general control packets setup, if bpc > 8 */ 310 + if (encoder->crtc) { 311 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 312 + bpc = radeon_crtc->bpc; 313 + } 311 314 312 315 /* disable audio prior to setting up hw */ 313 316 if (ASIC_IS_DCE6(rdev)) { ··· 330 321 HDMI_NULL_SEND); /* send null packets when required */ 331 322 332 323 WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000); 324 + 325 + val = RREG32(HDMI_CONTROL + offset); 326 + val &= ~HDMI_DEEP_COLOR_ENABLE; 327 + val &= ~HDMI_DEEP_COLOR_DEPTH_MASK; 328 + 329 + switch (bpc) { 330 + case 0: 331 + case 6: 332 + case 8: 333 + case 16: 334 + default: 335 + DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", 336 + drm_get_connector_name(connector), bpc); 337 + break; 338 + case 10: 339 + val |= HDMI_DEEP_COLOR_ENABLE; 340 + val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR); 341 + DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", 342 + drm_get_connector_name(connector)); 343 + break; 344 + case 12: 345 + val |= HDMI_DEEP_COLOR_ENABLE; 346 + val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR); 347 + DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", 348 + drm_get_connector_name(connector)); 349 + break; 350 + } 351 + 352 + WREG32(HDMI_CONTROL + offset, val); 333 353 334 354 WREG32(HDMI_VBI_PACKET_CONTROL + 
offset, 335 355 HDMI_NULL_SEND | /* send null packets when required */ ··· 386 348 387 349 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 388 350 389 - WREG32(HDMI_ACR_PACKET_CONTROL + offset, 390 - HDMI_ACR_SOURCE | /* select SW CTS value */ 391 - HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 351 + if (bpc > 8) 352 + WREG32(HDMI_ACR_PACKET_CONTROL + offset, 353 + HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 354 + else 355 + WREG32(HDMI_ACR_PACKET_CONTROL + offset, 356 + HDMI_ACR_SOURCE | /* select SW CTS value */ 357 + HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 392 358 393 359 evergreen_hdmi_update_ACR(encoder, mode->clock); 394 360
+2 -1
drivers/gpu/drm/radeon/evergreend.h
··· 517 517 # define HDMI_ERROR_ACK (1 << 8) 518 518 # define HDMI_ERROR_MASK (1 << 9) 519 519 # define HDMI_DEEP_COLOR_ENABLE (1 << 24) 520 - # define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28) 520 + # define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28) 521 521 # define HDMI_24BIT_DEEP_COLOR 0 522 522 # define HDMI_30BIT_DEEP_COLOR 1 523 523 # define HDMI_36BIT_DEEP_COLOR 2 524 + # define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28) 524 525 #define HDMI_STATUS 0x7034 525 526 # define HDMI_ACTIVE_AVMUTE (1 << 0) 526 527 # define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+3
drivers/gpu/drm/radeon/ni.c
··· 1228 1228 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 1229 1229 /* Setup L2 cache */ 1230 1230 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 1231 + ENABLE_L2_FRAGMENT_PROCESSING | 1231 1232 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1232 1233 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 1233 1234 EFFECTIVE_L2_QUEUE_SIZE(7) | 1234 1235 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 1235 1236 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 1236 1237 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 1238 + BANK_SELECT(6) | 1237 1239 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 1238 1240 /* setup context0 */ 1239 1241 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); ··· 1268 1266 (u32)(rdev->dummy_page.addr >> 12)); 1269 1267 WREG32(VM_CONTEXT1_CNTL2, 4); 1270 1268 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 1269 + PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) | 1271 1270 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 1272 1271 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 1273 1272 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+1
drivers/gpu/drm/radeon/nid.h
··· 128 128 #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) 129 129 #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) 130 130 #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) 131 + #define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) 131 132 #define VM_CONTEXT1_CNTL 0x1414 132 133 #define VM_CONTEXT0_CNTL2 0x1430 133 134 #define VM_CONTEXT1_CNTL2 0x1434
+20 -35
drivers/gpu/drm/radeon/r100.c
··· 142 142 } 143 143 144 144 /** 145 - * r100_pre_page_flip - pre-pageflip callback. 146 - * 147 - * @rdev: radeon_device pointer 148 - * @crtc: crtc to prepare for pageflip on 149 - * 150 - * Pre-pageflip callback (r1xx-r4xx). 151 - * Enables the pageflip irq (vblank irq). 152 - */ 153 - void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 154 - { 155 - /* enable the pflip int */ 156 - radeon_irq_kms_pflip_irq_get(rdev, crtc); 157 - } 158 - 159 - /** 160 - * r100_post_page_flip - pos-pageflip callback. 161 - * 162 - * @rdev: radeon_device pointer 163 - * @crtc: crtc to cleanup pageflip on 164 - * 165 - * Post-pageflip callback (r1xx-r4xx). 166 - * Disables the pageflip irq (vblank irq). 167 - */ 168 - void r100_post_page_flip(struct radeon_device *rdev, int crtc) 169 - { 170 - /* disable the pflip int */ 171 - radeon_irq_kms_pflip_irq_put(rdev, crtc); 172 - } 173 - 174 - /** 175 145 * r100_page_flip - pageflip callback. 176 146 * 177 147 * @rdev: radeon_device pointer ··· 152 182 * During vblank we take the crtc lock and wait for the update_pending 153 183 * bit to go high, when it does, we release the lock, and allow the 154 184 * double buffered update to take place. 155 - * Returns the current update pending status. 156 185 */ 157 - u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 186 + void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 158 187 { 159 188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 160 189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; ··· 175 206 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; 176 207 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 177 208 209 + } 210 + 211 + /** 212 + * r100_page_flip_pending - check if page flip is still pending 213 + * 214 + * @rdev: radeon_device pointer 215 + * @crtc_id: crtc to check 216 + * 217 + * Check if the last pagefilp is still pending (r1xx-r4xx). 
218 + * Returns the current update pending status. 219 + */ 220 + bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id) 221 + { 222 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 223 + 178 224 /* Return current update_pending status: */ 179 - return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; 225 + return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & 226 + RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET); 180 227 } 181 228 182 229 /** ··· 779 794 wake_up(&rdev->irq.vblank_queue); 780 795 } 781 796 if (atomic_read(&rdev->irq.pflip[0])) 782 - radeon_crtc_handle_flip(rdev, 0); 797 + radeon_crtc_handle_vblank(rdev, 0); 783 798 } 784 799 if (status & RADEON_CRTC2_VBLANK_STAT) { 785 800 if (rdev->irq.crtc_vblank_int[1]) { ··· 788 803 wake_up(&rdev->irq.vblank_queue); 789 804 } 790 805 if (atomic_read(&rdev->irq.pflip[1])) 791 - radeon_crtc_handle_flip(rdev, 1); 806 + radeon_crtc_handle_vblank(rdev, 1); 792 807 } 793 808 if (status & RADEON_FP_DETECT_STAT) { 794 809 queue_hotplug = true;
+2 -2
drivers/gpu/drm/radeon/r600.c
··· 3876 3876 wake_up(&rdev->irq.vblank_queue); 3877 3877 } 3878 3878 if (atomic_read(&rdev->irq.pflip[0])) 3879 - radeon_crtc_handle_flip(rdev, 0); 3879 + radeon_crtc_handle_vblank(rdev, 0); 3880 3880 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 3881 3881 DRM_DEBUG("IH: D1 vblank\n"); 3882 3882 } ··· 3902 3902 wake_up(&rdev->irq.vblank_queue); 3903 3903 } 3904 3904 if (atomic_read(&rdev->irq.pflip[1])) 3905 - radeon_crtc_handle_flip(rdev, 1); 3905 + radeon_crtc_handle_vblank(rdev, 1); 3906 3906 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; 3907 3907 DRM_DEBUG("IH: D2 vblank\n"); 3908 3908 }
+100 -227
drivers/gpu/drm/radeon/r600_hdmi.c
··· 133 133 /* 134 134 * update the N and CTS parameters for a given pixel clock rate 135 135 */ 136 - static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) 136 + void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) 137 137 { 138 138 struct drm_device *dev = encoder->dev; 139 139 struct radeon_device *rdev = dev->dev_private; ··· 142 142 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 143 143 uint32_t offset = dig->afmt->offset; 144 144 145 - WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz)); 146 - WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz); 145 + WREG32_P(HDMI0_ACR_32_0 + offset, 146 + HDMI0_ACR_CTS_32(acr.cts_32khz), 147 + ~HDMI0_ACR_CTS_32_MASK); 148 + WREG32_P(HDMI0_ACR_32_1 + offset, 149 + HDMI0_ACR_N_32(acr.n_32khz), 150 + ~HDMI0_ACR_N_32_MASK); 147 151 148 - WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz)); 149 - WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz); 152 + WREG32_P(HDMI0_ACR_44_0 + offset, 153 + HDMI0_ACR_CTS_44(acr.cts_44_1khz), 154 + ~HDMI0_ACR_CTS_44_MASK); 155 + WREG32_P(HDMI0_ACR_44_1 + offset, 156 + HDMI0_ACR_N_44(acr.n_44_1khz), 157 + ~HDMI0_ACR_N_44_MASK); 150 158 151 - WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz)); 152 - WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz); 159 + WREG32_P(HDMI0_ACR_48_0 + offset, 160 + HDMI0_ACR_CTS_48(acr.cts_48khz), 161 + ~HDMI0_ACR_CTS_48_MASK); 162 + WREG32_P(HDMI0_ACR_48_1 + offset, 163 + HDMI0_ACR_N_48(acr.n_48khz), 164 + ~HDMI0_ACR_N_48_MASK); 153 165 } 154 166 155 167 /* 156 168 * build a HDMI Video Info Frame 157 169 */ 158 - static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, 159 - void *buffer, size_t size) 170 + void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, 171 + size_t size) 160 172 { 161 173 struct drm_device *dev = encoder->dev; 162 174 struct radeon_device *rdev = dev->dev_private; ··· 243 231 /* 244 232 * write the audio workaround 
status to the hardware 245 233 */ 246 - static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) 234 + void r600_hdmi_audio_workaround(struct drm_encoder *encoder) 247 235 { 248 236 struct drm_device *dev = encoder->dev; 249 237 struct radeon_device *rdev = dev->dev_private; ··· 262 250 value, ~HDMI0_AUDIO_TEST_EN); 263 251 } 264 252 265 - static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) 253 + void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) 266 254 { 267 255 struct drm_device *dev = encoder->dev; 268 256 struct radeon_device *rdev = dev->dev_private; ··· 332 320 } 333 321 } 334 322 335 - static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) 336 - { 337 - struct radeon_device *rdev = encoder->dev->dev_private; 338 - struct drm_connector *connector; 339 - struct radeon_connector *radeon_connector = NULL; 340 - u32 tmp; 341 - u8 *sadb; 342 - int sad_count; 343 - 344 - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 345 - if (connector->encoder == encoder) { 346 - radeon_connector = to_radeon_connector(connector); 347 - break; 348 - } 349 - } 350 - 351 - if (!radeon_connector) { 352 - DRM_ERROR("Couldn't find encoder's connector\n"); 353 - return; 354 - } 355 - 356 - sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 357 - if (sad_count < 0) { 358 - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 359 - return; 360 - } 361 - 362 - /* program the speaker allocation */ 363 - tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); 364 - tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 365 - /* set HDMI mode */ 366 - tmp |= HDMI_CONNECTION; 367 - if (sad_count) 368 - tmp |= SPEAKER_ALLOCATION(sadb[0]); 369 - else 370 - tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 371 - WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); 372 - 373 - kfree(sadb); 374 - } 375 - 376 - static void dce3_2_afmt_write_sad_regs(struct 
drm_encoder *encoder) 377 - { 378 - struct radeon_device *rdev = encoder->dev->dev_private; 379 - struct drm_connector *connector; 380 - struct radeon_connector *radeon_connector = NULL; 381 - struct cea_sad *sads; 382 - int i, sad_count; 383 - 384 - static const u16 eld_reg_to_type[][2] = { 385 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, 386 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, 387 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, 388 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, 389 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, 390 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, 391 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, 392 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, 393 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, 394 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, 395 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, 396 - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 397 - }; 398 - 399 - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 400 - if (connector->encoder == encoder) { 401 - radeon_connector = to_radeon_connector(connector); 402 - break; 403 - } 404 - } 405 - 406 - if (!radeon_connector) { 407 - DRM_ERROR("Couldn't find encoder's connector\n"); 408 - return; 409 - } 410 - 411 - sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 412 - if (sad_count < 0) { 413 - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 414 - return; 415 - } 416 - BUG_ON(!sads); 417 - 418 - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 419 - u32 value = 0; 420 - u8 stereo_freqs = 0; 421 - int max_channels = -1; 422 - int j; 423 
- 424 - for (j = 0; j < sad_count; j++) { 425 - struct cea_sad *sad = &sads[j]; 426 - 427 - if (sad->format == eld_reg_to_type[i][1]) { 428 - if (sad->channels > max_channels) { 429 - value = MAX_CHANNELS(sad->channels) | 430 - DESCRIPTOR_BYTE_2(sad->byte2) | 431 - SUPPORTED_FREQUENCIES(sad->freq); 432 - max_channels = sad->channels; 433 - } 434 - 435 - if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 436 - stereo_freqs |= sad->freq; 437 - else 438 - break; 439 - } 440 - } 441 - 442 - value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 443 - 444 - WREG32(eld_reg_to_type[i][0], value); 445 - } 446 - 447 - kfree(sads); 448 - } 449 - 450 323 /* 451 324 * update the info frames with the data from the current display mode 452 325 */ ··· 344 447 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 345 448 struct hdmi_avi_infoframe frame; 346 449 uint32_t offset; 450 + uint32_t acr_ctl; 347 451 ssize_t err; 348 452 349 453 if (!dig || !dig->afmt) ··· 361 463 362 464 r600_audio_set_dto(encoder, mode->clock); 363 465 364 - WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 365 - HDMI0_NULL_SEND); /* send null packets when required */ 466 + WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, 467 + HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 468 + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 469 + HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ 470 + HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */ 471 + ~(HDMI0_AUDIO_SAMPLE_SEND | 472 + HDMI0_AUDIO_DELAY_EN_MASK | 473 + HDMI0_AUDIO_PACKETS_PER_LINE_MASK | 474 + HDMI0_60958_CS_UPDATE)); 366 475 367 - WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); 476 + /* DCE 3.0 uses register that's normally for CRC_CONTROL */ 477 + acr_ctl = ASIC_IS_DCE3(rdev) ? 
DCE3_HDMI0_ACR_PACKET_CONTROL : 478 + HDMI0_ACR_PACKET_CONTROL; 479 + WREG32_P(acr_ctl + offset, 480 + HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ 481 + HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */ 482 + ~(HDMI0_ACR_SOURCE | 483 + HDMI0_ACR_AUTO_SEND)); 368 484 369 - if (ASIC_IS_DCE32(rdev)) { 370 - WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 371 - HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 372 - HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ 373 - WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, 374 - AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ 375 - AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 376 - } else { 377 - WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 378 - HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 379 - HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 380 - HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ 381 - HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 382 - } 485 + WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset, 486 + HDMI0_NULL_SEND | /* send null packets when required */ 487 + HDMI0_GC_SEND | /* send general control packets */ 488 + HDMI0_GC_CONT); /* send general control packets every frame */ 383 489 384 - if (ASIC_IS_DCE32(rdev)) { 385 - dce3_2_afmt_write_speaker_allocation(encoder); 386 - dce3_2_afmt_write_sad_regs(encoder); 387 - } 490 + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, 491 + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 492 + HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ 493 + HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 494 + HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ 388 495 389 - WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 390 - 
HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ 391 - HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 496 + WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset, 497 + HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ 498 + HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */ 499 + ~(HDMI0_AVI_INFO_LINE_MASK | 500 + HDMI0_AUDIO_INFO_LINE_MASK)); 392 501 393 - WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 394 - HDMI0_NULL_SEND | /* send null packets when required */ 395 - HDMI0_GC_SEND | /* send general control packets */ 396 - HDMI0_GC_CONT); /* send general control packets every frame */ 397 - 398 - /* TODO: HDMI0_AUDIO_INFO_UPDATE */ 399 - WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, 400 - HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 401 - HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ 402 - HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 403 - HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */ 404 - 405 - WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, 406 - HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ 407 - HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */ 408 - 409 - WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ 502 + WREG32_AND(HDMI0_GC + offset, 503 + ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */ 410 504 411 505 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 412 506 if (err < 0) { ··· 413 523 } 414 524 415 525 r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 526 + 527 + /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */ 528 + 529 + WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset, 530 + ~(HDMI0_GENERIC0_SEND | 531 + HDMI0_GENERIC0_CONT | 532 + HDMI0_GENERIC0_UPDATE | 533 + HDMI0_GENERIC1_SEND | 534 + HDMI0_GENERIC1_CONT | 535 + HDMI0_GENERIC0_LINE_MASK | 536 + HDMI0_GENERIC1_LINE_MASK)); 537 + 416 538 r600_hdmi_update_ACR(encoder, 
mode->clock); 539 + 540 + WREG32_P(HDMI0_60958_0 + offset, 541 + HDMI0_60958_CS_CHANNEL_NUMBER_L(1), 542 + ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK | 543 + HDMI0_60958_CS_CLOCK_ACCURACY_MASK)); 544 + 545 + WREG32_P(HDMI0_60958_1 + offset, 546 + HDMI0_60958_CS_CHANNEL_NUMBER_R(2), 547 + ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK); 417 548 418 549 /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ 419 550 WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); ··· 442 531 WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); 443 532 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); 444 533 445 - r600_hdmi_audio_workaround(encoder); 446 - 447 534 /* enable audio after to setting up hw */ 448 535 r600_audio_enable(rdev, dig->afmt->pin, true); 449 536 } 450 537 451 - /* 452 - * update settings with current parameters from audio engine 538 + /** 539 + * r600_hdmi_update_audio_settings - Update audio infoframe 540 + * 541 + * @encoder: drm encoder 542 + * 543 + * Gets info about current audio stream and updates audio infoframe. 
453 544 */ 454 545 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) 455 546 { ··· 463 550 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; 464 551 struct hdmi_audio_infoframe frame; 465 552 uint32_t offset; 466 - uint32_t iec; 553 + uint32_t value; 467 554 ssize_t err; 468 555 469 556 if (!dig->afmt || !dig->afmt->enabled) ··· 475 562 audio.channels, audio.rate, audio.bits_per_sample); 476 563 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n", 477 564 (int)audio.status_bits, (int)audio.category_code); 478 - 479 - iec = 0; 480 - if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL) 481 - iec |= 1 << 0; 482 - if (audio.status_bits & AUDIO_STATUS_NONAUDIO) 483 - iec |= 1 << 1; 484 - if (audio.status_bits & AUDIO_STATUS_COPYRIGHT) 485 - iec |= 1 << 2; 486 - if (audio.status_bits & AUDIO_STATUS_EMPHASIS) 487 - iec |= 1 << 3; 488 - 489 - iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code); 490 - 491 - switch (audio.rate) { 492 - case 32000: 493 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3); 494 - break; 495 - case 44100: 496 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0); 497 - break; 498 - case 48000: 499 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2); 500 - break; 501 - case 88200: 502 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8); 503 - break; 504 - case 96000: 505 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa); 506 - break; 507 - case 176400: 508 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc); 509 - break; 510 - case 192000: 511 - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe); 512 - break; 513 - } 514 - 515 - WREG32(HDMI0_60958_0 + offset, iec); 516 - 517 - iec = 0; 518 - switch (audio.bits_per_sample) { 519 - case 16: 520 - iec |= HDMI0_60958_CS_WORD_LENGTH(0x2); 521 - break; 522 - case 20: 523 - iec |= HDMI0_60958_CS_WORD_LENGTH(0x3); 524 - break; 525 - case 24: 526 - iec |= HDMI0_60958_CS_WORD_LENGTH(0xb); 527 - break; 528 - } 529 - if (audio.status_bits & AUDIO_STATUS_V) 530 - iec |= 0x5 << 16; 
531 - WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f); 532 565 533 566 err = hdmi_audio_infoframe_init(&frame); 534 567 if (err < 0) { ··· 490 631 return; 491 632 } 492 633 634 + value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset); 635 + if (value & HDMI0_AUDIO_TEST_EN) 636 + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 637 + value & ~HDMI0_AUDIO_TEST_EN); 638 + 639 + WREG32_OR(HDMI0_CONTROL + offset, 640 + HDMI0_ERROR_ACK); 641 + 642 + WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset, 643 + ~HDMI0_AUDIO_INFO_SOURCE); 644 + 493 645 r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer)); 494 - r600_hdmi_audio_workaround(encoder); 646 + 647 + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, 648 + HDMI0_AUDIO_INFO_CONT | 649 + HDMI0_AUDIO_INFO_UPDATE); 495 650 } 496 651 497 652 /*
+17
drivers/gpu/drm/radeon/r600d.h
··· 1029 1029 #define HDMI0_AUDIO_PACKET_CONTROL 0x7408 1030 1030 # define HDMI0_AUDIO_SAMPLE_SEND (1 << 0) 1031 1031 # define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4) 1032 + # define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4) 1032 1033 # define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8) 1033 1034 # define HDMI0_AUDIO_TEST_EN (1 << 12) 1034 1035 # define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) 1036 + # define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16) 1035 1037 # define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24) 1036 1038 # define HDMI0_60958_CS_UPDATE (1 << 26) 1037 1039 # define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28) 1038 1040 # define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29) 1039 1041 #define HDMI0_AUDIO_CRC_CONTROL 0x740c 1040 1042 # define HDMI0_AUDIO_CRC_EN (1 << 0) 1043 + #define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c 1041 1044 #define HDMI0_VBI_PACKET_CONTROL 0x7410 1042 1045 # define HDMI0_NULL_SEND (1 << 0) 1043 1046 # define HDMI0_GC_SEND (1 << 4) ··· 1057 1054 # define HDMI0_MPEG_INFO_UPDATE (1 << 10) 1058 1055 #define HDMI0_INFOFRAME_CONTROL1 0x7418 1059 1056 # define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) 1057 + # define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0) 1060 1058 # define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) 1059 + # define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8) 1061 1060 # define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) 1062 1061 #define HDMI0_GENERIC_PACKET_CONTROL 0x741c 1063 1062 # define HDMI0_GENERIC0_SEND (1 << 0) ··· 1068 1063 # define HDMI0_GENERIC1_SEND (1 << 4) 1069 1064 # define HDMI0_GENERIC1_CONT (1 << 5) 1070 1065 # define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16) 1066 + # define HDMI0_GENERIC0_LINE_MASK (0x3f << 16) 1071 1067 # define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24) 1068 + # define HDMI0_GENERIC1_LINE_MASK (0x3f << 24) 1072 1069 #define HDMI0_GC 0x7428 1073 1070 # define HDMI0_GC_AVMUTE (1 << 0) 1074 1071 #define HDMI0_AVI_INFO0 0x7454 ··· 1126 1119 #define HDMI0_GENERIC1_6 0x74a8 1127 1120 #define HDMI0_ACR_32_0 
0x74ac 1128 1121 # define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12) 1122 + # define HDMI0_ACR_CTS_32_MASK (0xfffff << 12) 1129 1123 #define HDMI0_ACR_32_1 0x74b0 1130 1124 # define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0) 1125 + # define HDMI0_ACR_N_32_MASK (0xfffff << 0) 1131 1126 #define HDMI0_ACR_44_0 0x74b4 1132 1127 # define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12) 1128 + # define HDMI0_ACR_CTS_44_MASK (0xfffff << 12) 1133 1129 #define HDMI0_ACR_44_1 0x74b8 1134 1130 # define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0) 1131 + # define HDMI0_ACR_N_44_MASK (0xfffff << 0) 1135 1132 #define HDMI0_ACR_48_0 0x74bc 1136 1133 # define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12) 1134 + # define HDMI0_ACR_CTS_48_MASK (0xfffff << 12) 1137 1135 #define HDMI0_ACR_48_1 0x74c0 1138 1136 # define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0) 1137 + # define HDMI0_ACR_N_48_MASK (0xfffff << 0) 1139 1138 #define HDMI0_ACR_STATUS_0 0x74c4 1140 1139 #define HDMI0_ACR_STATUS_1 0x74c8 1141 1140 #define HDMI0_AUDIO_INFO0 0x74cc ··· 1161 1148 # define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) 1162 1149 # define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) 1163 1150 # define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) 1151 + # define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20) 1164 1152 # define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) 1165 1153 # define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) 1154 + # define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28) 1166 1155 #define HDMI0_60958_1 0x74d8 1167 1156 # define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) 1168 1157 # define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) 1169 1158 # define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16) 1170 1159 # define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18) 1171 1160 # define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) 1161 + # define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20) 1172 1162 #define 
HDMI0_ACR_PACKET_CONTROL 0x74dc 1173 1163 # define HDMI0_ACR_SEND (1 << 0) 1174 1164 # define HDMI0_ACR_CONT (1 << 1) ··· 1182 1166 # define HDMI0_ACR_48 3 1183 1167 # define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ 1184 1168 # define HDMI0_ACR_AUTO_SEND (1 << 12) 1169 + #define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc 1185 1170 #define HDMI0_RAMP_CONTROL0 0x74e0 1186 1171 # define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) 1187 1172 #define HDMI0_RAMP_CONTROL1 0x74e4
+21 -12
drivers/gpu/drm/radeon/radeon.h
··· 676 676 * IRQS. 677 677 */ 678 678 679 - struct radeon_unpin_work { 680 - struct work_struct work; 681 - struct radeon_device *rdev; 682 - int crtc_id; 683 - struct radeon_fence *fence; 679 + struct radeon_flip_work { 680 + struct work_struct flip_work; 681 + struct work_struct unpin_work; 682 + struct radeon_device *rdev; 683 + int crtc_id; 684 + struct drm_framebuffer *fb; 684 685 struct drm_pending_vblank_event *event; 685 - struct radeon_bo *old_rbo; 686 - u64 new_crtc_base; 686 + struct radeon_bo *old_rbo; 687 + struct radeon_bo *new_rbo; 688 + struct radeon_fence *fence; 687 689 }; 688 690 689 691 struct r500_irq_stat_regs { ··· 849 847 #define R600_PTE_SNOOPED (1 << 2) 850 848 #define R600_PTE_READABLE (1 << 5) 851 849 #define R600_PTE_WRITEABLE (1 << 6) 850 + 851 + /* PTE (Page Table Entry) fragment field for different page sizes */ 852 + #define R600_PTE_FRAG_4KB (0 << 7) 853 + #define R600_PTE_FRAG_64KB (4 << 7) 854 + #define R600_PTE_FRAG_256KB (6 << 7) 855 + 856 + /* flags used for GART page table entries on R600+ */ 857 + #define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \ 858 + | R600_PTE_READABLE | R600_PTE_WRITEABLE) 852 859 853 860 struct radeon_vm_pt { 854 861 struct radeon_bo *bo; ··· 1887 1876 } dpm; 1888 1877 /* pageflipping */ 1889 1878 struct { 1890 - void (*pre_page_flip)(struct radeon_device *rdev, int crtc); 1891 - u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); 1892 - void (*post_page_flip)(struct radeon_device *rdev, int crtc); 1879 + void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); 1880 + bool (*page_flip_pending)(struct radeon_device *rdev, int crtc); 1893 1881 } pflip; 1894 1882 }; 1895 1883 ··· 2747 2737 #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) 2748 2738 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) 2749 2739 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) 2750 - #define 
radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) 2751 2740 #define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) 2752 - #define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) 2741 + #define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc)) 2753 2742 #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) 2754 2743 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 2755 2744 #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
-3
drivers/gpu/drm/radeon/radeon_agp.c
··· 117 117 /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */ 118 118 { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61, 119 119 PCI_VENDOR_ID_SONY, 0x8175, 1}, 120 - /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */ 121 - { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47, 122 - PCI_VENDOR_ID_ATI, 0x0152, 2}, 123 120 { 0, 0, 0, 0, 0, 0, 0 }, 124 121 }; 125 122 #endif
+23 -45
drivers/gpu/drm/radeon/radeon_asic.c
··· 248 248 .set_clock_gating = &radeon_legacy_set_clock_gating, 249 249 }, 250 250 .pflip = { 251 - .pre_page_flip = &r100_pre_page_flip, 252 251 .page_flip = &r100_page_flip, 253 - .post_page_flip = &r100_post_page_flip, 252 + .page_flip_pending = &r100_page_flip_pending, 254 253 }, 255 254 }; 256 255 ··· 314 315 .set_clock_gating = &radeon_legacy_set_clock_gating, 315 316 }, 316 317 .pflip = { 317 - .pre_page_flip = &r100_pre_page_flip, 318 318 .page_flip = &r100_page_flip, 319 - .post_page_flip = &r100_post_page_flip, 319 + .page_flip_pending = &r100_page_flip_pending, 320 320 }, 321 321 }; 322 322 ··· 394 396 .set_clock_gating = &radeon_legacy_set_clock_gating, 395 397 }, 396 398 .pflip = { 397 - .pre_page_flip = &r100_pre_page_flip, 398 399 .page_flip = &r100_page_flip, 399 - .post_page_flip = &r100_post_page_flip, 400 + .page_flip_pending = &r100_page_flip_pending, 400 401 }, 401 402 }; 402 403 ··· 460 463 .set_clock_gating = &radeon_legacy_set_clock_gating, 461 464 }, 462 465 .pflip = { 463 - .pre_page_flip = &r100_pre_page_flip, 464 466 .page_flip = &r100_page_flip, 465 - .post_page_flip = &r100_post_page_flip, 467 + .page_flip_pending = &r100_page_flip_pending, 466 468 }, 467 469 }; 468 470 ··· 526 530 .set_clock_gating = &radeon_atom_set_clock_gating, 527 531 }, 528 532 .pflip = { 529 - .pre_page_flip = &r100_pre_page_flip, 530 533 .page_flip = &r100_page_flip, 531 - .post_page_flip = &r100_post_page_flip, 534 + .page_flip_pending = &r100_page_flip_pending, 532 535 }, 533 536 }; 534 537 ··· 592 597 .set_clock_gating = &radeon_legacy_set_clock_gating, 593 598 }, 594 599 .pflip = { 595 - .pre_page_flip = &r100_pre_page_flip, 596 600 .page_flip = &r100_page_flip, 597 - .post_page_flip = &r100_post_page_flip, 601 + .page_flip_pending = &r100_page_flip_pending, 598 602 }, 599 603 }; 600 604 ··· 660 666 .set_clock_gating = &radeon_atom_set_clock_gating, 661 667 }, 662 668 .pflip = { 663 - .pre_page_flip = &rs600_pre_page_flip, 664 669 .page_flip = 
&rs600_page_flip, 665 - .post_page_flip = &rs600_post_page_flip, 670 + .page_flip_pending = &rs600_page_flip_pending, 666 671 }, 667 672 }; 668 673 ··· 728 735 .set_clock_gating = &radeon_atom_set_clock_gating, 729 736 }, 730 737 .pflip = { 731 - .pre_page_flip = &rs600_pre_page_flip, 732 738 .page_flip = &rs600_page_flip, 733 - .post_page_flip = &rs600_post_page_flip, 739 + .page_flip_pending = &rs600_page_flip_pending, 734 740 }, 735 741 }; 736 742 ··· 794 802 .set_clock_gating = &radeon_atom_set_clock_gating, 795 803 }, 796 804 .pflip = { 797 - .pre_page_flip = &rs600_pre_page_flip, 798 805 .page_flip = &rs600_page_flip, 799 - .post_page_flip = &rs600_post_page_flip, 806 + .page_flip_pending = &rs600_page_flip_pending, 800 807 }, 801 808 }; 802 809 ··· 860 869 .set_clock_gating = &radeon_atom_set_clock_gating, 861 870 }, 862 871 .pflip = { 863 - .pre_page_flip = &rs600_pre_page_flip, 864 872 .page_flip = &rs600_page_flip, 865 - .post_page_flip = &rs600_post_page_flip, 873 + .page_flip_pending = &rs600_page_flip_pending, 866 874 }, 867 875 }; 868 876 ··· 958 968 .get_temperature = &rv6xx_get_temp, 959 969 }, 960 970 .pflip = { 961 - .pre_page_flip = &rs600_pre_page_flip, 962 971 .page_flip = &rs600_page_flip, 963 - .post_page_flip = &rs600_post_page_flip, 972 + .page_flip_pending = &rs600_page_flip_pending, 964 973 }, 965 974 }; 966 975 ··· 1048 1059 .force_performance_level = &rv6xx_dpm_force_performance_level, 1049 1060 }, 1050 1061 .pflip = { 1051 - .pre_page_flip = &rs600_pre_page_flip, 1052 1062 .page_flip = &rs600_page_flip, 1053 - .post_page_flip = &rs600_post_page_flip, 1063 + .page_flip_pending = &rs600_page_flip_pending, 1054 1064 }, 1055 1065 }; 1056 1066 ··· 1138 1150 .force_performance_level = &rs780_dpm_force_performance_level, 1139 1151 }, 1140 1152 .pflip = { 1141 - .pre_page_flip = &rs600_pre_page_flip, 1142 1153 .page_flip = &rs600_page_flip, 1143 - .post_page_flip = &rs600_post_page_flip, 1154 + .page_flip_pending = &rs600_page_flip_pending, 
1144 1155 }, 1145 1156 }; 1146 1157 ··· 1188 1201 .set_backlight_level = &atombios_set_backlight_level, 1189 1202 .get_backlight_level = &atombios_get_backlight_level, 1190 1203 .hdmi_enable = &r600_hdmi_enable, 1191 - .hdmi_setmode = &r600_hdmi_setmode, 1204 + .hdmi_setmode = &dce3_1_hdmi_setmode, 1192 1205 }, 1193 1206 .copy = { 1194 1207 .blit = &r600_copy_cpdma, ··· 1243 1256 .vblank_too_short = &rv770_dpm_vblank_too_short, 1244 1257 }, 1245 1258 .pflip = { 1246 - .pre_page_flip = &rs600_pre_page_flip, 1247 1259 .page_flip = &rv770_page_flip, 1248 - .post_page_flip = &rs600_post_page_flip, 1260 + .page_flip_pending = &rv770_page_flip_pending, 1249 1261 }, 1250 1262 }; 1251 1263 ··· 1361 1375 .vblank_too_short = &cypress_dpm_vblank_too_short, 1362 1376 }, 1363 1377 .pflip = { 1364 - .pre_page_flip = &evergreen_pre_page_flip, 1365 1378 .page_flip = &evergreen_page_flip, 1366 - .post_page_flip = &evergreen_post_page_flip, 1379 + .page_flip_pending = &evergreen_page_flip_pending, 1367 1380 }, 1368 1381 }; 1369 1382 ··· 1452 1467 .force_performance_level = &sumo_dpm_force_performance_level, 1453 1468 }, 1454 1469 .pflip = { 1455 - .pre_page_flip = &evergreen_pre_page_flip, 1456 1470 .page_flip = &evergreen_page_flip, 1457 - .post_page_flip = &evergreen_post_page_flip, 1471 + .page_flip_pending = &evergreen_page_flip_pending, 1458 1472 }, 1459 1473 }; 1460 1474 ··· 1544 1560 .vblank_too_short = &btc_dpm_vblank_too_short, 1545 1561 }, 1546 1562 .pflip = { 1547 - .pre_page_flip = &evergreen_pre_page_flip, 1548 1563 .page_flip = &evergreen_page_flip, 1549 - .post_page_flip = &evergreen_post_page_flip, 1564 + .page_flip_pending = &evergreen_page_flip_pending, 1550 1565 }, 1551 1566 }; 1552 1567 ··· 1687 1704 .vblank_too_short = &ni_dpm_vblank_too_short, 1688 1705 }, 1689 1706 .pflip = { 1690 - .pre_page_flip = &evergreen_pre_page_flip, 1691 1707 .page_flip = &evergreen_page_flip, 1692 - .post_page_flip = &evergreen_post_page_flip, 1708 + .page_flip_pending = 
&evergreen_page_flip_pending, 1693 1709 }, 1694 1710 }; 1695 1711 ··· 1787 1805 .enable_bapm = &trinity_dpm_enable_bapm, 1788 1806 }, 1789 1807 .pflip = { 1790 - .pre_page_flip = &evergreen_pre_page_flip, 1791 1808 .page_flip = &evergreen_page_flip, 1792 - .post_page_flip = &evergreen_post_page_flip, 1809 + .page_flip_pending = &evergreen_page_flip_pending, 1793 1810 }, 1794 1811 }; 1795 1812 ··· 1917 1936 .vblank_too_short = &ni_dpm_vblank_too_short, 1918 1937 }, 1919 1938 .pflip = { 1920 - .pre_page_flip = &evergreen_pre_page_flip, 1921 1939 .page_flip = &evergreen_page_flip, 1922 - .post_page_flip = &evergreen_post_page_flip, 1940 + .page_flip_pending = &evergreen_page_flip_pending, 1923 1941 }, 1924 1942 }; 1925 1943 ··· 2079 2099 .powergate_uvd = &ci_dpm_powergate_uvd, 2080 2100 }, 2081 2101 .pflip = { 2082 - .pre_page_flip = &evergreen_pre_page_flip, 2083 2102 .page_flip = &evergreen_page_flip, 2084 - .post_page_flip = &evergreen_post_page_flip, 2103 + .page_flip_pending = &evergreen_page_flip_pending, 2085 2104 }, 2086 2105 }; 2087 2106 ··· 2183 2204 .enable_bapm = &kv_dpm_enable_bapm, 2184 2205 }, 2185 2206 .pflip = { 2186 - .pre_page_flip = &evergreen_pre_page_flip, 2187 2207 .page_flip = &evergreen_page_flip, 2188 - .post_page_flip = &evergreen_post_page_flip, 2208 + .page_flip_pending = &evergreen_page_flip_pending, 2189 2209 }, 2190 2210 }; 2191 2211
+18 -10
drivers/gpu/drm/radeon/radeon_asic.h
··· 135 135 extern void r100_pm_finish(struct radeon_device *rdev); 136 136 extern void r100_pm_init_profile(struct radeon_device *rdev); 137 137 extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); 138 - extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); 139 - extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 140 - extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); 138 + extern void r100_page_flip(struct radeon_device *rdev, int crtc, 139 + u64 crtc_base); 140 + extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc); 141 141 extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); 142 142 extern int r100_mc_wait_for_idle(struct radeon_device *rdev); 143 143 ··· 241 241 extern void rs600_pm_misc(struct radeon_device *rdev); 242 242 extern void rs600_pm_prepare(struct radeon_device *rdev); 243 243 extern void rs600_pm_finish(struct radeon_device *rdev); 244 - extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); 245 - extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 246 - extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); 244 + extern void rs600_page_flip(struct radeon_device *rdev, int crtc, 245 + u64 crtc_base); 246 + extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc); 247 247 void rs600_set_safe_registers(struct radeon_device *rdev); 248 248 extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc); 249 249 extern int rs600_mc_wait_for_idle(struct radeon_device *rdev); ··· 387 387 int r600_audio_init(struct radeon_device *rdev); 388 388 struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); 389 389 void r600_audio_fini(struct radeon_device *rdev); 390 + void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); 391 + void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, 392 + size_t size); 393 + void 
r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock); 394 + void r600_hdmi_audio_workaround(struct drm_encoder *encoder); 390 395 int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 391 396 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 392 397 void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); ··· 452 447 int rv770_suspend(struct radeon_device *rdev); 453 448 int rv770_resume(struct radeon_device *rdev); 454 449 void rv770_pm_misc(struct radeon_device *rdev); 455 - u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 450 + void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 451 + bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc); 456 452 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 457 453 void r700_cp_stop(struct radeon_device *rdev); 458 454 void r700_cp_fini(struct radeon_device *rdev); ··· 464 458 u32 rv770_get_xclk(struct radeon_device *rdev); 465 459 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 466 460 int rv770_get_temp(struct radeon_device *rdev); 461 + /* hdmi */ 462 + void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 467 463 /* rv7xx pm */ 468 464 int rv770_dpm_init(struct radeon_device *rdev); 469 465 int rv770_dpm_enable(struct radeon_device *rdev); ··· 521 513 extern void btc_pm_init_profile(struct radeon_device *rdev); 522 514 int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 523 515 int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 524 - extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 525 - extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 526 - extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 516 + extern void evergreen_page_flip(struct radeon_device *rdev, int crtc, 517 + u64 crtc_base); 518 + 
extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc); 527 519 extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); 528 520 void evergreen_disable_interrupt_state(struct radeon_device *rdev); 529 521 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+26 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 145 145 } 146 146 break; 147 147 } 148 + 149 + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { 150 + /* hdmi deep color only implemented on DCE4+ */ 151 + if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) { 152 + DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n", 153 + drm_get_connector_name(connector), bpc); 154 + bpc = 8; 155 + } 156 + 157 + /* 158 + * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make 159 + * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at 160 + * 12 bpc is always supported on hdmi deep color sinks, as this is 161 + * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum. 162 + */ 163 + if (bpc > 12) { 164 + DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n", 165 + drm_get_connector_name(connector), bpc); 166 + bpc = 12; 167 + } 168 + } 169 + 170 + DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", 171 + drm_get_connector_name(connector), connector->display_info.bpc, bpc); 172 + 148 173 return bpc; 149 174 } 150 175 ··· 1412 1387 struct radeon_device *rdev = dev->dev_private; 1413 1388 1414 1389 if (ASIC_IS_DCE5(rdev) && 1415 - (rdev->clock.dp_extclk >= 53900) && 1390 + (rdev->clock.default_dispclk >= 53900) && 1416 1391 radeon_connector_encoder_is_hbr2(connector)) { 1417 1392 return true; 1418 1393 }
+165 -119
drivers/gpu/drm/radeon/radeon_display.c
··· 249 249 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 250 250 251 251 drm_crtc_cleanup(crtc); 252 + destroy_workqueue(radeon_crtc->flip_queue); 252 253 kfree(radeon_crtc); 253 254 } 254 255 255 - /* 256 - * Handle unpin events outside the interrupt handler proper. 256 + /** 257 + * radeon_unpin_work_func - unpin old buffer object 258 + * 259 + * @__work - kernel work item 260 + * 261 + * Unpin the old frame buffer object outside of the interrupt handler 257 262 */ 258 263 static void radeon_unpin_work_func(struct work_struct *__work) 259 264 { 260 - struct radeon_unpin_work *work = 261 - container_of(__work, struct radeon_unpin_work, work); 265 + struct radeon_flip_work *work = 266 + container_of(__work, struct radeon_flip_work, unpin_work); 262 267 int r; 263 268 264 269 /* unpin of the old buffer */ ··· 281 276 kfree(work); 282 277 } 283 278 284 - void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) 279 + void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) 285 280 { 286 281 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 287 - struct radeon_unpin_work *work; 282 + struct radeon_flip_work *work; 288 283 unsigned long flags; 289 284 u32 update_pending; 290 285 int vpos, hpos; 291 286 292 287 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 293 - work = radeon_crtc->unpin_work; 294 - if (work == NULL || 295 - (work->fence && !radeon_fence_signaled(work->fence))) { 288 + work = radeon_crtc->flip_work; 289 + if (work == NULL) { 296 290 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 297 291 return; 298 292 } 299 - /* New pageflip, or just completion of a previous one? */ 300 - if (!radeon_crtc->deferred_flip_completion) { 301 - /* do the flip (mmio) */ 302 - update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); 303 - } else { 304 - /* This is just a completion of a flip queued in crtc 305 - * at last invocation. Make sure we go directly to 306 - * completion routine. 
307 - */ 308 - update_pending = 0; 309 - radeon_crtc->deferred_flip_completion = 0; 310 - } 293 + 294 + update_pending = radeon_page_flip_pending(rdev, crtc_id); 311 295 312 296 /* Has the pageflip already completed in crtc, or is it certain 313 297 * to complete in this vblank? ··· 314 320 */ 315 321 update_pending = 0; 316 322 } 317 - if (update_pending) { 318 - /* crtc didn't flip in this target vblank interval, 319 - * but flip is pending in crtc. It will complete it 320 - * in next vblank interval, so complete the flip at 321 - * next vblank irq. 322 - */ 323 - radeon_crtc->deferred_flip_completion = 1; 323 + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 324 + if (!update_pending) 325 + radeon_crtc_handle_flip(rdev, crtc_id); 326 + } 327 + 328 + /** 329 + * radeon_crtc_handle_flip - page flip completed 330 + * 331 + * @rdev: radeon device pointer 332 + * @crtc_id: crtc number this event is for 333 + * 334 + * Called when we are sure that a page flip for this crtc is completed. 335 + */ 336 + void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) 337 + { 338 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 339 + struct radeon_flip_work *work; 340 + unsigned long flags; 341 + 342 + /* this can happen at init */ 343 + if (radeon_crtc == NULL) 344 + return; 345 + 346 + spin_lock_irqsave(&rdev->ddev->event_lock, flags); 347 + work = radeon_crtc->flip_work; 348 + if (work == NULL) { 324 349 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 325 350 return; 326 351 } 327 352 328 - /* Pageflip (will be) certainly completed in this vblank. Clean up. */ 329 - radeon_crtc->unpin_work = NULL; 353 + /* Pageflip completed. Clean up. 
*/ 354 + radeon_crtc->flip_work = NULL; 330 355 331 356 /* wakeup userspace */ 332 357 if (work->event) ··· 353 340 354 341 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 355 342 356 - drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 357 343 radeon_fence_unref(&work->fence); 358 - radeon_post_page_flip(work->rdev, work->crtc_id); 359 - schedule_work(&work->work); 344 + radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id); 345 + queue_work(radeon_crtc->flip_queue, &work->unpin_work); 360 346 } 361 347 362 - static int radeon_crtc_page_flip(struct drm_crtc *crtc, 363 - struct drm_framebuffer *fb, 364 - struct drm_pending_vblank_event *event, 365 - uint32_t page_flip_flags) 348 + /** 349 + * radeon_flip_work_func - page flip framebuffer 350 + * 351 + * @work - kernel work item 352 + * 353 + * Wait for the buffer object to become idle and do the actual page flip 354 + */ 355 + static void radeon_flip_work_func(struct work_struct *__work) 366 356 { 367 - struct drm_device *dev = crtc->dev; 368 - struct radeon_device *rdev = dev->dev_private; 369 - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 370 - struct radeon_framebuffer *old_radeon_fb; 371 - struct radeon_framebuffer *new_radeon_fb; 372 - struct drm_gem_object *obj; 373 - struct radeon_bo *rbo; 374 - struct radeon_unpin_work *work; 357 + struct radeon_flip_work *work = 358 + container_of(__work, struct radeon_flip_work, flip_work); 359 + struct radeon_device *rdev = work->rdev; 360 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; 361 + 362 + struct drm_crtc *crtc = &radeon_crtc->base; 363 + struct drm_framebuffer *fb = work->fb; 364 + 365 + uint32_t tiling_flags, pitch_pixels; 366 + uint64_t base; 367 + 375 368 unsigned long flags; 376 - u32 tiling_flags, pitch_pixels; 377 - u64 base; 378 369 int r; 379 370 380 - work = kzalloc(sizeof *work, GFP_KERNEL); 381 - if (work == NULL) 382 - return -ENOMEM; 371 + down_read(&rdev->exclusive_lock); 372 + while (work->fence) { 373 + r 
= radeon_fence_wait(work->fence, false); 374 + if (r == -EDEADLK) { 375 + up_read(&rdev->exclusive_lock); 376 + r = radeon_gpu_reset(rdev); 377 + down_read(&rdev->exclusive_lock); 378 + } 383 379 384 - work->event = event; 385 - work->rdev = rdev; 386 - work->crtc_id = radeon_crtc->crtc_id; 387 - old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 388 - new_radeon_fb = to_radeon_framebuffer(fb); 389 - /* schedule unpin of the old buffer */ 390 - obj = old_radeon_fb->obj; 391 - /* take a reference to the old object */ 392 - drm_gem_object_reference(obj); 393 - rbo = gem_to_radeon_bo(obj); 394 - work->old_rbo = rbo; 395 - obj = new_radeon_fb->obj; 396 - rbo = gem_to_radeon_bo(obj); 397 - 398 - spin_lock(&rbo->tbo.bdev->fence_lock); 399 - if (rbo->tbo.sync_obj) 400 - work->fence = radeon_fence_ref(rbo->tbo.sync_obj); 401 - spin_unlock(&rbo->tbo.bdev->fence_lock); 402 - 403 - INIT_WORK(&work->work, radeon_unpin_work_func); 404 - 405 - /* We borrow the event spin lock for protecting unpin_work */ 406 - spin_lock_irqsave(&dev->event_lock, flags); 407 - if (radeon_crtc->unpin_work) { 408 - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 409 - r = -EBUSY; 410 - goto unlock_free; 380 + if (r) { 381 + DRM_ERROR("failed to wait on page flip fence (%d)!\n", 382 + r); 383 + goto cleanup; 384 + } else 385 + radeon_fence_unref(&work->fence); 411 386 } 412 - radeon_crtc->unpin_work = work; 413 - radeon_crtc->deferred_flip_completion = 0; 414 - spin_unlock_irqrestore(&dev->event_lock, flags); 415 387 416 388 /* pin the new buffer */ 417 389 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 418 - work->old_rbo, rbo); 390 + work->old_rbo, work->new_rbo); 419 391 420 - r = radeon_bo_reserve(rbo, false); 392 + r = radeon_bo_reserve(work->new_rbo, false); 421 393 if (unlikely(r != 0)) { 422 394 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 423 - goto pflip_cleanup; 395 + goto cleanup; 424 396 } 425 397 /* Only 27 bit offset for legacy CRTC */ 426 - 
r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 398 + r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, 427 399 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 428 400 if (unlikely(r != 0)) { 429 - radeon_bo_unreserve(rbo); 401 + radeon_bo_unreserve(work->new_rbo); 430 402 r = -EINVAL; 431 403 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 432 - goto pflip_cleanup; 404 + goto cleanup; 433 405 } 434 - radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 435 - radeon_bo_unreserve(rbo); 406 + radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); 407 + radeon_bo_unreserve(work->new_rbo); 436 408 437 409 if (!ASIC_IS_AVIVO(rdev)) { 438 410 /* crtc offset is from display base addr not FB location */ ··· 455 457 base &= ~7; 456 458 } 457 459 458 - spin_lock_irqsave(&dev->event_lock, flags); 459 - work->new_crtc_base = base; 460 - spin_unlock_irqrestore(&dev->event_lock, flags); 460 + /* We borrow the event spin lock for protecting flip_work */ 461 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 462 + 463 + /* set the proper interrupt */ 464 + radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 465 + 466 + /* do the flip (mmio) */ 467 + radeon_page_flip(rdev, radeon_crtc->crtc_id, base); 468 + 469 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 470 + up_read(&rdev->exclusive_lock); 471 + 472 + return; 473 + 474 + cleanup: 475 + drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 476 + radeon_fence_unref(&work->fence); 477 + kfree(work); 478 + up_read(&rdev->exclusive_lock); 479 + } 480 + 481 + static int radeon_crtc_page_flip(struct drm_crtc *crtc, 482 + struct drm_framebuffer *fb, 483 + struct drm_pending_vblank_event *event, 484 + uint32_t page_flip_flags) 485 + { 486 + struct drm_device *dev = crtc->dev; 487 + struct radeon_device *rdev = dev->dev_private; 488 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 489 + struct radeon_framebuffer *old_radeon_fb; 490 + struct radeon_framebuffer 
*new_radeon_fb; 491 + struct drm_gem_object *obj; 492 + struct radeon_flip_work *work; 493 + unsigned long flags; 494 + 495 + work = kzalloc(sizeof *work, GFP_KERNEL); 496 + if (work == NULL) 497 + return -ENOMEM; 498 + 499 + INIT_WORK(&work->flip_work, radeon_flip_work_func); 500 + INIT_WORK(&work->unpin_work, radeon_unpin_work_func); 501 + 502 + work->rdev = rdev; 503 + work->crtc_id = radeon_crtc->crtc_id; 504 + work->fb = fb; 505 + work->event = event; 506 + 507 + /* schedule unpin of the old buffer */ 508 + old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 509 + obj = old_radeon_fb->obj; 510 + 511 + /* take a reference to the old object */ 512 + drm_gem_object_reference(obj); 513 + work->old_rbo = gem_to_radeon_bo(obj); 514 + 515 + new_radeon_fb = to_radeon_framebuffer(fb); 516 + obj = new_radeon_fb->obj; 517 + work->new_rbo = gem_to_radeon_bo(obj); 518 + 519 + spin_lock(&work->new_rbo->tbo.bdev->fence_lock); 520 + if (work->new_rbo->tbo.sync_obj) 521 + work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj); 522 + spin_unlock(&work->new_rbo->tbo.bdev->fence_lock); 461 523 462 524 /* update crtc fb */ 463 525 crtc->primary->fb = fb; 464 526 465 - r = drm_vblank_get(dev, radeon_crtc->crtc_id); 466 - if (r) { 467 - DRM_ERROR("failed to get vblank before flip\n"); 468 - goto pflip_cleanup1; 469 - } 527 + /* We borrow the event spin lock for protecting flip_work */ 528 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 470 529 471 - /* set the proper interrupt */ 472 - radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); 530 + if (radeon_crtc->flip_work) { 531 + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 532 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 533 + drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 534 + radeon_fence_unref(&work->fence); 535 + kfree(work); 536 + return -EBUSY; 537 + } 538 + radeon_crtc->flip_work = work; 539 + 540 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 541 + 542 + 
queue_work(radeon_crtc->flip_queue, &work->flip_work); 473 543 474 544 return 0; 475 - 476 - pflip_cleanup1: 477 - if (unlikely(radeon_bo_reserve(rbo, false) != 0)) { 478 - DRM_ERROR("failed to reserve new rbo in error path\n"); 479 - goto pflip_cleanup; 480 - } 481 - if (unlikely(radeon_bo_unpin(rbo) != 0)) { 482 - DRM_ERROR("failed to unpin new rbo in error path\n"); 483 - } 484 - radeon_bo_unreserve(rbo); 485 - 486 - pflip_cleanup: 487 - spin_lock_irqsave(&dev->event_lock, flags); 488 - radeon_crtc->unpin_work = NULL; 489 - unlock_free: 490 - spin_unlock_irqrestore(&dev->event_lock, flags); 491 - drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 492 - radeon_fence_unref(&work->fence); 493 - kfree(work); 494 - 495 - return r; 496 545 } 497 546 498 547 static int ··· 609 564 610 565 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 611 566 radeon_crtc->crtc_id = index; 567 + radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc"); 612 568 rdev->mode_info.crtcs[index] = radeon_crtc; 613 569 614 570 if (rdev->family >= CHIP_BONAIRE) {
+9
drivers/gpu/drm/radeon/radeon_i2c.c
··· 94 94 struct radeon_i2c_bus_rec *rec = &i2c->rec; 95 95 uint32_t temp; 96 96 97 + mutex_lock(&i2c->mutex); 98 + 97 99 /* RV410 appears to have a bug where the hw i2c in reset 98 100 * holds the i2c port in a bad state - switch hw i2c away before 99 101 * doing DDC - do this for all r200s/r300s/r400s for safety sake ··· 172 170 temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask; 173 171 WREG32(rec->mask_data_reg, temp); 174 172 temp = RREG32(rec->mask_data_reg); 173 + 174 + mutex_unlock(&i2c->mutex); 175 175 } 176 176 177 177 static int get_clock(void *i2c_priv) ··· 817 813 struct radeon_i2c_bus_rec *rec = &i2c->rec; 818 814 int ret = 0; 819 815 816 + mutex_lock(&i2c->mutex); 817 + 820 818 switch (rdev->family) { 821 819 case CHIP_R100: 822 820 case CHIP_RV100: ··· 885 879 break; 886 880 } 887 881 882 + mutex_unlock(&i2c->mutex); 883 + 888 884 return ret; 889 885 } 890 886 ··· 927 919 i2c->adapter.dev.parent = &dev->pdev->dev; 928 920 i2c->dev = dev; 929 921 i2c_set_adapdata(&i2c->adapter, i2c); 922 + mutex_init(&i2c->mutex); 930 923 if (rec->mm_i2c || 931 924 (rec->hw_capable && 932 925 radeon_hw_i2c &&
+4 -2
drivers/gpu/drm/radeon/radeon_mode.h
··· 191 191 struct radeon_i2c_bus_rec rec; 192 192 struct drm_dp_aux aux; 193 193 bool has_aux; 194 + struct mutex mutex; 194 195 }; 195 196 196 197 /* mostly for macs, but really any system without connector tables */ ··· 325 324 struct drm_display_mode native_mode; 326 325 int pll_id; 327 326 /* page flipping */ 328 - struct radeon_unpin_work *unpin_work; 329 - int deferred_flip_completion; 327 + struct workqueue_struct *flip_queue; 328 + struct radeon_flip_work *flip_work; 330 329 /* pll sharing */ 331 330 struct radeon_atom_ss ss; 332 331 bool ss_enabled; ··· 907 906 908 907 void radeon_fb_output_poll_changed(struct radeon_device *rdev); 909 908 909 + void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id); 910 910 void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); 911 911 912 912 int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 722 722 { 723 723 int r; 724 724 725 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 725 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 726 726 if (unlikely(r != 0)) 727 727 return r; 728 728 spin_lock(&bo->tbo.bdev->fence_lock);
+1 -1
drivers/gpu/drm/radeon/radeon_object.h
··· 65 65 { 66 66 int r; 67 67 68 - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); 68 + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); 69 69 if (unlikely(r != 0)) { 70 70 if (r != -ERESTARTSYS) 71 71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+87 -8
drivers/gpu/drm/radeon/radeon_vm.c
··· 658 658 } 659 659 660 660 /** 661 + * radeon_vm_frag_ptes - add fragment information to PTEs 662 + * 663 + * @rdev: radeon_device pointer 664 + * @ib: IB for the update 665 + * @pe_start: first PTE to handle 666 + * @pe_end: last PTE to handle 667 + * @addr: addr those PTEs should point to 668 + * @flags: hw mapping flags 669 + * 670 + * Global and local mutex must be locked! 671 + */ 672 + static void radeon_vm_frag_ptes(struct radeon_device *rdev, 673 + struct radeon_ib *ib, 674 + uint64_t pe_start, uint64_t pe_end, 675 + uint64_t addr, uint32_t flags) 676 + { 677 + /** 678 + * The MC L1 TLB supports variable sized pages, based on a fragment 679 + * field in the PTE. When this field is set to a non-zero value, page 680 + * granularity is increased from 4KB to (1 << (12 + frag)). The PTE 681 + * flags are considered valid for all PTEs within the fragment range 682 + * and corresponding mappings are assumed to be physically contiguous. 683 + * 684 + * The L1 TLB can store a single PTE for the whole fragment, 685 + * significantly increasing the space available for translation 686 + * caching. This leads to large improvements in throughput when the 687 + * TLB is under pressure. 688 + * 689 + * The L2 TLB distributes small and large fragments into two 690 + * asymmetric partitions. The large fragment cache is significantly 691 + * larger. Thus, we try to use large fragments wherever possible. 692 + * Userspace can support this by aligning virtual base address and 693 + * allocation size to the fragment size. 694 + */ 695 + 696 + /* NI is optimized for 256KB fragments, SI and newer for 64KB */ 697 + uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? 698 + R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; 699 + uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 
0x200 : 0x80; 700 + 701 + uint64_t frag_start = ALIGN(pe_start, frag_align); 702 + uint64_t frag_end = pe_end & ~(frag_align - 1); 703 + 704 + unsigned count; 705 + 706 + /* system pages are non continuously */ 707 + if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) || 708 + (frag_start >= frag_end)) { 709 + 710 + count = (pe_end - pe_start) / 8; 711 + radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, 712 + RADEON_GPU_PAGE_SIZE, flags); 713 + return; 714 + } 715 + 716 + /* handle the 4K area at the beginning */ 717 + if (pe_start != frag_start) { 718 + count = (frag_start - pe_start) / 8; 719 + radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, 720 + RADEON_GPU_PAGE_SIZE, flags); 721 + addr += RADEON_GPU_PAGE_SIZE * count; 722 + } 723 + 724 + /* handle the area in the middle */ 725 + count = (frag_end - frag_start) / 8; 726 + radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count, 727 + RADEON_GPU_PAGE_SIZE, flags | frag_flags); 728 + 729 + /* handle the 4K area at the end */ 730 + if (frag_end != pe_end) { 731 + addr += RADEON_GPU_PAGE_SIZE * count; 732 + count = (pe_end - frag_end) / 8; 733 + radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count, 734 + RADEON_GPU_PAGE_SIZE, flags); 735 + } 736 + } 737 + 738 + /** 661 739 * radeon_vm_update_ptes - make sure that page tables are valid 662 740 * 663 741 * @rdev: radeon_device pointer ··· 781 703 if ((last_pte + 8 * count) != pte) { 782 704 783 705 if (count) { 784 - radeon_asic_vm_set_page(rdev, ib, last_pte, 785 - last_dst, count, 786 - RADEON_GPU_PAGE_SIZE, 787 - flags); 706 + radeon_vm_frag_ptes(rdev, ib, last_pte, 707 + last_pte + 8 * count, 708 + last_dst, flags); 788 709 } 789 710 790 711 count = nptes; ··· 798 721 } 799 722 800 723 if (count) { 801 - radeon_asic_vm_set_page(rdev, ib, last_pte, 802 - last_dst, count, 803 - RADEON_GPU_PAGE_SIZE, flags); 724 + radeon_vm_frag_ptes(rdev, ib, last_pte, 725 + last_pte + 8 * count, 726 + last_dst, flags); 804 727 } 805 728 } 806 729 ··· 964 
887 */ 965 888 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) 966 889 { 890 + const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE, 891 + RADEON_VM_PTE_COUNT * 8); 967 892 unsigned pd_size, pd_entries, pts_size; 968 893 int r; 969 894 ··· 987 908 return -ENOMEM; 988 909 } 989 910 990 - r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false, 911 + r = radeon_bo_create(rdev, pd_size, align, false, 991 912 RADEON_GEM_DOMAIN_VRAM, NULL, 992 913 &vm->page_directory); 993 914 if (r)
+11 -24
drivers/gpu/drm/radeon/rs600.c
··· 109 109 } 110 110 } 111 111 112 - void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) 113 - { 114 - /* enable the pflip int */ 115 - radeon_irq_kms_pflip_irq_get(rdev, crtc); 116 - } 117 - 118 - void rs600_post_page_flip(struct radeon_device *rdev, int crtc) 119 - { 120 - /* disable the pflip int */ 121 - radeon_irq_kms_pflip_irq_put(rdev, crtc); 122 - } 123 - 124 - u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 112 + void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 125 113 { 126 114 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 127 115 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); ··· 136 148 /* Unlock the lock, so double-buffering can take place inside vblank */ 137 149 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; 138 150 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 151 + } 152 + 153 + bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id) 154 + { 155 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 139 156 140 157 /* Return current update_pending status: */ 141 - return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 158 + return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & 159 + AVIVO_D1GRPH_SURFACE_UPDATE_PENDING); 142 160 } 143 161 144 162 void avivo_program_fmt(struct drm_encoder *encoder) ··· 626 632 radeon_gart_table_vram_free(rdev); 627 633 } 628 634 629 - #define R600_PTE_VALID (1 << 0) 630 - #define R600_PTE_SYSTEM (1 << 1) 631 - #define R600_PTE_SNOOPED (1 << 2) 632 - #define R600_PTE_READABLE (1 << 5) 633 - #define R600_PTE_WRITEABLE (1 << 6) 634 - 635 635 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 636 636 { 637 637 void __iomem *ptr = (void *)rdev->gart.ptr; ··· 634 646 return -EINVAL; 635 647 } 636 648 addr = addr & 0xFFFFFFFFFFFFF000ULL; 637 - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; 638 - 
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; 649 + addr |= R600_PTE_GART; 639 650 writeq(addr, ptr + (i * 8)); 640 651 return 0; 641 652 } ··· 774 787 wake_up(&rdev->irq.vblank_queue); 775 788 } 776 789 if (atomic_read(&rdev->irq.pflip[0])) 777 - radeon_crtc_handle_flip(rdev, 0); 790 + radeon_crtc_handle_vblank(rdev, 0); 778 791 } 779 792 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 780 793 if (rdev->irq.crtc_vblank_int[1]) { ··· 783 796 wake_up(&rdev->irq.vblank_queue); 784 797 } 785 798 if (atomic_read(&rdev->irq.pflip[1])) 786 - radeon_crtc_handle_flip(rdev, 1); 799 + radeon_crtc_handle_vblank(rdev, 1); 787 800 } 788 801 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 789 802 queue_hotplug = true;
+8 -2
drivers/gpu/drm/radeon/rv770.c
··· 801 801 return reference_clock; 802 802 } 803 803 804 - u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 804 + void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 805 805 { 806 806 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 807 807 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); ··· 835 835 /* Unlock the lock, so double-buffering can take place inside vblank */ 836 836 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; 837 837 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 838 + } 839 + 840 + bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id) 841 + { 842 + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 838 843 839 844 /* Return current update_pending status: */ 840 - return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 845 + return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & 846 + AVIVO_D1GRPH_SURFACE_UPDATE_PENDING); 841 847 } 842 848 843 849 /* get temperature in millidegrees */
+11 -7
drivers/gpu/drm/radeon/si.c
··· 4044 4044 WREG32(MC_VM_MX_L1_TLB_CNTL, 4045 4045 (0xA << 7) | 4046 4046 ENABLE_L1_TLB | 4047 + ENABLE_L1_FRAGMENT_PROCESSING | 4047 4048 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 4048 4049 ENABLE_ADVANCED_DRIVER_MODEL | 4049 4050 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 4050 4051 /* Setup L2 cache */ 4051 4052 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 4053 + ENABLE_L2_FRAGMENT_PROCESSING | 4052 4054 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 4053 4055 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 4054 4056 EFFECTIVE_L2_QUEUE_SIZE(7) | 4055 4057 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 4056 4058 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 4057 4059 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 4058 - L2_CACHE_BIGK_FRAGMENT_SIZE(0)); 4060 + BANK_SELECT(4) | 4061 + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 4059 4062 /* setup context0 */ 4060 4063 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 4061 4064 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); ··· 4095 4092 (u32)(rdev->dummy_page.addr >> 12)); 4096 4093 WREG32(VM_CONTEXT1_CNTL2, 4); 4097 4094 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 4095 + PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) | 4098 4096 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 4099 4097 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 4100 4098 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | ··· 6150 6146 wake_up(&rdev->irq.vblank_queue); 6151 6147 } 6152 6148 if (atomic_read(&rdev->irq.pflip[0])) 6153 - radeon_crtc_handle_flip(rdev, 0); 6149 + radeon_crtc_handle_vblank(rdev, 0); 6154 6150 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 6155 6151 DRM_DEBUG("IH: D1 vblank\n"); 6156 6152 } ··· 6176 6172 wake_up(&rdev->irq.vblank_queue); 6177 6173 } 6178 6174 if (atomic_read(&rdev->irq.pflip[1])) 6179 - radeon_crtc_handle_flip(rdev, 1); 6175 + radeon_crtc_handle_vblank(rdev, 1); 6180 6176 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 6181 6177 
DRM_DEBUG("IH: D2 vblank\n"); 6182 6178 } ··· 6202 6198 wake_up(&rdev->irq.vblank_queue); 6203 6199 } 6204 6200 if (atomic_read(&rdev->irq.pflip[2])) 6205 - radeon_crtc_handle_flip(rdev, 2); 6201 + radeon_crtc_handle_vblank(rdev, 2); 6206 6202 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 6207 6203 DRM_DEBUG("IH: D3 vblank\n"); 6208 6204 } ··· 6228 6224 wake_up(&rdev->irq.vblank_queue); 6229 6225 } 6230 6226 if (atomic_read(&rdev->irq.pflip[3])) 6231 - radeon_crtc_handle_flip(rdev, 3); 6227 + radeon_crtc_handle_vblank(rdev, 3); 6232 6228 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 6233 6229 DRM_DEBUG("IH: D4 vblank\n"); 6234 6230 } ··· 6254 6250 wake_up(&rdev->irq.vblank_queue); 6255 6251 } 6256 6252 if (atomic_read(&rdev->irq.pflip[4])) 6257 - radeon_crtc_handle_flip(rdev, 4); 6253 + radeon_crtc_handle_vblank(rdev, 4); 6258 6254 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 6259 6255 DRM_DEBUG("IH: D5 vblank\n"); 6260 6256 } ··· 6280 6276 wake_up(&rdev->irq.vblank_queue); 6281 6277 } 6282 6278 if (atomic_read(&rdev->irq.pflip[5])) 6283 - radeon_crtc_handle_flip(rdev, 5); 6279 + radeon_crtc_handle_vblank(rdev, 5); 6284 6280 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 6285 6281 DRM_DEBUG("IH: D6 vblank\n"); 6286 6282 }
+19 -1
drivers/gpu/drm/radeon/si_dma.c
··· 79 79 80 80 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 81 81 82 - if (flags & R600_PTE_SYSTEM) { 82 + if (flags == R600_PTE_GART) { 83 + uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; 84 + while (count) { 85 + unsigned bytes = count * 8; 86 + if (bytes > 0xFFFF8) 87 + bytes = 0xFFFF8; 88 + 89 + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 90 + 1, 0, 0, bytes); 91 + ib->ptr[ib->length_dw++] = pe & 0xffffffff; 92 + ib->ptr[ib->length_dw++] = src & 0xffffffff; 93 + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 94 + ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; 95 + 96 + pe += bytes; 97 + src += bytes; 98 + count -= bytes / 8; 99 + } 100 + } else if (flags & R600_PTE_SYSTEM) { 83 101 while (count) { 84 102 ndw = count * 2; 85 103 if (ndw > 0xFFFFE)
+1
drivers/gpu/drm/radeon/sid.h
··· 362 362 #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) 363 363 #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) 364 364 #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) 365 + #define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) 365 366 #define VM_CONTEXT1_CNTL 0x1414 366 367 #define VM_CONTEXT0_CNTL2 0x1430 367 368 #define VM_CONTEXT1_CNTL2 0x1434
+2 -1
drivers/gpu/vga/vga_switcheroo.c
··· 623 623 ret = dev->bus->pm->runtime_suspend(dev); 624 624 if (ret) 625 625 return ret; 626 - 626 + if (vgasr_priv.handler->switchto) 627 + vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD); 627 628 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); 628 629 return 0; 629 630 }
+5
include/drm/drm_edid.h
··· 202 202 #define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) 203 203 #define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) 204 204 205 + #define DRM_EDID_HDMI_DC_48 (1 << 6) 206 + #define DRM_EDID_HDMI_DC_36 (1 << 5) 207 + #define DRM_EDID_HDMI_DC_30 (1 << 4) 208 + #define DRM_EDID_HDMI_DC_Y444 (1 << 3) 209 + 205 210 struct edid { 206 211 u8 header[8]; 207 212 /* Vendor & product info */