Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (169 commits)
drivers/gpu/drm/radeon/atom.c: fix warning
drm/radeon/kms: bump kms version number
drm/radeon/kms: properly set num banks for fusion asics
drm/radeon/kms/atom: move dig phy init out of modesetting
drm/radeon/kms/cayman: fix typo in register mask
drm/radeon/kms: fix typo in spread spectrum code
drm/radeon/kms: fix tile_config value reported to userspace on cayman.
drm/radeon/kms: fix incorrect comparison in cayman setup code.
drm/radeon/kms: add wait idle ioctl for eg->cayman
drm/radeon/cayman: setup hdp to invalidate and flush when asked
drm/radeon/evergreen/btc/fusion: setup hdp to invalidate and flush when asked
agp/uninorth: Fix lockups with radeon KMS and >1x.
drm/radeon/kms: the SS_Id field in the LCD table is for LVDS only
drm/radeon/kms: properly set the CLK_REF bit for DCE3 devices
drm/radeon/kms: fixup eDP connector handling
drm/radeon/kms: bail early for eDP in hotplug callback
drm/radeon/kms: simplify hotplug handler logic
drm/radeon/kms: rewrite DP handling
drm/radeon/kms/atom: add support for setting DP panel mode
drm/radeon/kms: atombios.h updates for DP panel mode
...

+9527 -4180
+2 -2
MAINTAINERS
··· 2245 2245 F: include/drm/ 2246 2246 2247 2247 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 2248 - M: Chris Wilson <chris@chris-wilson.co.uk> 2248 + M: Keith Packard <keithp@keithp.com> 2249 2249 L: intel-gfx@lists.freedesktop.org (subscribers-only) 2250 2250 L: dri-devel@lists.freedesktop.org 2251 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git 2251 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git 2252 2252 S: Supported 2253 2253 F: drivers/gpu/drm/i915 2254 2254 F: include/drm/i915*
+3
drivers/char/agp/intel-agp.c
··· 903 903 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), 904 904 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), 905 905 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), 906 + ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB), 907 + ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB), 908 + ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB), 906 909 { } 907 910 }; 908 911
+8
drivers/char/agp/intel-agp.h
··· 225 225 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 226 226 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ 227 227 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A 228 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */ 229 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152 230 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162 231 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */ 232 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156 233 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166 234 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */ 235 + #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A 228 236 229 237 int intel_gmch_probe(struct pci_dev *pdev, 230 238 struct agp_bridge_data *bridge);
+10
drivers/char/agp/intel-gtt.c
··· 1420 1420 "Sandybridge", &sandybridge_gtt_driver }, 1421 1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1422 1422 "Sandybridge", &sandybridge_gtt_driver }, 1423 + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, 1424 + "Ivybridge", &sandybridge_gtt_driver }, 1425 + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, 1426 + "Ivybridge", &sandybridge_gtt_driver }, 1427 + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, 1428 + "Ivybridge", &sandybridge_gtt_driver }, 1429 + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, 1430 + "Ivybridge", &sandybridge_gtt_driver }, 1431 + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, 1432 + "Ivybridge", &sandybridge_gtt_driver }, 1423 1433 { 0, NULL, NULL } 1424 1434 }; 1425 1435
+1 -1
drivers/char/agp/uninorth-agp.c
··· 80 80 ctrl | UNI_N_CFG_GART_INVAL); 81 81 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl); 82 82 83 - if (uninorth_rev <= 0x30) { 83 + if (!mem && uninorth_rev <= 0x30) { 84 84 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 85 85 ctrl | UNI_N_CFG_GART_2xRESET); 86 86 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+59 -2
drivers/gpu/drm/drm_edid.c
··· 1413 1413 EXPORT_SYMBOL(drm_detect_monitor_audio); 1414 1414 1415 1415 /** 1416 + * drm_add_display_info - pull display info out if present 1417 + * @edid: EDID data 1418 + * @info: display info (attached to connector) 1419 + * 1420 + * Grab any available display info and stuff it into the drm_display_info 1421 + * structure that's part of the connector. Useful for tracking bpp and 1422 + * color spaces. 1423 + */ 1424 + static void drm_add_display_info(struct edid *edid, 1425 + struct drm_display_info *info) 1426 + { 1427 + info->width_mm = edid->width_cm * 10; 1428 + info->height_mm = edid->height_cm * 10; 1429 + 1430 + /* driver figures it out in this case */ 1431 + info->bpc = 0; 1432 + info->color_formats = 0; 1433 + 1434 + /* Only defined for 1.4 with digital displays */ 1435 + if (edid->revision < 4) 1436 + return; 1437 + 1438 + if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) 1439 + return; 1440 + 1441 + switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { 1442 + case DRM_EDID_DIGITAL_DEPTH_6: 1443 + info->bpc = 6; 1444 + break; 1445 + case DRM_EDID_DIGITAL_DEPTH_8: 1446 + info->bpc = 8; 1447 + break; 1448 + case DRM_EDID_DIGITAL_DEPTH_10: 1449 + info->bpc = 10; 1450 + break; 1451 + case DRM_EDID_DIGITAL_DEPTH_12: 1452 + info->bpc = 12; 1453 + break; 1454 + case DRM_EDID_DIGITAL_DEPTH_14: 1455 + info->bpc = 14; 1456 + break; 1457 + case DRM_EDID_DIGITAL_DEPTH_16: 1458 + info->bpc = 16; 1459 + break; 1460 + case DRM_EDID_DIGITAL_DEPTH_UNDEF: 1461 + default: 1462 + info->bpc = 0; 1463 + break; 1464 + } 1465 + 1466 + info->color_formats = DRM_COLOR_FORMAT_RGB444; 1467 + if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444) 1468 + info->color_formats = DRM_COLOR_FORMAT_YCRCB444; 1469 + if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) 1470 + info->color_formats = DRM_COLOR_FORMAT_YCRCB422; 1471 + } 1472 + 1473 + /** 1416 1474 * drm_add_edid_modes - add modes from EDID data, if available 1417 1475 * @connector: connector we're probing 1418 1476 * 
@edid: edid data ··· 1518 1460 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 1519 1461 edid_fixup_preferred(connector, quirks); 1520 1462 1521 - connector->display_info.width_mm = edid->width_cm * 10; 1522 - connector->display_info.height_mm = edid->height_cm * 10; 1463 + drm_add_display_info(edid, &connector->display_info); 1523 1464 1524 1465 return num_modes; 1525 1466 }
+36 -171
drivers/gpu/drm/drm_fb_helper.c
··· 70 70 } 71 71 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); 72 72 73 - /** 74 - * drm_fb_helper_connector_parse_command_line - parse command line for connector 75 - * @connector - connector to parse line for 76 - * @mode_option - per connector mode option 77 - * 78 - * This parses the connector specific then generic command lines for 79 - * modes and options to configure the connector. 80 - * 81 - * This uses the same parameters as the fb modedb.c, except for extra 82 - * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] 83 - * 84 - * enable/enable Digital/disable bit at the end 85 - */ 86 - static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn, 87 - const char *mode_option) 88 - { 89 - const char *name; 90 - unsigned int namelen; 91 - int res_specified = 0, bpp_specified = 0, refresh_specified = 0; 92 - unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; 93 - int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; 94 - int i; 95 - enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 96 - struct drm_fb_helper_cmdline_mode *cmdline_mode; 97 - struct drm_connector *connector; 98 - 99 - if (!fb_helper_conn) 100 - return false; 101 - connector = fb_helper_conn->connector; 102 - 103 - cmdline_mode = &fb_helper_conn->cmdline_mode; 104 - if (!mode_option) 105 - mode_option = fb_mode_option; 106 - 107 - if (!mode_option) { 108 - cmdline_mode->specified = false; 109 - return false; 110 - } 111 - 112 - name = mode_option; 113 - namelen = strlen(name); 114 - for (i = namelen-1; i >= 0; i--) { 115 - switch (name[i]) { 116 - case '@': 117 - namelen = i; 118 - if (!refresh_specified && !bpp_specified && 119 - !yres_specified) { 120 - refresh = simple_strtol(&name[i+1], NULL, 10); 121 - refresh_specified = 1; 122 - if (cvt || rb) 123 - cvt = 0; 124 - } else 125 - goto done; 126 - break; 127 - case '-': 128 - namelen = i; 129 - if (!bpp_specified && !yres_specified) { 130 - bpp = 
simple_strtol(&name[i+1], NULL, 10); 131 - bpp_specified = 1; 132 - if (cvt || rb) 133 - cvt = 0; 134 - } else 135 - goto done; 136 - break; 137 - case 'x': 138 - if (!yres_specified) { 139 - yres = simple_strtol(&name[i+1], NULL, 10); 140 - yres_specified = 1; 141 - } else 142 - goto done; 143 - case '0' ... '9': 144 - break; 145 - case 'M': 146 - if (!yres_specified) 147 - cvt = 1; 148 - break; 149 - case 'R': 150 - if (cvt) 151 - rb = 1; 152 - break; 153 - case 'm': 154 - if (!cvt) 155 - margins = 1; 156 - break; 157 - case 'i': 158 - if (!cvt) 159 - interlace = 1; 160 - break; 161 - case 'e': 162 - force = DRM_FORCE_ON; 163 - break; 164 - case 'D': 165 - if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && 166 - (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) 167 - force = DRM_FORCE_ON; 168 - else 169 - force = DRM_FORCE_ON_DIGITAL; 170 - break; 171 - case 'd': 172 - force = DRM_FORCE_OFF; 173 - break; 174 - default: 175 - goto done; 176 - } 177 - } 178 - if (i < 0 && yres_specified) { 179 - xres = simple_strtol(name, NULL, 10); 180 - res_specified = 1; 181 - } 182 - done: 183 - 184 - DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", 185 - drm_get_connector_name(connector), xres, yres, 186 - (refresh) ? refresh : 60, (rb) ? " reduced blanking" : 187 - "", (margins) ? " with margins" : "", (interlace) ? 
188 - " interlaced" : ""); 189 - 190 - if (force) { 191 - const char *s; 192 - switch (force) { 193 - case DRM_FORCE_OFF: s = "OFF"; break; 194 - case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; 195 - default: 196 - case DRM_FORCE_ON: s = "ON"; break; 197 - } 198 - 199 - DRM_INFO("forcing %s connector %s\n", 200 - drm_get_connector_name(connector), s); 201 - connector->force = force; 202 - } 203 - 204 - if (res_specified) { 205 - cmdline_mode->specified = true; 206 - cmdline_mode->xres = xres; 207 - cmdline_mode->yres = yres; 208 - } 209 - 210 - if (refresh_specified) { 211 - cmdline_mode->refresh_specified = true; 212 - cmdline_mode->refresh = refresh; 213 - } 214 - 215 - if (bpp_specified) { 216 - cmdline_mode->bpp_specified = true; 217 - cmdline_mode->bpp = bpp; 218 - } 219 - cmdline_mode->rb = rb ? true : false; 220 - cmdline_mode->cvt = cvt ? true : false; 221 - cmdline_mode->interlace = interlace ? true : false; 222 - 223 - return true; 224 - } 225 - 226 73 static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) 227 74 { 228 75 struct drm_fb_helper_connector *fb_helper_conn; 229 76 int i; 230 77 231 78 for (i = 0; i < fb_helper->connector_count; i++) { 79 + struct drm_cmdline_mode *mode; 80 + struct drm_connector *connector; 232 81 char *option = NULL; 233 82 234 83 fb_helper_conn = fb_helper->connector_info[i]; 84 + connector = fb_helper_conn->connector; 85 + mode = &fb_helper_conn->cmdline_mode; 235 86 236 87 /* do something on return - turn off connector maybe */ 237 - if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option)) 88 + if (fb_get_options(drm_get_connector_name(connector), &option)) 238 89 continue; 239 90 240 - drm_fb_helper_connector_parse_command_line(fb_helper_conn, option); 91 + if (drm_mode_parse_command_line_for_connector(option, 92 + connector, 93 + mode)) { 94 + if (mode->force) { 95 + const char *s; 96 + switch (mode->force) { 97 + case DRM_FORCE_OFF: s = "OFF"; break; 98 + case 
DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; 99 + default: 100 + case DRM_FORCE_ON: s = "ON"; break; 101 + } 102 + 103 + DRM_INFO("forcing %s connector %s\n", 104 + drm_get_connector_name(connector), s); 105 + connector->force = mode->force; 106 + } 107 + 108 + DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", 109 + drm_get_connector_name(connector), 110 + mode->xres, mode->yres, 111 + mode->refresh_specified ? mode->refresh : 60, 112 + mode->rb ? " reduced blanking" : "", 113 + mode->margins ? " with margins" : "", 114 + mode->interlace ? " interlaced" : ""); 115 + } 116 + 241 117 } 242 118 return 0; 243 119 } ··· 777 901 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 778 902 for (i = 0; i < fb_helper->connector_count; i++) { 779 903 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 780 - struct drm_fb_helper_cmdline_mode *cmdline_mode; 904 + struct drm_cmdline_mode *cmdline_mode; 781 905 782 906 cmdline_mode = &fb_helper_conn->cmdline_mode; 783 907 ··· 999 1123 1000 1124 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) 1001 1125 { 1002 - struct drm_fb_helper_cmdline_mode *cmdline_mode; 1126 + struct drm_cmdline_mode *cmdline_mode; 1003 1127 cmdline_mode = &fb_connector->cmdline_mode; 1004 1128 return cmdline_mode->specified; 1005 1129 } ··· 1007 1131 static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1008 1132 int width, int height) 1009 1133 { 1010 - struct drm_fb_helper_cmdline_mode *cmdline_mode; 1134 + struct drm_cmdline_mode *cmdline_mode; 1011 1135 struct drm_display_mode *mode = NULL; 1012 1136 1013 1137 cmdline_mode = &fb_helper_conn->cmdline_mode; ··· 1039 1163 } 1040 1164 1041 1165 create_mode: 1042 - if (cmdline_mode->cvt) 1043 - mode = drm_cvt_mode(fb_helper_conn->connector->dev, 1044 - cmdline_mode->xres, cmdline_mode->yres, 1045 - cmdline_mode->refresh_specified ? 
cmdline_mode->refresh : 60, 1046 - cmdline_mode->rb, cmdline_mode->interlace, 1047 - cmdline_mode->margins); 1048 - else 1049 - mode = drm_gtf_mode(fb_helper_conn->connector->dev, 1050 - cmdline_mode->xres, cmdline_mode->yres, 1051 - cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, 1052 - cmdline_mode->interlace, 1053 - cmdline_mode->margins); 1054 - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1166 + mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev, 1167 + cmdline_mode); 1055 1168 list_add(&mode->head, &fb_helper_conn->connector->modes); 1056 1169 return mode; 1057 1170 }
+5 -4
drivers/gpu/drm/drm_irq.c
··· 684 684 */ 685 685 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); 686 686 687 - DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n", 688 - crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec, 689 - raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec, 690 - (int) duration_ns/1000, i); 687 + DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 688 + crtc, (int)vbl_status, hpos, vpos, 689 + (long)raw_time.tv_sec, (long)raw_time.tv_usec, 690 + (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 691 + (int)duration_ns/1000, i); 691 692 692 693 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; 693 694 if (invbl)
+156
drivers/gpu/drm/drm_modes.c
··· 974 974 } 975 975 } 976 976 EXPORT_SYMBOL(drm_mode_connector_list_update); 977 + 978 + /** 979 + * drm_mode_parse_command_line_for_connector - parse command line for connector 980 + * @mode_option - per connector mode option 981 + * @connector - connector to parse line for 982 + * 983 + * This parses the connector specific then generic command lines for 984 + * modes and options to configure the connector. 985 + * 986 + * This uses the same parameters as the fb modedb.c, except for extra 987 + * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] 988 + * 989 + * enable/enable Digital/disable bit at the end 990 + */ 991 + bool drm_mode_parse_command_line_for_connector(const char *mode_option, 992 + struct drm_connector *connector, 993 + struct drm_cmdline_mode *mode) 994 + { 995 + const char *name; 996 + unsigned int namelen; 997 + int res_specified = 0, bpp_specified = 0, refresh_specified = 0; 998 + unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; 999 + int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; 1000 + int i; 1001 + enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 1002 + 1003 + #ifdef CONFIG_FB 1004 + if (!mode_option) 1005 + mode_option = fb_mode_option; 1006 + #endif 1007 + 1008 + if (!mode_option) { 1009 + mode->specified = false; 1010 + return false; 1011 + } 1012 + 1013 + name = mode_option; 1014 + namelen = strlen(name); 1015 + for (i = namelen-1; i >= 0; i--) { 1016 + switch (name[i]) { 1017 + case '@': 1018 + namelen = i; 1019 + if (!refresh_specified && !bpp_specified && 1020 + !yres_specified) { 1021 + refresh = simple_strtol(&name[i+1], NULL, 10); 1022 + refresh_specified = 1; 1023 + if (cvt || rb) 1024 + cvt = 0; 1025 + } else 1026 + goto done; 1027 + break; 1028 + case '-': 1029 + namelen = i; 1030 + if (!bpp_specified && !yres_specified) { 1031 + bpp = simple_strtol(&name[i+1], NULL, 10); 1032 + bpp_specified = 1; 1033 + if (cvt || rb) 1034 + cvt = 0; 1035 + } else 1036 + goto done; 1037 + break; 1038 + case 
'x': 1039 + if (!yres_specified) { 1040 + yres = simple_strtol(&name[i+1], NULL, 10); 1041 + yres_specified = 1; 1042 + } else 1043 + goto done; 1044 + case '0' ... '9': 1045 + break; 1046 + case 'M': 1047 + if (!yres_specified) 1048 + cvt = 1; 1049 + break; 1050 + case 'R': 1051 + if (cvt) 1052 + rb = 1; 1053 + break; 1054 + case 'm': 1055 + if (!cvt) 1056 + margins = 1; 1057 + break; 1058 + case 'i': 1059 + if (!cvt) 1060 + interlace = 1; 1061 + break; 1062 + case 'e': 1063 + force = DRM_FORCE_ON; 1064 + break; 1065 + case 'D': 1066 + if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && 1067 + (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) 1068 + force = DRM_FORCE_ON; 1069 + else 1070 + force = DRM_FORCE_ON_DIGITAL; 1071 + break; 1072 + case 'd': 1073 + force = DRM_FORCE_OFF; 1074 + break; 1075 + default: 1076 + goto done; 1077 + } 1078 + } 1079 + if (i < 0 && yres_specified) { 1080 + xres = simple_strtol(name, NULL, 10); 1081 + res_specified = 1; 1082 + } 1083 + done: 1084 + if (res_specified) { 1085 + mode->specified = true; 1086 + mode->xres = xres; 1087 + mode->yres = yres; 1088 + } 1089 + 1090 + if (refresh_specified) { 1091 + mode->refresh_specified = true; 1092 + mode->refresh = refresh; 1093 + } 1094 + 1095 + if (bpp_specified) { 1096 + mode->bpp_specified = true; 1097 + mode->bpp = bpp; 1098 + } 1099 + mode->rb = rb ? true : false; 1100 + mode->cvt = cvt ? true : false; 1101 + mode->interlace = interlace ? true : false; 1102 + mode->force = force; 1103 + 1104 + return true; 1105 + } 1106 + EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector); 1107 + 1108 + struct drm_display_mode * 1109 + drm_mode_create_from_cmdline_mode(struct drm_device *dev, 1110 + struct drm_cmdline_mode *cmd) 1111 + { 1112 + struct drm_display_mode *mode; 1113 + 1114 + if (cmd->cvt) 1115 + mode = drm_cvt_mode(dev, 1116 + cmd->xres, cmd->yres, 1117 + cmd->refresh_specified ? 
cmd->refresh : 60, 1118 + cmd->rb, cmd->interlace, 1119 + cmd->margins); 1120 + else 1121 + mode = drm_gtf_mode(dev, 1122 + cmd->xres, cmd->yres, 1123 + cmd->refresh_specified ? cmd->refresh : 60, 1124 + cmd->interlace, 1125 + cmd->margins); 1126 + if (!mode) 1127 + return NULL; 1128 + 1129 + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1130 + return mode; 1131 + } 1132 + EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
+21
drivers/gpu/drm/drm_stub.c
··· 62 62 struct class *drm_class; 63 63 struct proc_dir_entry *drm_proc_root; 64 64 struct dentry *drm_debugfs_root; 65 + 66 + int drm_err(const char *func, const char *format, ...) 67 + { 68 + struct va_format vaf; 69 + va_list args; 70 + int r; 71 + 72 + va_start(args, format); 73 + 74 + vaf.fmt = format; 75 + vaf.va = &args; 76 + 77 + r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); 78 + 79 + va_end(args); 80 + 81 + return r; 82 + } 83 + EXPORT_SYMBOL(drm_err); 84 + 65 85 void drm_ut_debug_printk(unsigned int request_level, 66 86 const char *prefix, 67 87 const char *function_name, ··· 98 78 } 99 79 } 100 80 EXPORT_SYMBOL(drm_ut_debug_printk); 81 + 101 82 static int drm_minor_get_id(struct drm_device *dev, int type) 102 83 { 103 84 int new_id;
+123 -8
drivers/gpu/drm/i915/i915_debugfs.c
··· 106 106 } 107 107 } 108 108 109 - static const char *agp_type_str(int type) 109 + static const char *cache_level_str(int type) 110 110 { 111 111 switch (type) { 112 - case 0: return " uncached"; 113 - case 1: return " snooped"; 112 + case I915_CACHE_NONE: return " uncached"; 113 + case I915_CACHE_LLC: return " snooped (LLC)"; 114 + case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 114 115 default: return ""; 115 116 } 116 117 } ··· 128 127 obj->base.write_domain, 129 128 obj->last_rendering_seqno, 130 129 obj->last_fenced_seqno, 131 - agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY), 130 + cache_level_str(obj->cache_level), 132 131 obj->dirty ? " dirty" : "", 133 132 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 134 133 if (obj->base.name) ··· 715 714 dirty_flag(err->dirty), 716 715 purgeable_flag(err->purgeable), 717 716 ring_str(err->ring), 718 - agp_type_str(err->agp_type)); 717 + cache_level_str(err->cache_level)); 719 718 720 719 if (err->name) 721 720 seq_printf(m, " (name: %d)", err->name); ··· 853 852 struct drm_info_node *node = (struct drm_info_node *) m->private; 854 853 struct drm_device *dev = node->minor->dev; 855 854 drm_i915_private_t *dev_priv = dev->dev_private; 855 + int ret; 856 856 857 857 if (IS_GEN5(dev)) { 858 858 u16 rgvswctl = I915_READ16(MEMSWCTL); ··· 875 873 int max_freq; 876 874 877 875 /* RPSTAT1 is in the GT power well */ 878 - __gen6_gt_force_wake_get(dev_priv); 876 + ret = mutex_lock_interruptible(&dev->struct_mutex); 877 + if (ret) 878 + return ret; 879 + 880 + gen6_gt_force_wake_get(dev_priv); 879 881 880 882 rpstat = I915_READ(GEN6_RPSTAT1); 881 883 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); ··· 888 882 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 889 883 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 890 884 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 885 + 886 + gen6_gt_force_wake_put(dev_priv); 887 + mutex_unlock(&dev->struct_mutex); 891 888 892 889 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 893 890 
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); ··· 926 917 max_freq = rp_state_cap & 0xff; 927 918 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 928 919 max_freq * 50); 929 - 930 - __gen6_gt_force_wake_put(dev_priv); 931 920 } else { 932 921 seq_printf(m, "no P-state info available\n"); 933 922 } ··· 1065 1058 case FBC_MULTIPLE_PIPES: 1066 1059 seq_printf(m, "multiple pipes are enabled"); 1067 1060 break; 1061 + case FBC_MODULE_PARAM: 1062 + seq_printf(m, "disabled per module param (default off)"); 1063 + break; 1068 1064 default: 1069 1065 seq_printf(m, "unknown reason"); 1070 1066 } ··· 1196 1186 return 0; 1197 1187 } 1198 1188 1189 + static int i915_context_status(struct seq_file *m, void *unused) 1190 + { 1191 + struct drm_info_node *node = (struct drm_info_node *) m->private; 1192 + struct drm_device *dev = node->minor->dev; 1193 + drm_i915_private_t *dev_priv = dev->dev_private; 1194 + int ret; 1195 + 1196 + ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1197 + if (ret) 1198 + return ret; 1199 + 1200 + seq_printf(m, "power context "); 1201 + describe_obj(m, dev_priv->pwrctx); 1202 + seq_printf(m, "\n"); 1203 + 1204 + seq_printf(m, "render context "); 1205 + describe_obj(m, dev_priv->renderctx); 1206 + seq_printf(m, "\n"); 1207 + 1208 + mutex_unlock(&dev->mode_config.mutex); 1209 + 1210 + return 0; 1211 + } 1212 + 1213 + static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1214 + { 1215 + struct drm_info_node *node = (struct drm_info_node *) m->private; 1216 + struct drm_device *dev = node->minor->dev; 1217 + struct drm_i915_private *dev_priv = dev->dev_private; 1218 + 1219 + seq_printf(m, "forcewake count = %d\n", 1220 + atomic_read(&dev_priv->forcewake_count)); 1221 + 1222 + return 0; 1223 + } 1224 + 1199 1225 static int 1200 1226 i915_wedged_open(struct inode *inode, 1201 1227 struct file *filp) ··· 1334 1288 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1335 1289 } 1336 1290 1291 + static int 
i915_forcewake_open(struct inode *inode, struct file *file) 1292 + { 1293 + struct drm_device *dev = inode->i_private; 1294 + struct drm_i915_private *dev_priv = dev->dev_private; 1295 + int ret; 1296 + 1297 + if (!IS_GEN6(dev)) 1298 + return 0; 1299 + 1300 + ret = mutex_lock_interruptible(&dev->struct_mutex); 1301 + if (ret) 1302 + return ret; 1303 + gen6_gt_force_wake_get(dev_priv); 1304 + mutex_unlock(&dev->struct_mutex); 1305 + 1306 + return 0; 1307 + } 1308 + 1309 + int i915_forcewake_release(struct inode *inode, struct file *file) 1310 + { 1311 + struct drm_device *dev = inode->i_private; 1312 + struct drm_i915_private *dev_priv = dev->dev_private; 1313 + 1314 + if (!IS_GEN6(dev)) 1315 + return 0; 1316 + 1317 + /* 1318 + * It's bad that we can potentially hang userspace if struct_mutex gets 1319 + * forever stuck. However, if we cannot acquire this lock it means that 1320 + * almost certainly the driver has hung, is not unload-able. Therefore 1321 + * hanging here is probably a minor inconvenience not to be seen my 1322 + * almost every user. 
1323 + */ 1324 + mutex_lock(&dev->struct_mutex); 1325 + gen6_gt_force_wake_put(dev_priv); 1326 + mutex_unlock(&dev->struct_mutex); 1327 + 1328 + return 0; 1329 + } 1330 + 1331 + static const struct file_operations i915_forcewake_fops = { 1332 + .owner = THIS_MODULE, 1333 + .open = i915_forcewake_open, 1334 + .release = i915_forcewake_release, 1335 + }; 1336 + 1337 + static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1338 + { 1339 + struct drm_device *dev = minor->dev; 1340 + struct dentry *ent; 1341 + 1342 + ent = debugfs_create_file("i915_forcewake_user", 1343 + S_IRUSR, 1344 + root, dev, 1345 + &i915_forcewake_fops); 1346 + if (IS_ERR(ent)) 1347 + return PTR_ERR(ent); 1348 + 1349 + return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1350 + } 1351 + 1337 1352 static struct drm_info_list i915_debugfs_list[] = { 1338 1353 {"i915_capabilities", i915_capabilities, 0}, 1339 1354 {"i915_gem_objects", i915_gem_object_info, 0}, ··· 1431 1324 {"i915_sr_status", i915_sr_status, 0}, 1432 1325 {"i915_opregion", i915_opregion, 0}, 1433 1326 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1327 + {"i915_context_status", i915_context_status, 0}, 1328 + {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1434 1329 }; 1435 1330 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1436 1331 ··· 1441 1332 int ret; 1442 1333 1443 1334 ret = i915_wedged_create(minor->debugfs_root, minor); 1335 + if (ret) 1336 + return ret; 1337 + 1338 + ret = i915_forcewake_create(minor->debugfs_root, minor); 1444 1339 if (ret) 1445 1340 return ret; 1446 1341 ··· 1457 1344 { 1458 1345 drm_debugfs_remove_files(i915_debugfs_list, 1459 1346 I915_DEBUGFS_ENTRIES, minor); 1347 + drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1348 + 1, minor); 1460 1349 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1461 1350 1, minor); 1462 1351 }
+49 -11
drivers/gpu/drm/i915/i915_dma.c
··· 571 571 struct intel_ring_buffer *ring = LP_RING(dev->dev_private); 572 572 573 573 i915_kernel_lost_context(dev); 574 - return intel_wait_ring_buffer(ring, ring->size - 8); 574 + return intel_wait_ring_idle(ring); 575 575 } 576 576 577 577 static int i915_flush_ioctl(struct drm_device *dev, void *data, ··· 1176 1176 return can_switch; 1177 1177 } 1178 1178 1179 - static int i915_load_modeset_init(struct drm_device *dev) 1179 + static int i915_load_gem_init(struct drm_device *dev) 1180 1180 { 1181 1181 struct drm_i915_private *dev_priv = dev->dev_private; 1182 1182 unsigned long prealloc_size, gtt_size, mappable_size; 1183 - int ret = 0; 1183 + int ret; 1184 1184 1185 1185 prealloc_size = dev_priv->mm.gtt->stolen_size; 1186 1186 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; ··· 1204 1204 ret = i915_gem_init_ringbuffer(dev); 1205 1205 mutex_unlock(&dev->struct_mutex); 1206 1206 if (ret) 1207 - goto out; 1207 + return ret; 1208 1208 1209 1209 /* Try to set up FBC with a reasonable compressed buffer size */ 1210 1210 if (I915_HAS_FBC(dev) && i915_powersave) { ··· 1222 1222 1223 1223 /* Allow hardware batchbuffers unless told otherwise. 
*/ 1224 1224 dev_priv->allow_batchbuffer = 1; 1225 + return 0; 1226 + } 1227 + 1228 + static int i915_load_modeset_init(struct drm_device *dev) 1229 + { 1230 + struct drm_i915_private *dev_priv = dev->dev_private; 1231 + int ret; 1225 1232 1226 1233 ret = intel_parse_bios(dev); 1227 1234 if (ret) ··· 1243 1236 */ 1244 1237 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1245 1238 if (ret && ret != -ENODEV) 1246 - goto cleanup_ringbuffer; 1239 + goto out; 1247 1240 1248 1241 intel_register_dsm_handler(); 1249 1242 ··· 1260 1253 1261 1254 intel_modeset_init(dev); 1262 1255 1263 - ret = drm_irq_install(dev); 1256 + ret = i915_load_gem_init(dev); 1264 1257 if (ret) 1265 1258 goto cleanup_vga_switcheroo; 1259 + 1260 + intel_modeset_gem_init(dev); 1261 + 1262 + if (IS_IVYBRIDGE(dev)) { 1263 + /* Share pre & uninstall handlers with ILK/SNB */ 1264 + dev->driver->irq_handler = ivybridge_irq_handler; 1265 + dev->driver->irq_preinstall = ironlake_irq_preinstall; 1266 + dev->driver->irq_postinstall = ivybridge_irq_postinstall; 1267 + dev->driver->irq_uninstall = ironlake_irq_uninstall; 1268 + dev->driver->enable_vblank = ivybridge_enable_vblank; 1269 + dev->driver->disable_vblank = ivybridge_disable_vblank; 1270 + } else if (HAS_PCH_SPLIT(dev)) { 1271 + dev->driver->irq_handler = ironlake_irq_handler; 1272 + dev->driver->irq_preinstall = ironlake_irq_preinstall; 1273 + dev->driver->irq_postinstall = ironlake_irq_postinstall; 1274 + dev->driver->irq_uninstall = ironlake_irq_uninstall; 1275 + dev->driver->enable_vblank = ironlake_enable_vblank; 1276 + dev->driver->disable_vblank = ironlake_disable_vblank; 1277 + } else { 1278 + dev->driver->irq_preinstall = i915_driver_irq_preinstall; 1279 + dev->driver->irq_postinstall = i915_driver_irq_postinstall; 1280 + dev->driver->irq_uninstall = i915_driver_irq_uninstall; 1281 + dev->driver->irq_handler = i915_driver_irq_handler; 1282 + dev->driver->enable_vblank = i915_enable_vblank; 1283 + 
dev->driver->disable_vblank = i915_disable_vblank; 1284 + } 1285 + 1286 + ret = drm_irq_install(dev); 1287 + if (ret) 1288 + goto cleanup_gem; 1266 1289 1267 1290 /* Always safe in the mode setting case. */ 1268 1291 /* FIXME: do pre/post-mode set stuff in core KMS code */ ··· 1311 1274 1312 1275 cleanup_irq: 1313 1276 drm_irq_uninstall(dev); 1277 + cleanup_gem: 1278 + mutex_lock(&dev->struct_mutex); 1279 + i915_gem_cleanup_ringbuffer(dev); 1280 + mutex_unlock(&dev->struct_mutex); 1314 1281 cleanup_vga_switcheroo: 1315 1282 vga_switcheroo_unregister_client(dev->pdev); 1316 1283 cleanup_vga_client: 1317 1284 vga_client_register(dev->pdev, NULL, NULL, NULL); 1318 - cleanup_ringbuffer: 1319 - mutex_lock(&dev->struct_mutex); 1320 - i915_gem_cleanup_ringbuffer(dev); 1321 - mutex_unlock(&dev->struct_mutex); 1322 1285 out: 1323 1286 return ret; 1324 1287 } ··· 2019 1982 2020 1983 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2021 1984 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2022 - if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1985 + if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 2023 1986 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2024 1987 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2025 1988 } ··· 2062 2025 2063 2026 spin_lock_init(&dev_priv->irq_lock); 2064 2027 spin_lock_init(&dev_priv->error_lock); 2028 + spin_lock_init(&dev_priv->rps_lock); 2065 2029 2066 2030 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2067 2031 dev_priv->num_pipe = 2;
+63 -5
drivers/gpu/drm/i915/i915_drv.c
··· 52 52 unsigned int i915_semaphores = 0; 53 53 module_param_named(semaphores, i915_semaphores, int, 0600); 54 54 55 - unsigned int i915_enable_rc6 = 0; 55 + unsigned int i915_enable_rc6 = 1; 56 56 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 57 + 58 + unsigned int i915_enable_fbc = 0; 59 + module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 57 60 58 61 unsigned int i915_lvds_downclock = 0; 59 62 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); ··· 172 169 static const struct intel_device_info intel_ironlake_m_info = { 173 170 .gen = 5, .is_mobile = 1, 174 171 .need_gfx_hws = 1, .has_hotplug = 1, 175 - .has_fbc = 0, /* disabled due to buggy hardware */ 172 + .has_fbc = 1, 176 173 .has_bsd_ring = 1, 177 174 }; 178 175 ··· 187 184 .gen = 6, .is_mobile = 1, 188 185 .need_gfx_hws = 1, .has_hotplug = 1, 189 186 .has_fbc = 1, 187 + .has_bsd_ring = 1, 188 + .has_blt_ring = 1, 189 + }; 190 + 191 + static const struct intel_device_info intel_ivybridge_d_info = { 192 + .is_ivybridge = 1, .gen = 7, 193 + .need_gfx_hws = 1, .has_hotplug = 1, 194 + .has_bsd_ring = 1, 195 + .has_blt_ring = 1, 196 + }; 197 + 198 + static const struct intel_device_info intel_ivybridge_m_info = { 199 + .is_ivybridge = 1, .gen = 7, .is_mobile = 1, 200 + .need_gfx_hws = 1, .has_hotplug = 1, 201 + .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ 190 202 .has_bsd_ring = 1, 191 203 .has_blt_ring = 1, 192 204 }; ··· 245 227 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 246 228 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 247 229 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 230 + INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ 231 + INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ 232 + INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 233 + INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 234 + INTEL_VGA_DEVICE(0x015a, 
&intel_ivybridge_d_info), /* GT1 server */ 248 235 {0, 0, 0} 249 236 }; 250 237 ··· 258 235 #endif 259 236 260 237 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 238 + #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 261 239 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 240 + #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 262 241 263 242 void intel_detect_pch (struct drm_device *dev) 264 243 { ··· 279 254 int id; 280 255 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 281 256 282 - if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 257 + if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 258 + dev_priv->pch_type = PCH_IBX; 259 + DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 260 + } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 283 261 dev_priv->pch_type = PCH_CPT; 284 262 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 263 + } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 264 + /* PantherPoint is CPT compatible */ 265 + dev_priv->pch_type = PCH_CPT; 266 + DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 285 267 } 286 268 } 287 269 pci_dev_put(pch); 288 270 } 289 271 } 290 272 291 - void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 273 + static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 292 274 { 293 275 int count; 294 276 ··· 311 279 udelay(10); 312 280 } 313 281 314 - void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 282 + /* 283 + * Generally this is called implicitly by the register read function. However, 284 + * if some sequence requires the GT to not power down then this function should 285 + * be called at the beginning of the sequence followed by a call to 286 + * gen6_gt_force_wake_put() at the end of the sequence. 
287 + */ 288 + void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 289 + { 290 + WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 291 + 292 + /* Forcewake is atomic in case we get in here without the lock */ 293 + if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) 294 + __gen6_gt_force_wake_get(dev_priv); 295 + } 296 + 297 + static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 315 298 { 316 299 I915_WRITE_NOTRACE(FORCEWAKE, 0); 317 300 POSTING_READ(FORCEWAKE); 301 + } 302 + 303 + /* 304 + * see gen6_gt_force_wake_get() 305 + */ 306 + void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 307 + { 308 + WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 309 + 310 + if (atomic_dec_and_test(&dev_priv->forcewake_count)) 311 + __gen6_gt_force_wake_put(dev_priv); 318 312 } 319 313 320 314 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+77 -36
drivers/gpu/drm/i915/i915_drv.h
··· 188 188 u32 dirty:1; 189 189 u32 purgeable:1; 190 190 u32 ring:4; 191 - u32 agp_type:1; 191 + u32 cache_level:2; 192 192 } *active_bo, *pinned_bo; 193 193 u32 active_bo_count, pinned_bo_count; 194 194 struct intel_overlay_error_state *overlay; ··· 203 203 int (*get_display_clock_speed)(struct drm_device *dev); 204 204 int (*get_fifo_size)(struct drm_device *dev, int plane); 205 205 void (*update_wm)(struct drm_device *dev); 206 + int (*crtc_mode_set)(struct drm_crtc *crtc, 207 + struct drm_display_mode *mode, 208 + struct drm_display_mode *adjusted_mode, 209 + int x, int y, 210 + struct drm_framebuffer *old_fb); 211 + void (*fdi_link_train)(struct drm_crtc *crtc); 212 + void (*init_clock_gating)(struct drm_device *dev); 213 + void (*init_pch_clock_gating)(struct drm_device *dev); 206 214 /* clock updates for mode set */ 207 215 /* cursor updates */ 208 216 /* render clock increase/decrease */ 209 217 /* display clock increase/decrease */ 210 218 /* pll clock increase/decrease */ 211 - /* clock gating init */ 212 219 }; 213 220 214 221 struct intel_device_info { ··· 230 223 u8 is_pineview : 1; 231 224 u8 is_broadwater : 1; 232 225 u8 is_crestline : 1; 226 + u8 is_ivybridge : 1; 233 227 u8 has_fbc : 1; 234 228 u8 has_pipe_cxsr : 1; 235 229 u8 has_hotplug : 1; ··· 250 242 FBC_BAD_PLANE, /* fbc not supported on plane */ 251 243 FBC_NOT_TILED, /* buffer not tiled */ 252 244 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 245 + FBC_MODULE_PARAM, 253 246 }; 254 247 255 248 enum intel_pch { ··· 685 676 686 677 bool mchbar_need_disable; 687 678 679 + struct work_struct rps_work; 680 + spinlock_t rps_lock; 681 + u32 pm_iir; 682 + 688 683 u8 cur_delay; 689 684 u8 min_delay; 690 685 u8 max_delay; ··· 716 703 struct intel_fbdev *fbdev; 717 704 718 705 struct drm_property *broadcast_rgb_property; 706 + 707 + atomic_t forcewake_count; 719 708 } drm_i915_private_t; 709 + 710 + enum i915_cache_level { 711 + I915_CACHE_NONE, 712 + I915_CACHE_LLC, 713 + I915_CACHE_LLC_MLC, /* 
gen6+ */ 714 + }; 720 715 721 716 struct drm_i915_gem_object { 722 717 struct drm_gem_object base; ··· 812 791 unsigned int pending_fenced_gpu_access:1; 813 792 unsigned int fenced_gpu_access:1; 814 793 794 + unsigned int cache_level:2; 795 + 815 796 struct page **pages; 816 797 817 798 /** ··· 850 827 /** Record of address bit 17 of each page at last unbind. */ 851 828 unsigned long *bit_17; 852 829 853 - /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 854 - uint32_t agp_type; 855 830 856 831 /** 857 832 * If present, while GEM_DOMAIN_CPU is in the read domain this array ··· 936 915 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 937 916 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 938 917 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 918 + #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 939 919 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 940 920 921 + /* 922 + * The genX designation typically refers to the render engine, so render 923 + * capability related checks should use IS_GEN, while display and other checks 924 + * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 925 + * chips, etc.). 
926 + */ 941 927 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 942 928 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 943 929 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 944 930 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 945 931 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 932 + #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 946 933 947 934 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 948 935 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) ··· 977 948 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 978 949 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 979 950 980 - #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 981 - #define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 951 + #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 952 + #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 982 953 983 954 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 984 955 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) ··· 996 967 extern unsigned int i915_panel_use_ssc; 997 968 extern int i915_vbt_sdvo_panel_type; 998 969 extern unsigned int i915_enable_rc6; 970 + extern unsigned int i915_enable_fbc; 999 971 1000 972 extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1001 973 extern int i915_resume(struct drm_device *dev); ··· 1040 1010 extern void i915_driver_irq_preinstall(struct drm_device * dev); 1041 1011 extern int i915_driver_irq_postinstall(struct drm_device *dev); 1042 1012 extern void i915_driver_irq_uninstall(struct drm_device * dev); 1013 + 1014 + extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS); 1015 + extern void ironlake_irq_preinstall(struct drm_device *dev); 1016 + extern int ironlake_irq_postinstall(struct drm_device *dev); 1017 + extern void ironlake_irq_uninstall(struct drm_device *dev); 1018 + 1019 + extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS); 1020 + extern void 
ivybridge_irq_preinstall(struct drm_device *dev); 1021 + extern int ivybridge_irq_postinstall(struct drm_device *dev); 1022 + extern void ivybridge_irq_uninstall(struct drm_device *dev); 1023 + 1043 1024 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1044 1025 struct drm_file *file_priv); 1045 1026 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 1046 1027 struct drm_file *file_priv); 1047 1028 extern int i915_enable_vblank(struct drm_device *dev, int crtc); 1048 1029 extern void i915_disable_vblank(struct drm_device *dev, int crtc); 1030 + extern int ironlake_enable_vblank(struct drm_device *dev, int crtc); 1031 + extern void ironlake_disable_vblank(struct drm_device *dev, int crtc); 1032 + extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc); 1033 + extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc); 1049 1034 extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 1050 1035 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); 1051 1036 extern int i915_vblank_swap(struct drm_device *dev, void *data, ··· 1310 1265 1311 1266 /* modesetting */ 1312 1267 extern void intel_modeset_init(struct drm_device *dev); 1268 + extern void intel_modeset_gem_init(struct drm_device *dev); 1313 1269 extern void intel_modeset_cleanup(struct drm_device *dev); 1314 1270 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1315 1271 extern void i8xx_disable_fbc(struct drm_device *dev); ··· 1358 1312 LOCK_TEST_WITH_RETURN(dev, file); \ 1359 1313 } while (0) 1360 1314 1315 + /* On SNB platform, before reading ring registers forcewake bit 1316 + * must be set to prevent GT core from power down and stale values being 1317 + * returned. 
1318 + */ 1319 + void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1320 + void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1321 + void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1322 + 1323 + /* We give fast paths for the really cool registers */ 1324 + #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1325 + (((dev_priv)->info->gen >= 6) && \ 1326 + ((reg) < 0x40000) && \ 1327 + ((reg) != FORCEWAKE)) 1361 1328 1362 1329 #define __i915_read(x, y) \ 1363 1330 static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1364 - u##x val = read##y(dev_priv->regs + reg); \ 1331 + u##x val = 0; \ 1332 + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1333 + gen6_gt_force_wake_get(dev_priv); \ 1334 + val = read##y(dev_priv->regs + reg); \ 1335 + gen6_gt_force_wake_put(dev_priv); \ 1336 + } else { \ 1337 + val = read##y(dev_priv->regs + reg); \ 1338 + } \ 1365 1339 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 1366 1340 return val; \ 1367 1341 } 1342 + 1368 1343 __i915_read(8, b) 1369 1344 __i915_read(16, w) 1370 1345 __i915_read(32, l) ··· 1395 1328 #define __i915_write(x, y) \ 1396 1329 static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 1397 1330 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 1331 + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1332 + __gen6_gt_wait_for_fifo(dev_priv); \ 1333 + } \ 1398 1334 write##y(val, dev_priv->regs + reg); \ 1399 1335 } 1400 1336 __i915_write(8, b) ··· 1426 1356 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1427 1357 1428 1358 1429 - /* On SNB platform, before reading ring registers forcewake bit 1430 - * must be set to prevent GT core from power down and stale values being 1431 - * returned. 
1432 - */ 1433 - void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1434 - void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1435 - void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1436 - 1437 - static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) 1438 - { 1439 - u32 val; 1440 - 1441 - if (dev_priv->info->gen >= 6) { 1442 - __gen6_gt_force_wake_get(dev_priv); 1443 - val = I915_READ(reg); 1444 - __gen6_gt_force_wake_put(dev_priv); 1445 - } else 1446 - val = I915_READ(reg); 1447 - 1448 - return val; 1449 - } 1450 - 1451 - static inline void i915_gt_write(struct drm_i915_private *dev_priv, 1452 - u32 reg, u32 val) 1453 - { 1454 - if (dev_priv->info->gen >= 6) 1455 - __gen6_gt_wait_for_fifo(dev_priv); 1456 - I915_WRITE(reg, val); 1457 - } 1458 1359 #endif
+17 -19
drivers/gpu/drm/i915/i915_gem.c
··· 2673 2673 update: 2674 2674 obj->tiling_changed = false; 2675 2675 switch (INTEL_INFO(dev)->gen) { 2676 + case 7: 2676 2677 case 6: 2677 2678 ret = sandybridge_write_fence_reg(obj, pipelined); 2678 2679 break; ··· 2707 2706 uint32_t fence_reg = reg - dev_priv->fence_regs; 2708 2707 2709 2708 switch (INTEL_INFO(dev)->gen) { 2709 + case 7: 2710 2710 case 6: 2711 2711 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); 2712 2712 break; ··· 2878 2876 * again at bind time. 2879 2877 */ 2880 2878 if (obj->pages == NULL) 2879 + return; 2880 + 2881 + /* If the GPU is snooping the contents of the CPU cache, 2882 + * we do not need to manually clear the CPU cache lines. However, 2883 + * the caches are only snooped when the render cache is 2884 + * flushed/invalidated. As we always have to emit invalidations 2885 + * and flushes when moving into and out of the RENDER domain, correct 2886 + * snooping behaviour occurs naturally as the result of our domain 2887 + * tracking. 2888 + */ 2889 + if (obj->cache_level != I915_CACHE_NONE) 2881 2890 return; 2882 2891 2883 2892 trace_i915_gem_object_clflush(obj); ··· 3582 3569 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3583 3570 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3584 3571 3585 - obj->agp_type = AGP_USER_MEMORY; 3572 + obj->cache_level = I915_CACHE_NONE; 3586 3573 obj->base.driver_private = NULL; 3587 3574 obj->fence_reg = I915_FENCE_REG_NONE; 3588 3575 INIT_LIST_HEAD(&obj->mm_list); ··· 3858 3845 dev_priv->num_fence_regs = 8; 3859 3846 3860 3847 /* Initialize fence registers to zero */ 3861 - switch (INTEL_INFO(dev)->gen) { 3862 - case 6: 3863 - for (i = 0; i < 16; i++) 3864 - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0); 3865 - break; 3866 - case 5: 3867 - case 4: 3868 - for (i = 0; i < 16; i++) 3869 - I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); 3870 - break; 3871 - case 3: 3872 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3873 - for (i = 0; i < 8; i++) 3874 - I915_WRITE(FENCE_REG_945_8 + (i * 4), 
0); 3875 - case 2: 3876 - for (i = 0; i < 8; i++) 3877 - I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); 3878 - break; 3848 + for (i = 0; i < dev_priv->num_fence_regs; i++) { 3849 + i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); 3879 3850 } 3851 + 3880 3852 i915_gem_detect_bit_6_swizzle(dev); 3881 3853 init_waitqueue_head(&dev_priv->pending_flip_queue); 3882 3854
+29 -6
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 29 29 #include "i915_trace.h" 30 30 #include "intel_drv.h" 31 31 32 + /* XXX kill agp_type! */ 33 + static unsigned int cache_level_to_agp_type(struct drm_device *dev, 34 + enum i915_cache_level cache_level) 35 + { 36 + switch (cache_level) { 37 + case I915_CACHE_LLC_MLC: 38 + if (INTEL_INFO(dev)->gen >= 6) 39 + return AGP_USER_CACHED_MEMORY_LLC_MLC; 40 + /* Older chipsets do not have this extra level of CPU 41 + * cacheing, so fallthrough and request the PTE simply 42 + * as cached. 43 + */ 44 + case I915_CACHE_LLC: 45 + return AGP_USER_CACHED_MEMORY; 46 + default: 47 + case I915_CACHE_NONE: 48 + return AGP_USER_MEMORY; 49 + } 50 + } 51 + 32 52 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 33 53 { 34 54 struct drm_i915_private *dev_priv = dev->dev_private; ··· 59 39 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 60 40 61 41 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 42 + unsigned int agp_type = 43 + cache_level_to_agp_type(dev, obj->cache_level); 44 + 62 45 i915_gem_clflush_object(obj); 63 46 64 47 if (dev_priv->mm.gtt->needs_dmar) { ··· 69 46 70 47 intel_gtt_insert_sg_entries(obj->sg_list, 71 48 obj->num_sg, 72 - obj->gtt_space->start 73 - >> PAGE_SHIFT, 74 - obj->agp_type); 49 + obj->gtt_space->start >> PAGE_SHIFT, 50 + agp_type); 75 51 } else 76 52 intel_gtt_insert_pages(obj->gtt_space->start 77 53 >> PAGE_SHIFT, 78 54 obj->base.size >> PAGE_SHIFT, 79 55 obj->pages, 80 - obj->agp_type); 56 + agp_type); 81 57 } 82 58 83 59 intel_gtt_chipset_flush(); ··· 86 64 { 87 65 struct drm_device *dev = obj->base.dev; 88 66 struct drm_i915_private *dev_priv = dev->dev_private; 67 + unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level); 89 68 int ret; 90 69 91 70 if (dev_priv->mm.gtt->needs_dmar) { ··· 100 77 intel_gtt_insert_sg_entries(obj->sg_list, 101 78 obj->num_sg, 102 79 obj->gtt_space->start >> PAGE_SHIFT, 103 - obj->agp_type); 80 + agp_type); 104 81 } else 105 82 
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, 106 83 obj->base.size >> PAGE_SHIFT, 107 84 obj->pages, 108 - obj->agp_type); 85 + agp_type); 109 86 110 87 return 0; 111 88 }
+1 -1
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 92 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 93 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 94 94 95 - if (IS_GEN5(dev) || IS_GEN6(dev)) { 95 + if (INTEL_INFO(dev)->gen >= 5) { 96 96 /* On Ironlake whatever DRAM config, GPU always do 97 97 * same swizzling setup. 98 98 */
+264 -47
drivers/gpu/drm/i915/i915_irq.c
··· 367 367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 368 368 } 369 369 370 - static void gen6_pm_irq_handler(struct drm_device *dev) 370 + static void gen6_pm_rps_work(struct work_struct *work) 371 371 { 372 - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 372 + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 373 + rps_work); 373 374 u8 new_delay = dev_priv->cur_delay; 374 - u32 pm_iir; 375 + u32 pm_iir, pm_imr; 375 376 376 - pm_iir = I915_READ(GEN6_PMIIR); 377 + spin_lock_irq(&dev_priv->rps_lock); 378 + pm_iir = dev_priv->pm_iir; 379 + dev_priv->pm_iir = 0; 380 + pm_imr = I915_READ(GEN6_PMIMR); 381 + spin_unlock_irq(&dev_priv->rps_lock); 382 + 377 383 if (!pm_iir) 378 384 return; 379 385 386 + mutex_lock(&dev_priv->dev->struct_mutex); 380 387 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 381 388 if (dev_priv->cur_delay != dev_priv->max_delay) 382 389 new_delay = dev_priv->cur_delay + 1; 383 390 if (new_delay > dev_priv->max_delay) 384 391 new_delay = dev_priv->max_delay; 385 392 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { 393 + gen6_gt_force_wake_get(dev_priv); 386 394 if (dev_priv->cur_delay != dev_priv->min_delay) 387 395 new_delay = dev_priv->cur_delay - 1; 388 396 if (new_delay < dev_priv->min_delay) { ··· 404 396 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 405 397 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); 406 398 } 407 - 399 + gen6_gt_force_wake_put(dev_priv); 408 400 } 409 401 410 - gen6_set_rps(dev, new_delay); 402 + gen6_set_rps(dev_priv->dev, new_delay); 411 403 dev_priv->cur_delay = new_delay; 412 404 413 - I915_WRITE(GEN6_PMIIR, pm_iir); 405 + /* 406 + * rps_lock not held here because clearing is non-destructive. There is 407 + * an *extremely* unlikely race with gen6_rps_enable() that is prevented 408 + * by holding struct_mutex for the duration of the write. 
409 + */ 410 + I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir); 411 + mutex_unlock(&dev_priv->dev->struct_mutex); 414 412 } 415 413 416 414 static void pch_irq_handler(struct drm_device *dev) ··· 462 448 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 463 449 } 464 450 465 - static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 451 + irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 466 452 { 453 + struct drm_device *dev = (struct drm_device *) arg; 454 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 455 + int ret = IRQ_NONE; 456 + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 457 + struct drm_i915_master_private *master_priv; 458 + 459 + atomic_inc(&dev_priv->irq_received); 460 + 461 + /* disable master interrupt before clearing iir */ 462 + de_ier = I915_READ(DEIER); 463 + I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 464 + POSTING_READ(DEIER); 465 + 466 + de_iir = I915_READ(DEIIR); 467 + gt_iir = I915_READ(GTIIR); 468 + pch_iir = I915_READ(SDEIIR); 469 + pm_iir = I915_READ(GEN6_PMIIR); 470 + 471 + if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) 472 + goto done; 473 + 474 + ret = IRQ_HANDLED; 475 + 476 + if (dev->primary->master) { 477 + master_priv = dev->primary->master->driver_priv; 478 + if (master_priv->sarea_priv) 479 + master_priv->sarea_priv->last_dispatch = 480 + READ_BREADCRUMB(dev_priv); 481 + } 482 + 483 + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 484 + notify_ring(dev, &dev_priv->ring[RCS]); 485 + if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) 486 + notify_ring(dev, &dev_priv->ring[VCS]); 487 + if (gt_iir & GT_BLT_USER_INTERRUPT) 488 + notify_ring(dev, &dev_priv->ring[BCS]); 489 + 490 + if (de_iir & DE_GSE_IVB) 491 + intel_opregion_gse_intr(dev); 492 + 493 + if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { 494 + intel_prepare_page_flip(dev, 0); 495 + intel_finish_page_flip_plane(dev, 0); 496 + } 497 + 498 + if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { 499 + intel_prepare_page_flip(dev, 1); 500 + 
intel_finish_page_flip_plane(dev, 1); 501 + } 502 + 503 + if (de_iir & DE_PIPEA_VBLANK_IVB) 504 + drm_handle_vblank(dev, 0); 505 + 506 + if (de_iir & DE_PIPEB_VBLANK_IVB); 507 + drm_handle_vblank(dev, 1); 508 + 509 + /* check event from PCH */ 510 + if (de_iir & DE_PCH_EVENT_IVB) { 511 + if (pch_iir & SDE_HOTPLUG_MASK_CPT) 512 + queue_work(dev_priv->wq, &dev_priv->hotplug_work); 513 + pch_irq_handler(dev); 514 + } 515 + 516 + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { 517 + unsigned long flags; 518 + spin_lock_irqsave(&dev_priv->rps_lock, flags); 519 + WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); 520 + I915_WRITE(GEN6_PMIMR, pm_iir); 521 + dev_priv->pm_iir |= pm_iir; 522 + spin_unlock_irqrestore(&dev_priv->rps_lock, flags); 523 + queue_work(dev_priv->wq, &dev_priv->rps_work); 524 + } 525 + 526 + /* should clear PCH hotplug event before clear CPU irq */ 527 + I915_WRITE(SDEIIR, pch_iir); 528 + I915_WRITE(GTIIR, gt_iir); 529 + I915_WRITE(DEIIR, de_iir); 530 + I915_WRITE(GEN6_PMIIR, pm_iir); 531 + 532 + done: 533 + I915_WRITE(DEIER, de_ier); 534 + POSTING_READ(DEIER); 535 + 536 + return ret; 537 + } 538 + 539 + irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 540 + { 541 + struct drm_device *dev = (struct drm_device *) arg; 467 542 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 468 543 int ret = IRQ_NONE; 469 544 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 470 545 u32 hotplug_mask; 471 546 struct drm_i915_master_private *master_priv; 472 547 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; 548 + 549 + atomic_inc(&dev_priv->irq_received); 473 550 474 551 if (IS_GEN6(dev)) 475 552 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; ··· 631 526 i915_handle_rps_change(dev); 632 527 } 633 528 634 - if (IS_GEN6(dev)) 635 - gen6_pm_irq_handler(dev); 529 + if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) { 530 + /* 531 + * IIR bits should never already be set because IMR should 532 + * prevent an interrupt from being shown in IIR. 
The warning 533 + * displays a case where we've unsafely cleared 534 + * dev_priv->pm_iir. Although missing an interrupt of the same 535 + * type is not a problem, it displays a problem in the logic. 536 + * 537 + * The mask bit in IMR is cleared by rps_work. 538 + */ 539 + unsigned long flags; 540 + spin_lock_irqsave(&dev_priv->rps_lock, flags); 541 + WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); 542 + I915_WRITE(GEN6_PMIMR, pm_iir); 543 + dev_priv->pm_iir |= pm_iir; 544 + spin_unlock_irqrestore(&dev_priv->rps_lock, flags); 545 + queue_work(dev_priv->wq, &dev_priv->rps_work); 546 + } 636 547 637 548 /* should clear PCH hotplug event before clear CPU irq */ 638 549 I915_WRITE(SDEIIR, pch_iir); 639 550 I915_WRITE(GTIIR, gt_iir); 640 551 I915_WRITE(DEIIR, de_iir); 552 + I915_WRITE(GEN6_PMIIR, pm_iir); 641 553 642 554 done: 643 555 I915_WRITE(DEIER, de_ier); ··· 798 676 err->dirty = obj->dirty; 799 677 err->purgeable = obj->madv != I915_MADV_WILLNEED; 800 678 err->ring = obj->ring ? obj->ring->id : 0; 801 - err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; 679 + err->cache_level = obj->cache_level; 802 680 803 681 if (++i == count) 804 682 break; ··· 1225 1103 1226 1104 atomic_inc(&dev_priv->irq_received); 1227 1105 1228 - if (HAS_PCH_SPLIT(dev)) 1229 - return ironlake_irq_handler(dev); 1230 - 1231 1106 iir = I915_READ(IIR); 1232 1107 1233 1108 if (INTEL_INFO(dev)->gen >= 4) ··· 1463 1344 return -EINVAL; 1464 1345 1465 1346 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1466 - if (HAS_PCH_SPLIT(dev)) 1467 - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
1468 - DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1469 - else if (INTEL_INFO(dev)->gen >= 4) 1347 + if (INTEL_INFO(dev)->gen >= 4) 1470 1348 i915_enable_pipestat(dev_priv, pipe, 1471 1349 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1472 1350 else ··· 1473 1357 /* maintain vblank delivery even in deep C-states */ 1474 1358 if (dev_priv->info->gen == 3) 1475 1359 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); 1360 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1361 + 1362 + return 0; 1363 + } 1364 + 1365 + int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1366 + { 1367 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1368 + unsigned long irqflags; 1369 + 1370 + if (!i915_pipe_enabled(dev, pipe)) 1371 + return -EINVAL; 1372 + 1373 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1374 + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1375 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1376 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1377 + 1378 + return 0; 1379 + } 1380 + 1381 + int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1382 + { 1383 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1384 + unsigned long irqflags; 1385 + 1386 + if (!i915_pipe_enabled(dev, pipe)) 1387 + return -EINVAL; 1388 + 1389 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1390 + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1391 + DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1476 1392 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1477 1393 1478 1394 return 0; ··· 1523 1375 I915_WRITE(INSTPM, 1524 1376 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); 1525 1377 1526 - if (HAS_PCH_SPLIT(dev)) 1527 - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
1528 - DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1529 - else 1530 - i915_disable_pipestat(dev_priv, pipe, 1531 - PIPE_VBLANK_INTERRUPT_ENABLE | 1532 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 1378 + i915_disable_pipestat(dev_priv, pipe, 1379 + PIPE_VBLANK_INTERRUPT_ENABLE | 1380 + PIPE_START_VBLANK_INTERRUPT_ENABLE); 1381 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1382 + } 1383 + 1384 + void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1385 + { 1386 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1387 + unsigned long irqflags; 1388 + 1389 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1390 + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1391 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1392 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1393 + } 1394 + 1395 + void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1396 + { 1397 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1398 + unsigned long irqflags; 1399 + 1400 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1401 + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
1402 + DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1533 1403 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1534 1404 } 1535 1405 ··· 1728 1562 1729 1563 /* drm_dma.h hooks 1730 1564 */ 1731 - static void ironlake_irq_preinstall(struct drm_device *dev) 1565 + void ironlake_irq_preinstall(struct drm_device *dev) 1732 1566 { 1733 1567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1568 + 1569 + atomic_set(&dev_priv->irq_received, 0); 1570 + 1571 + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1572 + INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1573 + if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 1574 + INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 1734 1575 1735 1576 I915_WRITE(HWSTAM, 0xeffe); 1736 1577 ··· 1758 1585 POSTING_READ(SDEIER); 1759 1586 } 1760 1587 1761 - static int ironlake_irq_postinstall(struct drm_device *dev) 1588 + int ironlake_irq_postinstall(struct drm_device *dev) 1762 1589 { 1763 1590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1764 1591 /* enable kind of interrupts always enabled */ ··· 1767 1594 u32 render_irqs; 1768 1595 u32 hotplug_mask; 1769 1596 1597 + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); 1598 + if (HAS_BSD(dev)) 1599 + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); 1600 + if (HAS_BLT(dev)) 1601 + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); 1602 + 1603 + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1770 1604 dev_priv->irq_mask = ~display_mask; 1771 1605 1772 1606 /* should always can generate irq */ ··· 1830 1650 return 0; 1831 1651 } 1832 1652 1653 + int ivybridge_irq_postinstall(struct drm_device *dev) 1654 + { 1655 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1656 + /* enable kind of interrupts always enabled */ 1657 + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 1658 + DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | 1659 + DE_PLANEB_FLIP_DONE_IVB; 1660 + u32 
render_irqs; 1661 + u32 hotplug_mask; 1662 + 1663 + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); 1664 + if (HAS_BSD(dev)) 1665 + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); 1666 + if (HAS_BLT(dev)) 1667 + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); 1668 + 1669 + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1670 + dev_priv->irq_mask = ~display_mask; 1671 + 1672 + /* should always can generate irq */ 1673 + I915_WRITE(DEIIR, I915_READ(DEIIR)); 1674 + I915_WRITE(DEIMR, dev_priv->irq_mask); 1675 + I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | 1676 + DE_PIPEB_VBLANK_IVB); 1677 + POSTING_READ(DEIER); 1678 + 1679 + dev_priv->gt_irq_mask = ~0; 1680 + 1681 + I915_WRITE(GTIIR, I915_READ(GTIIR)); 1682 + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1683 + 1684 + render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | 1685 + GT_BLT_USER_INTERRUPT; 1686 + I915_WRITE(GTIER, render_irqs); 1687 + POSTING_READ(GTIER); 1688 + 1689 + hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1690 + SDE_PORTB_HOTPLUG_CPT | 1691 + SDE_PORTC_HOTPLUG_CPT | 1692 + SDE_PORTD_HOTPLUG_CPT); 1693 + dev_priv->pch_irq_mask = ~hotplug_mask; 1694 + 1695 + I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1696 + I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1697 + I915_WRITE(SDEIER, hotplug_mask); 1698 + POSTING_READ(SDEIER); 1699 + 1700 + return 0; 1701 + } 1702 + 1833 1703 void i915_driver_irq_preinstall(struct drm_device * dev) 1834 1704 { 1835 1705 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ··· 1889 1659 1890 1660 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1891 1661 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1892 - 1893 - if (HAS_PCH_SPLIT(dev)) { 1894 - ironlake_irq_preinstall(dev); 1895 - return; 1896 - } 1897 1662 1898 1663 if (I915_HAS_HOTPLUG(dev)) { 1899 1664 I915_WRITE(PORT_HOTPLUG_EN, 0); ··· 1913 1688 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1914 1689 u32 error_mask; 
1915 1690 1916 - DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); 1917 - if (HAS_BSD(dev)) 1918 - DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); 1919 - if (HAS_BLT(dev)) 1920 - DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); 1921 - 1922 1691 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1923 - 1924 - if (HAS_PCH_SPLIT(dev)) 1925 - return ironlake_irq_postinstall(dev); 1926 1692 1927 1693 /* Unmask the interrupts that we always want on. */ 1928 1694 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; ··· 1983 1767 return 0; 1984 1768 } 1985 1769 1986 - static void ironlake_irq_uninstall(struct drm_device *dev) 1770 + void ironlake_irq_uninstall(struct drm_device *dev) 1987 1771 { 1988 1772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1773 + 1774 + if (!dev_priv) 1775 + return; 1776 + 1777 + dev_priv->vblank_pipe = 0; 1778 + 1989 1779 I915_WRITE(HWSTAM, 0xffffffff); 1990 1780 1991 1781 I915_WRITE(DEIMR, 0xffffffff); ··· 2012 1790 return; 2013 1791 2014 1792 dev_priv->vblank_pipe = 0; 2015 - 2016 - if (HAS_PCH_SPLIT(dev)) { 2017 - ironlake_irq_uninstall(dev); 2018 - return; 2019 - } 2020 1793 2021 1794 if (I915_HAS_HOTPLUG(dev)) { 2022 1795 I915_WRITE(PORT_HOTPLUG_EN, 0);
+34 -1
drivers/gpu/drm/i915/i915_reg.h
··· 291 291 #define RING_MAX_IDLE(base) ((base)+0x54) 292 292 #define RING_HWS_PGA(base) ((base)+0x80) 293 293 #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 294 + #define RENDER_HWS_PGA_GEN7 (0x04080) 295 + #define BSD_HWS_PGA_GEN7 (0x04180) 296 + #define BLT_HWS_PGA_GEN7 (0x04280) 294 297 #define RING_ACTHD(base) ((base)+0x74) 295 298 #define RING_NOPID(base) ((base)+0x94) 296 299 #define RING_IMR(base) ((base)+0xa8) ··· 2781 2778 #define DE_PIPEA_VSYNC (1 << 3) 2782 2779 #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 2783 2780 2781 + /* More Ivybridge lolz */ 2782 + #define DE_ERR_DEBUG_IVB (1<<30) 2783 + #define DE_GSE_IVB (1<<29) 2784 + #define DE_PCH_EVENT_IVB (1<<28) 2785 + #define DE_DP_A_HOTPLUG_IVB (1<<27) 2786 + #define DE_AUX_CHANNEL_A_IVB (1<<26) 2787 + #define DE_SPRITEB_FLIP_DONE_IVB (1<<9) 2788 + #define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 2789 + #define DE_PLANEB_FLIP_DONE_IVB (1<<8) 2790 + #define DE_PLANEA_FLIP_DONE_IVB (1<<3) 2791 + #define DE_PIPEB_VBLANK_IVB (1<<5) 2792 + #define DE_PIPEA_VBLANK_IVB (1<<0) 2793 + 2784 2794 #define DEISR 0x44000 2785 2795 #define DEIMR 0x44004 2786 2796 #define DEIIR 0x44008 ··· 2825 2809 #define ILK_eDP_A_DISABLE (1<<24) 2826 2810 #define ILK_DESKTOP (1<<23) 2827 2811 #define ILK_DSPCLK_GATE 0x42020 2812 + #define IVB_VRHUNIT_CLK_GATE (1<<28) 2828 2813 #define ILK_DPARB_CLK_GATE (1<<5) 2829 2814 #define ILK_DPFD_CLK_GATE (1<<7) 2830 2815 ··· 3074 3057 #define TRANS_6BPC (2<<5) 3075 3058 #define TRANS_12BPC (3<<5) 3076 3059 3060 + #define SOUTH_CHICKEN2 0xc2004 3061 + #define DPLS_EDP_PPS_FIX_DIS (1<<0) 3062 + 3077 3063 #define _FDI_RXA_CHICKEN 0xc200c 3078 3064 #define _FDI_RXB_CHICKEN 0xc2010 3079 3065 #define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) ··· 3124 3104 #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 3125 3105 /* Ironlake: hardwired to 1 */ 3126 3106 #define FDI_TX_PLL_ENABLE (1<<14) 3107 + 3108 + /* Ivybridge has different bits for lolz */ 3109 + #define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8) 3110 + #define 
FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8) 3111 + #define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8) 3112 + #define FDI_LINK_TRAIN_NONE_IVB (3<<8) 3113 + 3127 3114 /* both Tx and Rx */ 3115 + #define FDI_LINK_TRAIN_AUTO (1<<10) 3128 3116 #define FDI_SCRAMBLING_ENABLE (0<<7) 3129 3117 #define FDI_SCRAMBLING_DISABLE (1<<7) 3130 3118 ··· 3142 3114 #define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 3143 3115 #define FDI_RX_ENABLE (1<<31) 3144 3116 /* train, dp width same as FDI_TX */ 3117 + #define FDI_FS_ERRC_ENABLE (1<<27) 3118 + #define FDI_FE_ERRC_ENABLE (1<<26) 3145 3119 #define FDI_DP_PORT_WIDTH_X8 (7<<19) 3146 3120 #define FDI_8BPC (0<<16) 3147 3121 #define FDI_10BPC (1<<16) ··· 3416 3386 #define GEN6_PMINTRMSK 0xA168 3417 3387 3418 3388 #define GEN6_PMISR 0x44020 3419 - #define GEN6_PMIMR 0x44024 3389 + #define GEN6_PMIMR 0x44024 /* rps_lock */ 3420 3390 #define GEN6_PMIIR 0x44028 3421 3391 #define GEN6_PMIER 0x4402C 3422 3392 #define GEN6_PM_MBOX_EVENT (1<<25) ··· 3426 3396 #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) 3427 3397 #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) 3428 3398 #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) 3399 + #define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ 3400 + GEN6_PM_RP_DOWN_THRESHOLD | \ 3401 + GEN6_PM_RP_DOWN_TIMEOUT) 3429 3402 3430 3403 #define GEN6_PCODE_MAILBOX 0x138124 3431 3404 #define GEN6_PCODE_READY (1<<31)
+1 -2
drivers/gpu/drm/i915/i915_suspend.c
··· 863 863 I915_WRITE(IMR, dev_priv->saveIMR); 864 864 } 865 865 866 - /* Clock gating state */ 867 - intel_enable_clock_gating(dev); 866 + intel_init_clock_gating(dev); 868 867 869 868 if (IS_IRONLAKE_M(dev)) { 870 869 ironlake_enable_drps(dev);
+3 -3
drivers/gpu/drm/i915/intel_bios.c
··· 214 214 i915_lvds_downclock) { 215 215 dev_priv->lvds_downclock_avail = 1; 216 216 dev_priv->lvds_downclock = temp_downclock; 217 - DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", 218 - "Normal Clock %dKHz, downclock %dKHz\n", 219 - temp_downclock, panel_fixed_mode->clock); 217 + DRM_DEBUG_KMS("LVDS downclock is found in VBT. " 218 + "Normal Clock %dKHz, downclock %dKHz\n", 219 + temp_downclock, panel_fixed_mode->clock); 220 220 } 221 221 return; 222 222 }
+11 -13
drivers/gpu/drm/i915/intel_crt.c
··· 305 305 } 306 306 307 307 static enum drm_connector_status 308 - intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) 308 + intel_crt_load_detect(struct intel_crt *crt) 309 309 { 310 - struct drm_encoder *encoder = &crt->base.base; 311 - struct drm_device *dev = encoder->dev; 310 + struct drm_device *dev = crt->base.base.dev; 312 311 struct drm_i915_private *dev_priv = dev->dev_private; 313 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 314 - uint32_t pipe = intel_crtc->pipe; 312 + uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe; 315 313 uint32_t save_bclrpat; 316 314 uint32_t save_vtotal; 317 315 uint32_t vtotal, vactive; ··· 430 432 struct drm_device *dev = connector->dev; 431 433 struct intel_crt *crt = intel_attached_crt(connector); 432 434 struct drm_crtc *crtc; 433 - int dpms_mode; 434 435 enum drm_connector_status status; 435 436 436 437 if (I915_HAS_HOTPLUG(dev)) { ··· 451 454 /* for pre-945g platforms use load detect */ 452 455 crtc = crt->base.base.crtc; 453 456 if (crtc && crtc->enabled) { 454 - status = intel_crt_load_detect(crtc, crt); 457 + status = intel_crt_load_detect(crt); 455 458 } else { 456 - crtc = intel_get_load_detect_pipe(&crt->base, connector, 457 - NULL, &dpms_mode); 458 - if (crtc) { 459 + struct intel_load_detect_pipe tmp; 460 + 461 + if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 462 + &tmp)) { 459 463 if (intel_crt_detect_ddc(connector)) 460 464 status = connector_status_connected; 461 465 else 462 - status = intel_crt_load_detect(crtc, crt); 463 - intel_release_load_detect_pipe(&crt->base, 464 - connector, dpms_mode); 466 + status = intel_crt_load_detect(crt); 467 + intel_release_load_detect_pipe(&crt->base, connector, 468 + &tmp); 465 469 } else 466 470 status = connector_status_unknown; 467 471 }
+1399 -1014
drivers/gpu/drm/i915/intel_display.c
··· 76 76 int, int, intel_clock_t *); 77 77 }; 78 78 79 - #define I8XX_DOT_MIN 25000 80 - #define I8XX_DOT_MAX 350000 81 - #define I8XX_VCO_MIN 930000 82 - #define I8XX_VCO_MAX 1400000 83 - #define I8XX_N_MIN 3 84 - #define I8XX_N_MAX 16 85 - #define I8XX_M_MIN 96 86 - #define I8XX_M_MAX 140 87 - #define I8XX_M1_MIN 18 88 - #define I8XX_M1_MAX 26 89 - #define I8XX_M2_MIN 6 90 - #define I8XX_M2_MAX 16 91 - #define I8XX_P_MIN 4 92 - #define I8XX_P_MAX 128 93 - #define I8XX_P1_MIN 2 94 - #define I8XX_P1_MAX 33 95 - #define I8XX_P1_LVDS_MIN 1 96 - #define I8XX_P1_LVDS_MAX 6 97 - #define I8XX_P2_SLOW 4 98 - #define I8XX_P2_FAST 2 99 - #define I8XX_P2_LVDS_SLOW 14 100 - #define I8XX_P2_LVDS_FAST 7 101 - #define I8XX_P2_SLOW_LIMIT 165000 102 - 103 - #define I9XX_DOT_MIN 20000 104 - #define I9XX_DOT_MAX 400000 105 - #define I9XX_VCO_MIN 1400000 106 - #define I9XX_VCO_MAX 2800000 107 - #define PINEVIEW_VCO_MIN 1700000 108 - #define PINEVIEW_VCO_MAX 3500000 109 - #define I9XX_N_MIN 1 110 - #define I9XX_N_MAX 6 111 - /* Pineview's Ncounter is a ring counter */ 112 - #define PINEVIEW_N_MIN 3 113 - #define PINEVIEW_N_MAX 6 114 - #define I9XX_M_MIN 70 115 - #define I9XX_M_MAX 120 116 - #define PINEVIEW_M_MIN 2 117 - #define PINEVIEW_M_MAX 256 118 - #define I9XX_M1_MIN 10 119 - #define I9XX_M1_MAX 22 120 - #define I9XX_M2_MIN 5 121 - #define I9XX_M2_MAX 9 122 - /* Pineview M1 is reserved, and must be 0 */ 123 - #define PINEVIEW_M1_MIN 0 124 - #define PINEVIEW_M1_MAX 0 125 - #define PINEVIEW_M2_MIN 0 126 - #define PINEVIEW_M2_MAX 254 127 - #define I9XX_P_SDVO_DAC_MIN 5 128 - #define I9XX_P_SDVO_DAC_MAX 80 129 - #define I9XX_P_LVDS_MIN 7 130 - #define I9XX_P_LVDS_MAX 98 131 - #define PINEVIEW_P_LVDS_MIN 7 132 - #define PINEVIEW_P_LVDS_MAX 112 133 - #define I9XX_P1_MIN 1 134 - #define I9XX_P1_MAX 8 135 - #define I9XX_P2_SDVO_DAC_SLOW 10 136 - #define I9XX_P2_SDVO_DAC_FAST 5 137 - #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 138 - #define I9XX_P2_LVDS_SLOW 14 139 - #define 
I9XX_P2_LVDS_FAST 7 140 - #define I9XX_P2_LVDS_SLOW_LIMIT 112000 141 - 142 - /*The parameter is for SDVO on G4x platform*/ 143 - #define G4X_DOT_SDVO_MIN 25000 144 - #define G4X_DOT_SDVO_MAX 270000 145 - #define G4X_VCO_MIN 1750000 146 - #define G4X_VCO_MAX 3500000 147 - #define G4X_N_SDVO_MIN 1 148 - #define G4X_N_SDVO_MAX 4 149 - #define G4X_M_SDVO_MIN 104 150 - #define G4X_M_SDVO_MAX 138 151 - #define G4X_M1_SDVO_MIN 17 152 - #define G4X_M1_SDVO_MAX 23 153 - #define G4X_M2_SDVO_MIN 5 154 - #define G4X_M2_SDVO_MAX 11 155 - #define G4X_P_SDVO_MIN 10 156 - #define G4X_P_SDVO_MAX 30 157 - #define G4X_P1_SDVO_MIN 1 158 - #define G4X_P1_SDVO_MAX 3 159 - #define G4X_P2_SDVO_SLOW 10 160 - #define G4X_P2_SDVO_FAST 10 161 - #define G4X_P2_SDVO_LIMIT 270000 162 - 163 - /*The parameter is for HDMI_DAC on G4x platform*/ 164 - #define G4X_DOT_HDMI_DAC_MIN 22000 165 - #define G4X_DOT_HDMI_DAC_MAX 400000 166 - #define G4X_N_HDMI_DAC_MIN 1 167 - #define G4X_N_HDMI_DAC_MAX 4 168 - #define G4X_M_HDMI_DAC_MIN 104 169 - #define G4X_M_HDMI_DAC_MAX 138 170 - #define G4X_M1_HDMI_DAC_MIN 16 171 - #define G4X_M1_HDMI_DAC_MAX 23 172 - #define G4X_M2_HDMI_DAC_MIN 5 173 - #define G4X_M2_HDMI_DAC_MAX 11 174 - #define G4X_P_HDMI_DAC_MIN 5 175 - #define G4X_P_HDMI_DAC_MAX 80 176 - #define G4X_P1_HDMI_DAC_MIN 1 177 - #define G4X_P1_HDMI_DAC_MAX 8 178 - #define G4X_P2_HDMI_DAC_SLOW 10 179 - #define G4X_P2_HDMI_DAC_FAST 5 180 - #define G4X_P2_HDMI_DAC_LIMIT 165000 181 - 182 - /*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/ 183 - #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 184 - #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 185 - #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 186 - #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 187 - #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 188 - #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 189 - #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 190 - #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 191 - #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 192 - #define 
G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 193 - #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 194 - #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 195 - #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 196 - #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 197 - #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 198 - #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 199 - #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 200 - 201 - /*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/ 202 - #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 203 - #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 204 - #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 205 - #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 206 - #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 207 - #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 208 - #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 209 - #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 210 - #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 211 - #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 212 - #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 213 - #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 214 - #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 215 - #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 216 - #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 217 - #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 218 - #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 219 - 220 - /*The parameter is for DISPLAY PORT on G4x platform*/ 221 - #define G4X_DOT_DISPLAY_PORT_MIN 161670 222 - #define G4X_DOT_DISPLAY_PORT_MAX 227000 223 - #define G4X_N_DISPLAY_PORT_MIN 1 224 - #define G4X_N_DISPLAY_PORT_MAX 2 225 - #define G4X_M_DISPLAY_PORT_MIN 97 226 - #define G4X_M_DISPLAY_PORT_MAX 108 227 - #define G4X_M1_DISPLAY_PORT_MIN 0x10 228 - #define G4X_M1_DISPLAY_PORT_MAX 0x12 229 - #define G4X_M2_DISPLAY_PORT_MIN 0x05 230 - #define G4X_M2_DISPLAY_PORT_MAX 0x06 231 - #define G4X_P_DISPLAY_PORT_MIN 10 232 - #define G4X_P_DISPLAY_PORT_MAX 20 233 - #define G4X_P1_DISPLAY_PORT_MIN 1 234 - #define G4X_P1_DISPLAY_PORT_MAX 2 235 - #define G4X_P2_DISPLAY_PORT_SLOW 10 236 - #define G4X_P2_DISPLAY_PORT_FAST 10 237 - #define G4X_P2_DISPLAY_PORT_LIMIT 0 
238 - 239 - /* Ironlake / Sandybridge */ 240 - /* as we calculate clock using (register_value + 2) for 241 - N/M1/M2, so here the range value for them is (actual_value-2). 242 - */ 243 - #define IRONLAKE_DOT_MIN 25000 244 - #define IRONLAKE_DOT_MAX 350000 245 - #define IRONLAKE_VCO_MIN 1760000 246 - #define IRONLAKE_VCO_MAX 3510000 247 - #define IRONLAKE_M1_MIN 12 248 - #define IRONLAKE_M1_MAX 22 249 - #define IRONLAKE_M2_MIN 5 250 - #define IRONLAKE_M2_MAX 9 251 - #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ 252 - 253 - /* We have parameter ranges for different type of outputs. */ 254 - 255 - /* DAC & HDMI Refclk 120Mhz */ 256 - #define IRONLAKE_DAC_N_MIN 1 257 - #define IRONLAKE_DAC_N_MAX 5 258 - #define IRONLAKE_DAC_M_MIN 79 259 - #define IRONLAKE_DAC_M_MAX 127 260 - #define IRONLAKE_DAC_P_MIN 5 261 - #define IRONLAKE_DAC_P_MAX 80 262 - #define IRONLAKE_DAC_P1_MIN 1 263 - #define IRONLAKE_DAC_P1_MAX 8 264 - #define IRONLAKE_DAC_P2_SLOW 10 265 - #define IRONLAKE_DAC_P2_FAST 5 266 - 267 - /* LVDS single-channel 120Mhz refclk */ 268 - #define IRONLAKE_LVDS_S_N_MIN 1 269 - #define IRONLAKE_LVDS_S_N_MAX 3 270 - #define IRONLAKE_LVDS_S_M_MIN 79 271 - #define IRONLAKE_LVDS_S_M_MAX 118 272 - #define IRONLAKE_LVDS_S_P_MIN 28 273 - #define IRONLAKE_LVDS_S_P_MAX 112 274 - #define IRONLAKE_LVDS_S_P1_MIN 2 275 - #define IRONLAKE_LVDS_S_P1_MAX 8 276 - #define IRONLAKE_LVDS_S_P2_SLOW 14 277 - #define IRONLAKE_LVDS_S_P2_FAST 14 278 - 279 - /* LVDS dual-channel 120Mhz refclk */ 280 - #define IRONLAKE_LVDS_D_N_MIN 1 281 - #define IRONLAKE_LVDS_D_N_MAX 3 282 - #define IRONLAKE_LVDS_D_M_MIN 79 283 - #define IRONLAKE_LVDS_D_M_MAX 127 284 - #define IRONLAKE_LVDS_D_P_MIN 14 285 - #define IRONLAKE_LVDS_D_P_MAX 56 286 - #define IRONLAKE_LVDS_D_P1_MIN 2 287 - #define IRONLAKE_LVDS_D_P1_MAX 8 288 - #define IRONLAKE_LVDS_D_P2_SLOW 7 289 - #define IRONLAKE_LVDS_D_P2_FAST 7 290 - 291 - /* LVDS single-channel 100Mhz refclk */ 292 - #define IRONLAKE_LVDS_S_SSC_N_MIN 1 293 - #define 
IRONLAKE_LVDS_S_SSC_N_MAX 2 294 - #define IRONLAKE_LVDS_S_SSC_M_MIN 79 295 - #define IRONLAKE_LVDS_S_SSC_M_MAX 126 296 - #define IRONLAKE_LVDS_S_SSC_P_MIN 28 297 - #define IRONLAKE_LVDS_S_SSC_P_MAX 112 298 - #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 299 - #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 300 - #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 301 - #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 302 - 303 - /* LVDS dual-channel 100Mhz refclk */ 304 - #define IRONLAKE_LVDS_D_SSC_N_MIN 1 305 - #define IRONLAKE_LVDS_D_SSC_N_MAX 3 306 - #define IRONLAKE_LVDS_D_SSC_M_MIN 79 307 - #define IRONLAKE_LVDS_D_SSC_M_MAX 126 308 - #define IRONLAKE_LVDS_D_SSC_P_MIN 14 309 - #define IRONLAKE_LVDS_D_SSC_P_MAX 42 310 - #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 311 - #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 312 - #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 313 - #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 314 - 315 - /* DisplayPort */ 316 - #define IRONLAKE_DP_N_MIN 1 317 - #define IRONLAKE_DP_N_MAX 2 318 - #define IRONLAKE_DP_M_MIN 81 319 - #define IRONLAKE_DP_M_MAX 90 320 - #define IRONLAKE_DP_P_MIN 10 321 - #define IRONLAKE_DP_P_MAX 20 322 - #define IRONLAKE_DP_P2_FAST 10 323 - #define IRONLAKE_DP_P2_SLOW 10 324 - #define IRONLAKE_DP_P2_LIMIT 0 325 - #define IRONLAKE_DP_P1_MIN 1 326 - #define IRONLAKE_DP_P1_MAX 2 327 - 328 79 /* FDI */ 329 80 #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 330 81 ··· 104 353 } 105 354 106 355 static const intel_limit_t intel_limits_i8xx_dvo = { 107 - .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 108 - .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 109 - .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 110 - .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 111 - .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 112 - .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 113 - .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 114 - .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 115 - .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 116 - .p2_slow = I8XX_P2_SLOW, .p2_fast = 
I8XX_P2_FAST }, 356 + .dot = { .min = 25000, .max = 350000 }, 357 + .vco = { .min = 930000, .max = 1400000 }, 358 + .n = { .min = 3, .max = 16 }, 359 + .m = { .min = 96, .max = 140 }, 360 + .m1 = { .min = 18, .max = 26 }, 361 + .m2 = { .min = 6, .max = 16 }, 362 + .p = { .min = 4, .max = 128 }, 363 + .p1 = { .min = 2, .max = 33 }, 364 + .p2 = { .dot_limit = 165000, 365 + .p2_slow = 4, .p2_fast = 2 }, 117 366 .find_pll = intel_find_best_PLL, 118 367 }; 119 368 120 369 static const intel_limit_t intel_limits_i8xx_lvds = { 121 - .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 122 - .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 123 - .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 124 - .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 125 - .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 126 - .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 127 - .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 128 - .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 129 - .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 130 - .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 370 + .dot = { .min = 25000, .max = 350000 }, 371 + .vco = { .min = 930000, .max = 1400000 }, 372 + .n = { .min = 3, .max = 16 }, 373 + .m = { .min = 96, .max = 140 }, 374 + .m1 = { .min = 18, .max = 26 }, 375 + .m2 = { .min = 6, .max = 16 }, 376 + .p = { .min = 4, .max = 128 }, 377 + .p1 = { .min = 1, .max = 6 }, 378 + .p2 = { .dot_limit = 165000, 379 + .p2_slow = 14, .p2_fast = 7 }, 131 380 .find_pll = intel_find_best_PLL, 132 381 }; 133 - 382 + 134 383 static const intel_limit_t intel_limits_i9xx_sdvo = { 135 - .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 136 - .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 137 - .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 138 - .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 139 - .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 140 - .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 141 - .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = 
I9XX_P_SDVO_DAC_MAX }, 142 - .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 143 - .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 144 - .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 384 + .dot = { .min = 20000, .max = 400000 }, 385 + .vco = { .min = 1400000, .max = 2800000 }, 386 + .n = { .min = 1, .max = 6 }, 387 + .m = { .min = 70, .max = 120 }, 388 + .m1 = { .min = 10, .max = 22 }, 389 + .m2 = { .min = 5, .max = 9 }, 390 + .p = { .min = 5, .max = 80 }, 391 + .p1 = { .min = 1, .max = 8 }, 392 + .p2 = { .dot_limit = 200000, 393 + .p2_slow = 10, .p2_fast = 5 }, 145 394 .find_pll = intel_find_best_PLL, 146 395 }; 147 396 148 397 static const intel_limit_t intel_limits_i9xx_lvds = { 149 - .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 150 - .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 151 - .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 152 - .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 153 - .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 154 - .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 155 - .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, 156 - .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 157 - /* The single-channel range is 25-112Mhz, and dual-channel 158 - * is 80-224Mhz. Prefer single channel as much as possible. 
159 - */ 160 - .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 161 - .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 398 + .dot = { .min = 20000, .max = 400000 }, 399 + .vco = { .min = 1400000, .max = 2800000 }, 400 + .n = { .min = 1, .max = 6 }, 401 + .m = { .min = 70, .max = 120 }, 402 + .m1 = { .min = 10, .max = 22 }, 403 + .m2 = { .min = 5, .max = 9 }, 404 + .p = { .min = 7, .max = 98 }, 405 + .p1 = { .min = 1, .max = 8 }, 406 + .p2 = { .dot_limit = 112000, 407 + .p2_slow = 14, .p2_fast = 7 }, 162 408 .find_pll = intel_find_best_PLL, 163 409 }; 164 410 165 - /* below parameter and function is for G4X Chipset Family*/ 411 + 166 412 static const intel_limit_t intel_limits_g4x_sdvo = { 167 - .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 168 - .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 169 - .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 170 - .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, 171 - .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, 172 - .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, 173 - .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, 174 - .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, 175 - .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, 176 - .p2_slow = G4X_P2_SDVO_SLOW, 177 - .p2_fast = G4X_P2_SDVO_FAST 413 + .dot = { .min = 25000, .max = 270000 }, 414 + .vco = { .min = 1750000, .max = 3500000}, 415 + .n = { .min = 1, .max = 4 }, 416 + .m = { .min = 104, .max = 138 }, 417 + .m1 = { .min = 17, .max = 23 }, 418 + .m2 = { .min = 5, .max = 11 }, 419 + .p = { .min = 10, .max = 30 }, 420 + .p1 = { .min = 1, .max = 3}, 421 + .p2 = { .dot_limit = 270000, 422 + .p2_slow = 10, 423 + .p2_fast = 10 178 424 }, 179 425 .find_pll = intel_g4x_find_best_PLL, 180 426 }; 181 427 182 428 static const intel_limit_t intel_limits_g4x_hdmi = { 183 - .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 184 - .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 185 - .n = { .min = 
G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 186 - .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, 187 - .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, 188 - .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, 189 - .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, 190 - .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, 191 - .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, 192 - .p2_slow = G4X_P2_HDMI_DAC_SLOW, 193 - .p2_fast = G4X_P2_HDMI_DAC_FAST 194 - }, 429 + .dot = { .min = 22000, .max = 400000 }, 430 + .vco = { .min = 1750000, .max = 3500000}, 431 + .n = { .min = 1, .max = 4 }, 432 + .m = { .min = 104, .max = 138 }, 433 + .m1 = { .min = 16, .max = 23 }, 434 + .m2 = { .min = 5, .max = 11 }, 435 + .p = { .min = 5, .max = 80 }, 436 + .p1 = { .min = 1, .max = 8}, 437 + .p2 = { .dot_limit = 165000, 438 + .p2_slow = 10, .p2_fast = 5 }, 195 439 .find_pll = intel_g4x_find_best_PLL, 196 440 }; 197 441 198 442 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 199 - .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, 200 - .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, 201 - .vco = { .min = G4X_VCO_MIN, 202 - .max = G4X_VCO_MAX }, 203 - .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, 204 - .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, 205 - .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, 206 - .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, 207 - .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, 208 - .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, 209 - .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, 210 - .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, 211 - .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, 212 - .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, 213 - .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, 214 - .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, 215 - .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, 216 - .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, 217 - .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 443 + .dot = { 
.min = 20000, .max = 115000 }, 444 + .vco = { .min = 1750000, .max = 3500000 }, 445 + .n = { .min = 1, .max = 3 }, 446 + .m = { .min = 104, .max = 138 }, 447 + .m1 = { .min = 17, .max = 23 }, 448 + .m2 = { .min = 5, .max = 11 }, 449 + .p = { .min = 28, .max = 112 }, 450 + .p1 = { .min = 2, .max = 8 }, 451 + .p2 = { .dot_limit = 0, 452 + .p2_slow = 14, .p2_fast = 14 218 453 }, 219 454 .find_pll = intel_g4x_find_best_PLL, 220 455 }; 221 456 222 457 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 223 - .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, 224 - .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, 225 - .vco = { .min = G4X_VCO_MIN, 226 - .max = G4X_VCO_MAX }, 227 - .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, 228 - .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, 229 - .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, 230 - .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, 231 - .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, 232 - .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, 233 - .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, 234 - .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, 235 - .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, 236 - .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, 237 - .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, 238 - .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, 239 - .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, 240 - .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, 241 - .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 458 + .dot = { .min = 80000, .max = 224000 }, 459 + .vco = { .min = 1750000, .max = 3500000 }, 460 + .n = { .min = 1, .max = 3 }, 461 + .m = { .min = 104, .max = 138 }, 462 + .m1 = { .min = 17, .max = 23 }, 463 + .m2 = { .min = 5, .max = 11 }, 464 + .p = { .min = 14, .max = 42 }, 465 + .p1 = { .min = 2, .max = 6 }, 466 + .p2 = { .dot_limit = 0, 467 + .p2_slow = 7, .p2_fast = 7 242 468 }, 243 469 .find_pll = intel_g4x_find_best_PLL, 244 470 }; 245 471 246 472 static const intel_limit_t intel_limits_g4x_display_port = { 247 - .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, 248 - .max = 
G4X_DOT_DISPLAY_PORT_MAX }, 249 - .vco = { .min = G4X_VCO_MIN, 250 - .max = G4X_VCO_MAX}, 251 - .n = { .min = G4X_N_DISPLAY_PORT_MIN, 252 - .max = G4X_N_DISPLAY_PORT_MAX }, 253 - .m = { .min = G4X_M_DISPLAY_PORT_MIN, 254 - .max = G4X_M_DISPLAY_PORT_MAX }, 255 - .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, 256 - .max = G4X_M1_DISPLAY_PORT_MAX }, 257 - .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, 258 - .max = G4X_M2_DISPLAY_PORT_MAX }, 259 - .p = { .min = G4X_P_DISPLAY_PORT_MIN, 260 - .max = G4X_P_DISPLAY_PORT_MAX }, 261 - .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, 262 - .max = G4X_P1_DISPLAY_PORT_MAX}, 263 - .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, 264 - .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, 265 - .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, 473 + .dot = { .min = 161670, .max = 227000 }, 474 + .vco = { .min = 1750000, .max = 3500000}, 475 + .n = { .min = 1, .max = 2 }, 476 + .m = { .min = 97, .max = 108 }, 477 + .m1 = { .min = 0x10, .max = 0x12 }, 478 + .m2 = { .min = 0x05, .max = 0x06 }, 479 + .p = { .min = 10, .max = 20 }, 480 + .p1 = { .min = 1, .max = 2}, 481 + .p2 = { .dot_limit = 0, 482 + .p2_slow = 10, .p2_fast = 10 }, 266 483 .find_pll = intel_find_pll_g4x_dp, 267 484 }; 268 485 269 486 static const intel_limit_t intel_limits_pineview_sdvo = { 270 - .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 271 - .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 272 - .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 273 - .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 274 - .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 275 - .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 276 - .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 277 - .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 278 - .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 279 - .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 487 + .dot = { .min = 20000, .max = 400000}, 488 + .vco = { .min = 1700000, .max = 3500000 }, 489 + /* 
Pineview's Ncounter is a ring counter */ 490 + .n = { .min = 3, .max = 6 }, 491 + .m = { .min = 2, .max = 256 }, 492 + /* Pineview only has one combined m divider, which we treat as m2. */ 493 + .m1 = { .min = 0, .max = 0 }, 494 + .m2 = { .min = 0, .max = 254 }, 495 + .p = { .min = 5, .max = 80 }, 496 + .p1 = { .min = 1, .max = 8 }, 497 + .p2 = { .dot_limit = 200000, 498 + .p2_slow = 10, .p2_fast = 5 }, 280 499 .find_pll = intel_find_best_PLL, 281 500 }; 282 501 283 502 static const intel_limit_t intel_limits_pineview_lvds = { 284 - .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 285 - .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 286 - .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 287 - .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 288 - .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 289 - .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 290 - .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, 291 - .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 292 - /* Pineview only supports single-channel mode. */ 293 - .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 294 - .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 503 + .dot = { .min = 20000, .max = 400000 }, 504 + .vco = { .min = 1700000, .max = 3500000 }, 505 + .n = { .min = 3, .max = 6 }, 506 + .m = { .min = 2, .max = 256 }, 507 + .m1 = { .min = 0, .max = 0 }, 508 + .m2 = { .min = 0, .max = 254 }, 509 + .p = { .min = 7, .max = 112 }, 510 + .p1 = { .min = 1, .max = 8 }, 511 + .p2 = { .dot_limit = 112000, 512 + .p2_slow = 14, .p2_fast = 14 }, 295 513 .find_pll = intel_find_best_PLL, 296 514 }; 297 515 516 + /* Ironlake / Sandybridge 517 + * 518 + * We calculate clock using (register_value + 2) for N/M1/M2, so here 519 + * the range value for them is (actual_value - 2). 
520 + */ 298 521 static const intel_limit_t intel_limits_ironlake_dac = { 299 - .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 300 - .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 301 - .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, 302 - .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, 303 - .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 304 - .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 305 - .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, 306 - .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, 307 - .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 308 - .p2_slow = IRONLAKE_DAC_P2_SLOW, 309 - .p2_fast = IRONLAKE_DAC_P2_FAST }, 522 + .dot = { .min = 25000, .max = 350000 }, 523 + .vco = { .min = 1760000, .max = 3510000 }, 524 + .n = { .min = 1, .max = 5 }, 525 + .m = { .min = 79, .max = 127 }, 526 + .m1 = { .min = 12, .max = 22 }, 527 + .m2 = { .min = 5, .max = 9 }, 528 + .p = { .min = 5, .max = 80 }, 529 + .p1 = { .min = 1, .max = 8 }, 530 + .p2 = { .dot_limit = 225000, 531 + .p2_slow = 10, .p2_fast = 5 }, 310 532 .find_pll = intel_g4x_find_best_PLL, 311 533 }; 312 534 313 535 static const intel_limit_t intel_limits_ironlake_single_lvds = { 314 - .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 315 - .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 316 - .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, 317 - .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, 318 - .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 319 - .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 320 - .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, 321 - .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, 322 - .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 323 - .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, 324 - .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, 536 + .dot = { .min = 25000, 
.max = 350000 }, 537 + .vco = { .min = 1760000, .max = 3510000 }, 538 + .n = { .min = 1, .max = 3 }, 539 + .m = { .min = 79, .max = 118 }, 540 + .m1 = { .min = 12, .max = 22 }, 541 + .m2 = { .min = 5, .max = 9 }, 542 + .p = { .min = 28, .max = 112 }, 543 + .p1 = { .min = 2, .max = 8 }, 544 + .p2 = { .dot_limit = 225000, 545 + .p2_slow = 14, .p2_fast = 14 }, 325 546 .find_pll = intel_g4x_find_best_PLL, 326 547 }; 327 548 328 549 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 329 - .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 330 - .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 331 - .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, 332 - .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, 333 - .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 334 - .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 335 - .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, 336 - .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, 337 - .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 338 - .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, 339 - .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, 550 + .dot = { .min = 25000, .max = 350000 }, 551 + .vco = { .min = 1760000, .max = 3510000 }, 552 + .n = { .min = 1, .max = 3 }, 553 + .m = { .min = 79, .max = 127 }, 554 + .m1 = { .min = 12, .max = 22 }, 555 + .m2 = { .min = 5, .max = 9 }, 556 + .p = { .min = 14, .max = 56 }, 557 + .p1 = { .min = 2, .max = 8 }, 558 + .p2 = { .dot_limit = 225000, 559 + .p2_slow = 7, .p2_fast = 7 }, 340 560 .find_pll = intel_g4x_find_best_PLL, 341 561 }; 342 562 563 + /* LVDS 100mhz refclk limits. 
*/ 343 564 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 344 - .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 345 - .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 346 - .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, 347 - .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, 348 - .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 349 - .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 350 - .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, 351 - .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, 352 - .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 353 - .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, 354 - .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, 565 + .dot = { .min = 25000, .max = 350000 }, 566 + .vco = { .min = 1760000, .max = 3510000 }, 567 + .n = { .min = 1, .max = 2 }, 568 + .m = { .min = 79, .max = 126 }, 569 + .m1 = { .min = 12, .max = 22 }, 570 + .m2 = { .min = 5, .max = 9 }, 571 + .p = { .min = 28, .max = 112 }, 572 + .p1 = { .min = 2,.max = 8 }, 573 + .p2 = { .dot_limit = 225000, 574 + .p2_slow = 14, .p2_fast = 14 }, 355 575 .find_pll = intel_g4x_find_best_PLL, 356 576 }; 357 577 358 578 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 359 - .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 360 - .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 361 - .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, 362 - .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, 363 - .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 364 - .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 365 - .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, 366 - .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, 367 - .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 368 
- .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, 369 - .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, 579 + .dot = { .min = 25000, .max = 350000 }, 580 + .vco = { .min = 1760000, .max = 3510000 }, 581 + .n = { .min = 1, .max = 3 }, 582 + .m = { .min = 79, .max = 126 }, 583 + .m1 = { .min = 12, .max = 22 }, 584 + .m2 = { .min = 5, .max = 9 }, 585 + .p = { .min = 14, .max = 42 }, 586 + .p1 = { .min = 2,.max = 6 }, 587 + .p2 = { .dot_limit = 225000, 588 + .p2_slow = 7, .p2_fast = 7 }, 370 589 .find_pll = intel_g4x_find_best_PLL, 371 590 }; 372 591 373 592 static const intel_limit_t intel_limits_ironlake_display_port = { 374 - .dot = { .min = IRONLAKE_DOT_MIN, 375 - .max = IRONLAKE_DOT_MAX }, 376 - .vco = { .min = IRONLAKE_VCO_MIN, 377 - .max = IRONLAKE_VCO_MAX}, 378 - .n = { .min = IRONLAKE_DP_N_MIN, 379 - .max = IRONLAKE_DP_N_MAX }, 380 - .m = { .min = IRONLAKE_DP_M_MIN, 381 - .max = IRONLAKE_DP_M_MAX }, 382 - .m1 = { .min = IRONLAKE_M1_MIN, 383 - .max = IRONLAKE_M1_MAX }, 384 - .m2 = { .min = IRONLAKE_M2_MIN, 385 - .max = IRONLAKE_M2_MAX }, 386 - .p = { .min = IRONLAKE_DP_P_MIN, 387 - .max = IRONLAKE_DP_P_MAX }, 388 - .p1 = { .min = IRONLAKE_DP_P1_MIN, 389 - .max = IRONLAKE_DP_P1_MAX}, 390 - .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, 391 - .p2_slow = IRONLAKE_DP_P2_SLOW, 392 - .p2_fast = IRONLAKE_DP_P2_FAST }, 593 + .dot = { .min = 25000, .max = 350000 }, 594 + .vco = { .min = 1760000, .max = 3510000}, 595 + .n = { .min = 1, .max = 2 }, 596 + .m = { .min = 81, .max = 90 }, 597 + .m1 = { .min = 12, .max = 22 }, 598 + .m2 = { .min = 5, .max = 9 }, 599 + .p = { .min = 10, .max = 20 }, 600 + .p1 = { .min = 1, .max = 2}, 601 + .p2 = { .dot_limit = 0, 602 + .p2_slow = 10, .p2_fast = 10 }, 393 603 .find_pll = intel_find_pll_ironlake_dp, 394 604 }; 395 605 ··· 1540 1828 u32 blt_ecoskpd; 1541 1829 1542 1830 /* Make sure blitter notifies FBC of writes */ 1543 - __gen6_gt_force_wake_get(dev_priv); 1831 + gen6_gt_force_wake_get(dev_priv); 1544 1832 blt_ecoskpd = 
I915_READ(GEN6_BLITTER_ECOSKPD); 1545 1833 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1546 1834 GEN6_BLITTER_LOCK_SHIFT; ··· 1551 1839 GEN6_BLITTER_LOCK_SHIFT); 1552 1840 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1553 1841 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1554 - __gen6_gt_force_wake_put(dev_priv); 1842 + gen6_gt_force_wake_put(dev_priv); 1555 1843 } 1556 1844 1557 1845 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ··· 1731 2019 intel_fb = to_intel_framebuffer(fb); 1732 2020 obj = intel_fb->obj; 1733 2021 2022 + if (!i915_enable_fbc) { 2023 + DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); 2024 + dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 2025 + goto out_disable; 2026 + } 1734 2027 if (intel_fb->obj->base.size > dev_priv->cfb_size) { 1735 2028 DRM_DEBUG_KMS("framebuffer too large, disabling " 1736 2029 "compression\n"); ··· 2056 2339 /* enable normal train */ 2057 2340 reg = FDI_TX_CTL(pipe); 2058 2341 temp = I915_READ(reg); 2059 - temp &= ~FDI_LINK_TRAIN_NONE; 2060 - temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 2342 + if (IS_IVYBRIDGE(dev)) { 2343 + temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2344 + temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 2345 + } else { 2346 + temp &= ~FDI_LINK_TRAIN_NONE; 2347 + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 2348 + } 2061 2349 I915_WRITE(reg, temp); 2062 2350 2063 2351 reg = FDI_RX_CTL(pipe); ··· 2079 2357 /* wait one idle pattern time */ 2080 2358 POSTING_READ(reg); 2081 2359 udelay(1000); 2360 + 2361 + /* IVB wants error correction enabled */ 2362 + if (IS_IVYBRIDGE(dev)) 2363 + I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 2364 + FDI_FE_ERRC_ENABLE); 2082 2365 } 2083 2366 2084 2367 /* The FDI link training functions for ILK/Ibexpeak. 
*/ ··· 2311 2584 DRM_DEBUG_KMS("FDI train done.\n"); 2312 2585 } 2313 2586 2314 - static void ironlake_fdi_enable(struct drm_crtc *crtc) 2587 + /* Manual link training for Ivy Bridge A0 parts */ 2588 + static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 2589 + { 2590 + struct drm_device *dev = crtc->dev; 2591 + struct drm_i915_private *dev_priv = dev->dev_private; 2592 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2593 + int pipe = intel_crtc->pipe; 2594 + u32 reg, temp, i; 2595 + 2596 + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2597 + for train result */ 2598 + reg = FDI_RX_IMR(pipe); 2599 + temp = I915_READ(reg); 2600 + temp &= ~FDI_RX_SYMBOL_LOCK; 2601 + temp &= ~FDI_RX_BIT_LOCK; 2602 + I915_WRITE(reg, temp); 2603 + 2604 + POSTING_READ(reg); 2605 + udelay(150); 2606 + 2607 + /* enable CPU FDI TX and PCH FDI RX */ 2608 + reg = FDI_TX_CTL(pipe); 2609 + temp = I915_READ(reg); 2610 + temp &= ~(7 << 19); 2611 + temp |= (intel_crtc->fdi_lanes - 1) << 19; 2612 + temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2613 + temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2614 + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2615 + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2616 + I915_WRITE(reg, temp | FDI_TX_ENABLE); 2617 + 2618 + reg = FDI_RX_CTL(pipe); 2619 + temp = I915_READ(reg); 2620 + temp &= ~FDI_LINK_TRAIN_AUTO; 2621 + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2622 + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2623 + I915_WRITE(reg, temp | FDI_RX_ENABLE); 2624 + 2625 + POSTING_READ(reg); 2626 + udelay(150); 2627 + 2628 + for (i = 0; i < 4; i++ ) { 2629 + reg = FDI_TX_CTL(pipe); 2630 + temp = I915_READ(reg); 2631 + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2632 + temp |= snb_b_fdi_train_param[i]; 2633 + I915_WRITE(reg, temp); 2634 + 2635 + POSTING_READ(reg); 2636 + udelay(500); 2637 + 2638 + reg = FDI_RX_IIR(pipe); 2639 + temp = I915_READ(reg); 2640 + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2641 + 2642 + if (temp & FDI_RX_BIT_LOCK || 2643 + 
(I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2644 + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2645 + DRM_DEBUG_KMS("FDI train 1 done.\n"); 2646 + break; 2647 + } 2648 + } 2649 + if (i == 4) 2650 + DRM_ERROR("FDI train 1 fail!\n"); 2651 + 2652 + /* Train 2 */ 2653 + reg = FDI_TX_CTL(pipe); 2654 + temp = I915_READ(reg); 2655 + temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2656 + temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 2657 + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2658 + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2659 + I915_WRITE(reg, temp); 2660 + 2661 + reg = FDI_RX_CTL(pipe); 2662 + temp = I915_READ(reg); 2663 + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2664 + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2665 + I915_WRITE(reg, temp); 2666 + 2667 + POSTING_READ(reg); 2668 + udelay(150); 2669 + 2670 + for (i = 0; i < 4; i++ ) { 2671 + reg = FDI_TX_CTL(pipe); 2672 + temp = I915_READ(reg); 2673 + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2674 + temp |= snb_b_fdi_train_param[i]; 2675 + I915_WRITE(reg, temp); 2676 + 2677 + POSTING_READ(reg); 2678 + udelay(500); 2679 + 2680 + reg = FDI_RX_IIR(pipe); 2681 + temp = I915_READ(reg); 2682 + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2683 + 2684 + if (temp & FDI_RX_SYMBOL_LOCK) { 2685 + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2686 + DRM_DEBUG_KMS("FDI train 2 done.\n"); 2687 + break; 2688 + } 2689 + } 2690 + if (i == 4) 2691 + DRM_ERROR("FDI train 2 fail!\n"); 2692 + 2693 + DRM_DEBUG_KMS("FDI train done.\n"); 2694 + } 2695 + 2696 + static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) 2315 2697 { 2316 2698 struct drm_device *dev = crtc->dev; 2317 2699 struct drm_i915_private *dev_priv = dev->dev_private; ··· 2593 2757 u32 reg, temp; 2594 2758 2595 2759 /* For PCH output, training FDI link */ 2596 - if (IS_GEN6(dev)) 2597 - gen6_fdi_link_train(crtc); 2598 - else 2599 - ironlake_fdi_link_train(crtc); 2760 + dev_priv->display.fdi_link_train(crtc); 2600 2761 2601 2762 intel_enable_pch_pll(dev_priv, pipe); 2602 2763 ··· 2683 2850 is_pch_port = 
intel_crtc_driving_pch(crtc); 2684 2851 2685 2852 if (is_pch_port) 2686 - ironlake_fdi_enable(crtc); 2853 + ironlake_fdi_pll_enable(crtc); 2687 2854 else 2688 2855 ironlake_fdi_disable(crtc); 2689 2856 ··· 2706 2873 ironlake_pch_enable(crtc); 2707 2874 2708 2875 intel_crtc_load_lut(crtc); 2876 + 2877 + mutex_lock(&dev->struct_mutex); 2709 2878 intel_update_fbc(dev); 2879 + mutex_unlock(&dev->struct_mutex); 2880 + 2710 2881 intel_crtc_update_cursor(crtc, true); 2711 2882 } 2712 2883 ··· 2806 2969 2807 2970 intel_crtc->active = false; 2808 2971 intel_update_watermarks(dev); 2972 + 2973 + mutex_lock(&dev->struct_mutex); 2809 2974 intel_update_fbc(dev); 2810 2975 intel_clear_scanline_wait(dev); 2976 + mutex_unlock(&dev->struct_mutex); 2811 2977 } 2812 2978 2813 2979 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) ··· 3337 3497 1000; 3338 3498 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); 3339 3499 3340 - DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 3500 + DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); 3341 3501 3342 3502 wm_size = fifo_size - (entries_required + wm->guard_size); 3343 3503 3344 - DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 3504 + DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); 3345 3505 3346 3506 /* Don't promote wm_size to unsigned... 
*/ 3347 3507 if (wm_size > (long)wm->max_wm) ··· 3663 3823 display_wm, cursor_wm); 3664 3824 3665 3825 if (display_wm > display->max_wm) { 3666 - DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n", 3826 + DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", 3667 3827 display_wm, display->max_wm); 3668 3828 return false; 3669 3829 } 3670 3830 3671 3831 if (cursor_wm > cursor->max_wm) { 3672 - DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n", 3832 + DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", 3673 3833 cursor_wm, cursor->max_wm); 3674 3834 return false; 3675 3835 } ··· 4356 4516 return dev_priv->lvds_use_ssc && i915_panel_use_ssc; 4357 4517 } 4358 4518 4359 - static int intel_crtc_mode_set(struct drm_crtc *crtc, 4360 - struct drm_display_mode *mode, 4361 - struct drm_display_mode *adjusted_mode, 4362 - int x, int y, 4363 - struct drm_framebuffer *old_fb) 4519 + static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4520 + struct drm_display_mode *mode, 4521 + struct drm_display_mode *adjusted_mode, 4522 + int x, int y, 4523 + struct drm_framebuffer *old_fb) 4364 4524 { 4365 4525 struct drm_device *dev = crtc->dev; 4366 4526 struct drm_i915_private *dev_priv = dev->dev_private; 4367 4527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4368 4528 int pipe = intel_crtc->pipe; 4369 4529 int plane = intel_crtc->plane; 4370 - u32 fp_reg, dpll_reg; 4371 4530 int refclk, num_connectors = 0; 4372 4531 intel_clock_t clock, reduced_clock; 4373 4532 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4374 4533 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 4375 4534 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4376 - struct intel_encoder *has_edp_encoder = NULL; 4377 4535 struct drm_mode_config *mode_config = &dev->mode_config; 4378 4536 struct intel_encoder *encoder; 4379 4537 const intel_limit_t *limit; 4380 4538 int ret; 4381 - struct fdi_m_n m_n = {0}; 4382 - u32 reg, 
temp; 4539 + u32 temp; 4383 4540 u32 lvds_sync = 0; 4384 - int target_clock; 4385 - 4386 - drm_vblank_pre_modeset(dev, pipe); 4387 4541 4388 4542 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4389 4543 if (encoder->base.crtc != crtc) ··· 4405 4571 case INTEL_OUTPUT_DISPLAYPORT: 4406 4572 is_dp = true; 4407 4573 break; 4408 - case INTEL_OUTPUT_EDP: 4409 - has_edp_encoder = encoder; 4410 - break; 4411 4574 } 4412 4575 4413 4576 num_connectors++; ··· 4416 4585 refclk / 1000); 4417 4586 } else if (!IS_GEN2(dev)) { 4418 4587 refclk = 96000; 4419 - if (HAS_PCH_SPLIT(dev) && 4420 - (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) 4421 - refclk = 120000; /* 120Mhz refclk */ 4422 4588 } else { 4423 4589 refclk = 48000; 4424 4590 } ··· 4429 4601 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 4430 4602 if (!ok) { 4431 4603 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4432 - drm_vblank_post_modeset(dev, pipe); 4604 + return -EINVAL; 4605 + } 4606 + 4607 + /* Ensure that the cursor is valid for the new mode before changing... */ 4608 + intel_crtc_update_cursor(crtc, true); 4609 + 4610 + if (is_lvds && dev_priv->lvds_downclock_avail) { 4611 + has_reduced_clock = limit->find_pll(limit, crtc, 4612 + dev_priv->lvds_downclock, 4613 + refclk, 4614 + &reduced_clock); 4615 + if (has_reduced_clock && (clock.p != reduced_clock.p)) { 4616 + /* 4617 + * If the different P is found, it means that we can't 4618 + * switch the display clock by using the FP0/FP1. 4619 + * In such case we will disable the LVDS downclock 4620 + * feature. 4621 + */ 4622 + DRM_DEBUG_KMS("Different P is found for " 4623 + "LVDS clock/downclock\n"); 4624 + has_reduced_clock = 0; 4625 + } 4626 + } 4627 + /* SDVO TV has fixed PLL values depend on its clock range, 4628 + this mirrors vbios setting. 
*/ 4629 + if (is_sdvo && is_tv) { 4630 + if (adjusted_mode->clock >= 100000 4631 + && adjusted_mode->clock < 140500) { 4632 + clock.p1 = 2; 4633 + clock.p2 = 10; 4634 + clock.n = 3; 4635 + clock.m1 = 16; 4636 + clock.m2 = 8; 4637 + } else if (adjusted_mode->clock >= 140500 4638 + && adjusted_mode->clock <= 200000) { 4639 + clock.p1 = 1; 4640 + clock.p2 = 10; 4641 + clock.n = 6; 4642 + clock.m1 = 12; 4643 + clock.m2 = 8; 4644 + } 4645 + } 4646 + 4647 + if (IS_PINEVIEW(dev)) { 4648 + fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 4649 + if (has_reduced_clock) 4650 + fp2 = (1 << reduced_clock.n) << 16 | 4651 + reduced_clock.m1 << 8 | reduced_clock.m2; 4652 + } else { 4653 + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 4654 + if (has_reduced_clock) 4655 + fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 4656 + reduced_clock.m2; 4657 + } 4658 + 4659 + dpll = DPLL_VGA_MODE_DIS; 4660 + 4661 + if (!IS_GEN2(dev)) { 4662 + if (is_lvds) 4663 + dpll |= DPLLB_MODE_LVDS; 4664 + else 4665 + dpll |= DPLLB_MODE_DAC_SERIAL; 4666 + if (is_sdvo) { 4667 + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4668 + if (pixel_multiplier > 1) { 4669 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4670 + dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4671 + } 4672 + dpll |= DPLL_DVO_HIGH_SPEED; 4673 + } 4674 + if (is_dp) 4675 + dpll |= DPLL_DVO_HIGH_SPEED; 4676 + 4677 + /* compute bitmask from p1 value */ 4678 + if (IS_PINEVIEW(dev)) 4679 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 4680 + else { 4681 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4682 + if (IS_G4X(dev) && has_reduced_clock) 4683 + dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4684 + } 4685 + switch (clock.p2) { 4686 + case 5: 4687 + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 4688 + break; 4689 + case 7: 4690 + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 4691 + break; 4692 + case 10: 4693 + dpll |= 
DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 4694 + break; 4695 + case 14: 4696 + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 4697 + break; 4698 + } 4699 + if (INTEL_INFO(dev)->gen >= 4) 4700 + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 4701 + } else { 4702 + if (is_lvds) { 4703 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4704 + } else { 4705 + if (clock.p1 == 2) 4706 + dpll |= PLL_P1_DIVIDE_BY_TWO; 4707 + else 4708 + dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4709 + if (clock.p2 == 4) 4710 + dpll |= PLL_P2_DIVIDE_BY_4; 4711 + } 4712 + } 4713 + 4714 + if (is_sdvo && is_tv) 4715 + dpll |= PLL_REF_INPUT_TVCLKINBC; 4716 + else if (is_tv) 4717 + /* XXX: just matching BIOS for now */ 4718 + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4719 + dpll |= 3; 4720 + else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4721 + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4722 + else 4723 + dpll |= PLL_REF_INPUT_DREFCLK; 4724 + 4725 + /* setup pipeconf */ 4726 + pipeconf = I915_READ(PIPECONF(pipe)); 4727 + 4728 + /* Set up the display plane register */ 4729 + dspcntr = DISPPLANE_GAMMA_ENABLE; 4730 + 4731 + /* Ironlake's plane is forced to pipe, bit 24 is to 4732 + enable color space conversion */ 4733 + if (pipe == 0) 4734 + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4735 + else 4736 + dspcntr |= DISPPLANE_SEL_PIPE_B; 4737 + 4738 + if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4739 + /* Enable pixel doubling when the dot clock is > 90% of the (display) 4740 + * core speed. 4741 + * 4742 + * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 4743 + * pipe == 0 check? 4744 + */ 4745 + if (mode->clock > 4746 + dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 4747 + pipeconf |= PIPECONF_DOUBLE_WIDE; 4748 + else 4749 + pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4750 + } 4751 + 4752 + dpll |= DPLL_VCO_ENABLE; 4753 + 4754 + DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); 4755 + drm_mode_debug_printmodeline(mode); 4756 + 4757 + I915_WRITE(FP0(pipe), fp); 4758 + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4759 + 4760 + POSTING_READ(DPLL(pipe)); 4761 + udelay(150); 4762 + 4763 + /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4764 + * This is an exception to the general rule that mode_set doesn't turn 4765 + * things on. 4766 + */ 4767 + if (is_lvds) { 4768 + temp = I915_READ(LVDS); 4769 + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 4770 + if (pipe == 1) { 4771 + temp |= LVDS_PIPEB_SELECT; 4772 + } else { 4773 + temp &= ~LVDS_PIPEB_SELECT; 4774 + } 4775 + /* set the corresponsding LVDS_BORDER bit */ 4776 + temp |= dev_priv->lvds_border_bits; 4777 + /* Set the B0-B3 data pairs corresponding to whether we're going to 4778 + * set the DPLLs for dual-channel mode or not. 4779 + */ 4780 + if (clock.p2 == 7) 4781 + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 4782 + else 4783 + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 4784 + 4785 + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 4786 + * appropriately here, but we need to look more thoroughly into how 4787 + * panels behave in the two modes. 
4788 + */ 4789 + /* set the dithering flag on LVDS as needed */ 4790 + if (INTEL_INFO(dev)->gen >= 4) { 4791 + if (dev_priv->lvds_dither) 4792 + temp |= LVDS_ENABLE_DITHER; 4793 + else 4794 + temp &= ~LVDS_ENABLE_DITHER; 4795 + } 4796 + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 4797 + lvds_sync |= LVDS_HSYNC_POLARITY; 4798 + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 4799 + lvds_sync |= LVDS_VSYNC_POLARITY; 4800 + if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) 4801 + != lvds_sync) { 4802 + char flags[2] = "-+"; 4803 + DRM_INFO("Changing LVDS panel from " 4804 + "(%chsync, %cvsync) to (%chsync, %cvsync)\n", 4805 + flags[!(temp & LVDS_HSYNC_POLARITY)], 4806 + flags[!(temp & LVDS_VSYNC_POLARITY)], 4807 + flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], 4808 + flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); 4809 + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 4810 + temp |= lvds_sync; 4811 + } 4812 + I915_WRITE(LVDS, temp); 4813 + } 4814 + 4815 + if (is_dp) { 4816 + intel_dp_set_m_n(crtc, mode, adjusted_mode); 4817 + } 4818 + 4819 + I915_WRITE(DPLL(pipe), dpll); 4820 + 4821 + /* Wait for the clocks to stabilize. */ 4822 + POSTING_READ(DPLL(pipe)); 4823 + udelay(150); 4824 + 4825 + if (INTEL_INFO(dev)->gen >= 4) { 4826 + temp = 0; 4827 + if (is_sdvo) { 4828 + temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4829 + if (temp > 1) 4830 + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4831 + else 4832 + temp = 0; 4833 + } 4834 + I915_WRITE(DPLL_MD(pipe), temp); 4835 + } else { 4836 + /* The pixel multiplier can only be updated once the 4837 + * DPLL is enabled and the clocks are stable. 4838 + * 4839 + * So write it again. 
4840 + */ 4841 + I915_WRITE(DPLL(pipe), dpll); 4842 + } 4843 + 4844 + intel_crtc->lowfreq_avail = false; 4845 + if (is_lvds && has_reduced_clock && i915_powersave) { 4846 + I915_WRITE(FP1(pipe), fp2); 4847 + intel_crtc->lowfreq_avail = true; 4848 + if (HAS_PIPE_CXSR(dev)) { 4849 + DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 4850 + pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 4851 + } 4852 + } else { 4853 + I915_WRITE(FP1(pipe), fp); 4854 + if (HAS_PIPE_CXSR(dev)) { 4855 + DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 4856 + pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 4857 + } 4858 + } 4859 + 4860 + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4861 + pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 4862 + /* the chip adds 2 halflines automatically */ 4863 + adjusted_mode->crtc_vdisplay -= 1; 4864 + adjusted_mode->crtc_vtotal -= 1; 4865 + adjusted_mode->crtc_vblank_start -= 1; 4866 + adjusted_mode->crtc_vblank_end -= 1; 4867 + adjusted_mode->crtc_vsync_end -= 1; 4868 + adjusted_mode->crtc_vsync_start -= 1; 4869 + } else 4870 + pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 4871 + 4872 + I915_WRITE(HTOTAL(pipe), 4873 + (adjusted_mode->crtc_hdisplay - 1) | 4874 + ((adjusted_mode->crtc_htotal - 1) << 16)); 4875 + I915_WRITE(HBLANK(pipe), 4876 + (adjusted_mode->crtc_hblank_start - 1) | 4877 + ((adjusted_mode->crtc_hblank_end - 1) << 16)); 4878 + I915_WRITE(HSYNC(pipe), 4879 + (adjusted_mode->crtc_hsync_start - 1) | 4880 + ((adjusted_mode->crtc_hsync_end - 1) << 16)); 4881 + 4882 + I915_WRITE(VTOTAL(pipe), 4883 + (adjusted_mode->crtc_vdisplay - 1) | 4884 + ((adjusted_mode->crtc_vtotal - 1) << 16)); 4885 + I915_WRITE(VBLANK(pipe), 4886 + (adjusted_mode->crtc_vblank_start - 1) | 4887 + ((adjusted_mode->crtc_vblank_end - 1) << 16)); 4888 + I915_WRITE(VSYNC(pipe), 4889 + (adjusted_mode->crtc_vsync_start - 1) | 4890 + ((adjusted_mode->crtc_vsync_end - 1) << 16)); 4891 + 4892 + /* pipesrc and dspsize control the size that is scaled from, 4893 + * which 
should always be the user's requested size. 4894 + */ 4895 + I915_WRITE(DSPSIZE(plane), 4896 + ((mode->vdisplay - 1) << 16) | 4897 + (mode->hdisplay - 1)); 4898 + I915_WRITE(DSPPOS(plane), 0); 4899 + I915_WRITE(PIPESRC(pipe), 4900 + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4901 + 4902 + I915_WRITE(PIPECONF(pipe), pipeconf); 4903 + POSTING_READ(PIPECONF(pipe)); 4904 + intel_enable_pipe(dev_priv, pipe, false); 4905 + 4906 + intel_wait_for_vblank(dev, pipe); 4907 + 4908 + I915_WRITE(DSPCNTR(plane), dspcntr); 4909 + POSTING_READ(DSPCNTR(plane)); 4910 + 4911 + ret = intel_pipe_set_base(crtc, x, y, old_fb); 4912 + 4913 + intel_update_watermarks(dev); 4914 + 4915 + return ret; 4916 + } 4917 + 4918 + static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 4919 + struct drm_display_mode *mode, 4920 + struct drm_display_mode *adjusted_mode, 4921 + int x, int y, 4922 + struct drm_framebuffer *old_fb) 4923 + { 4924 + struct drm_device *dev = crtc->dev; 4925 + struct drm_i915_private *dev_priv = dev->dev_private; 4926 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4927 + int pipe = intel_crtc->pipe; 4928 + int plane = intel_crtc->plane; 4929 + int refclk, num_connectors = 0; 4930 + intel_clock_t clock, reduced_clock; 4931 + u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4932 + bool ok, has_reduced_clock = false, is_sdvo = false; 4933 + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4934 + struct intel_encoder *has_edp_encoder = NULL; 4935 + struct drm_mode_config *mode_config = &dev->mode_config; 4936 + struct intel_encoder *encoder; 4937 + const intel_limit_t *limit; 4938 + int ret; 4939 + struct fdi_m_n m_n = {0}; 4940 + u32 temp; 4941 + u32 lvds_sync = 0; 4942 + int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; 4943 + 4944 + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4945 + if (encoder->base.crtc != crtc) 4946 + continue; 4947 + 4948 + switch (encoder->type) { 4949 + case INTEL_OUTPUT_LVDS: 
4950 + is_lvds = true; 4951 + break; 4952 + case INTEL_OUTPUT_SDVO: 4953 + case INTEL_OUTPUT_HDMI: 4954 + is_sdvo = true; 4955 + if (encoder->needs_tv_clock) 4956 + is_tv = true; 4957 + break; 4958 + case INTEL_OUTPUT_TVOUT: 4959 + is_tv = true; 4960 + break; 4961 + case INTEL_OUTPUT_ANALOG: 4962 + is_crt = true; 4963 + break; 4964 + case INTEL_OUTPUT_DISPLAYPORT: 4965 + is_dp = true; 4966 + break; 4967 + case INTEL_OUTPUT_EDP: 4968 + has_edp_encoder = encoder; 4969 + break; 4970 + } 4971 + 4972 + num_connectors++; 4973 + } 4974 + 4975 + if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4976 + refclk = dev_priv->lvds_ssc_freq * 1000; 4977 + DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 4978 + refclk / 1000); 4979 + } else { 4980 + refclk = 96000; 4981 + if (!has_edp_encoder || 4982 + intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4983 + refclk = 120000; /* 120Mhz refclk */ 4984 + } 4985 + 4986 + /* 4987 + * Returns a set of divisors for the desired target clock with the given 4988 + * refclk, or FALSE. The returned values represent the clock equation: 4989 + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
4990 + */ 4991 + limit = intel_limit(crtc, refclk); 4992 + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 4993 + if (!ok) { 4994 + DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4433 4995 return -EINVAL; 4434 4996 } 4435 4997 ··· 4864 4646 } 4865 4647 4866 4648 /* FDI link */ 4867 - if (HAS_PCH_SPLIT(dev)) { 4868 - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4869 - int lane = 0, link_bw, bpp; 4870 - /* CPU eDP doesn't require FDI link, so just set DP M/N 4871 - according to current link config */ 4872 - if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4649 + pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4650 + lane = 0; 4651 + /* CPU eDP doesn't require FDI link, so just set DP M/N 4652 + according to current link config */ 4653 + if (has_edp_encoder && 4654 + !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4655 + target_clock = mode->clock; 4656 + intel_edp_link_config(has_edp_encoder, 4657 + &lane, &link_bw); 4658 + } else { 4659 + /* [e]DP over FDI requires target mode clock 4660 + instead of link clock */ 4661 + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4873 4662 target_clock = mode->clock; 4874 - intel_edp_link_config(has_edp_encoder, 4875 - &lane, &link_bw); 4876 - } else { 4877 - /* [e]DP over FDI requires target mode clock 4878 - instead of link clock */ 4879 - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4880 - target_clock = mode->clock; 4881 - else 4882 - target_clock = adjusted_mode->clock; 4663 + else 4664 + target_clock = adjusted_mode->clock; 4883 4665 4884 - /* FDI is a binary signal running at ~2.7GHz, encoding 4885 - * each output octet as 10 bits. The actual frequency 4886 - * is stored as a divider into a 100MHz clock, and the 4887 - * mode pixel clock is stored in units of 1KHz. 
4888 - * Hence the bw of each lane in terms of the mode signal 4889 - * is: 4890 - */ 4891 - link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4892 - } 4893 - 4894 - /* determine panel color depth */ 4895 - temp = I915_READ(PIPECONF(pipe)); 4896 - temp &= ~PIPE_BPC_MASK; 4897 - if (is_lvds) { 4898 - /* the BPC will be 6 if it is 18-bit LVDS panel */ 4899 - if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 4900 - temp |= PIPE_8BPC; 4901 - else 4902 - temp |= PIPE_6BPC; 4903 - } else if (has_edp_encoder) { 4904 - switch (dev_priv->edp.bpp/3) { 4905 - case 8: 4906 - temp |= PIPE_8BPC; 4907 - break; 4908 - case 10: 4909 - temp |= PIPE_10BPC; 4910 - break; 4911 - case 6: 4912 - temp |= PIPE_6BPC; 4913 - break; 4914 - case 12: 4915 - temp |= PIPE_12BPC; 4916 - break; 4917 - } 4918 - } else 4919 - temp |= PIPE_8BPC; 4920 - I915_WRITE(PIPECONF(pipe), temp); 4921 - 4922 - switch (temp & PIPE_BPC_MASK) { 4923 - case PIPE_8BPC: 4924 - bpp = 24; 4925 - break; 4926 - case PIPE_10BPC: 4927 - bpp = 30; 4928 - break; 4929 - case PIPE_6BPC: 4930 - bpp = 18; 4931 - break; 4932 - case PIPE_12BPC: 4933 - bpp = 36; 4934 - break; 4935 - default: 4936 - DRM_ERROR("unknown pipe bpc value\n"); 4937 - bpp = 24; 4938 - } 4939 - 4940 - if (!lane) { 4941 - /* 4942 - * Account for spread spectrum to avoid 4943 - * oversubscribing the link. Max center spread 4944 - * is 2.5%; use 5% for safety's sake. 4945 - */ 4946 - u32 bps = target_clock * bpp * 21 / 20; 4947 - lane = bps / (link_bw * 8) + 1; 4948 - } 4949 - 4950 - intel_crtc->fdi_lanes = lane; 4951 - 4952 - if (pixel_multiplier > 1) 4953 - link_bw *= pixel_multiplier; 4954 - ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 4666 + /* FDI is a binary signal running at ~2.7GHz, encoding 4667 + * each output octet as 10 bits. The actual frequency 4668 + * is stored as a divider into a 100MHz clock, and the 4669 + * mode pixel clock is stored in units of 1KHz. 
4670 + * Hence the bw of each lane in terms of the mode signal 4671 + * is: 4672 + */ 4673 + link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4955 4674 } 4675 + 4676 + /* determine panel color depth */ 4677 + temp = I915_READ(PIPECONF(pipe)); 4678 + temp &= ~PIPE_BPC_MASK; 4679 + if (is_lvds) { 4680 + /* the BPC will be 6 if it is 18-bit LVDS panel */ 4681 + if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 4682 + temp |= PIPE_8BPC; 4683 + else 4684 + temp |= PIPE_6BPC; 4685 + } else if (has_edp_encoder) { 4686 + switch (dev_priv->edp.bpp/3) { 4687 + case 8: 4688 + temp |= PIPE_8BPC; 4689 + break; 4690 + case 10: 4691 + temp |= PIPE_10BPC; 4692 + break; 4693 + case 6: 4694 + temp |= PIPE_6BPC; 4695 + break; 4696 + case 12: 4697 + temp |= PIPE_12BPC; 4698 + break; 4699 + } 4700 + } else 4701 + temp |= PIPE_8BPC; 4702 + I915_WRITE(PIPECONF(pipe), temp); 4703 + 4704 + switch (temp & PIPE_BPC_MASK) { 4705 + case PIPE_8BPC: 4706 + bpp = 24; 4707 + break; 4708 + case PIPE_10BPC: 4709 + bpp = 30; 4710 + break; 4711 + case PIPE_6BPC: 4712 + bpp = 18; 4713 + break; 4714 + case PIPE_12BPC: 4715 + bpp = 36; 4716 + break; 4717 + default: 4718 + DRM_ERROR("unknown pipe bpc value\n"); 4719 + bpp = 24; 4720 + } 4721 + 4722 + if (!lane) { 4723 + /* 4724 + * Account for spread spectrum to avoid 4725 + * oversubscribing the link. Max center spread 4726 + * is 2.5%; use 5% for safety's sake. 4727 + */ 4728 + u32 bps = target_clock * bpp * 21 / 20; 4729 + lane = bps / (link_bw * 8) + 1; 4730 + } 4731 + 4732 + intel_crtc->fdi_lanes = lane; 4733 + 4734 + if (pixel_multiplier > 1) 4735 + link_bw *= pixel_multiplier; 4736 + ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 4956 4737 4957 4738 /* Ironlake: try to setup display ref clock before DPLL 4958 4739 * enabling. This is only under driver's control after 4959 4740 * PCH B stepping, previous chipset stepping should be 4960 4741 * ignoring this setting. 
4961 4742 */ 4962 - if (HAS_PCH_SPLIT(dev)) { 4963 - temp = I915_READ(PCH_DREF_CONTROL); 4964 - /* Always enable nonspread source */ 4965 - temp &= ~DREF_NONSPREAD_SOURCE_MASK; 4966 - temp |= DREF_NONSPREAD_SOURCE_ENABLE; 4967 - temp &= ~DREF_SSC_SOURCE_MASK; 4968 - temp |= DREF_SSC_SOURCE_ENABLE; 4969 - I915_WRITE(PCH_DREF_CONTROL, temp); 4743 + temp = I915_READ(PCH_DREF_CONTROL); 4744 + /* Always enable nonspread source */ 4745 + temp &= ~DREF_NONSPREAD_SOURCE_MASK; 4746 + temp |= DREF_NONSPREAD_SOURCE_ENABLE; 4747 + temp &= ~DREF_SSC_SOURCE_MASK; 4748 + temp |= DREF_SSC_SOURCE_ENABLE; 4749 + I915_WRITE(PCH_DREF_CONTROL, temp); 4970 4750 4971 - POSTING_READ(PCH_DREF_CONTROL); 4972 - udelay(200); 4751 + POSTING_READ(PCH_DREF_CONTROL); 4752 + udelay(200); 4973 4753 4974 - if (has_edp_encoder) { 4975 - if (intel_panel_use_ssc(dev_priv)) { 4976 - temp |= DREF_SSC1_ENABLE; 4977 - I915_WRITE(PCH_DREF_CONTROL, temp); 4978 - 4979 - POSTING_READ(PCH_DREF_CONTROL); 4980 - udelay(200); 4981 - } 4982 - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4983 - 4984 - /* Enable CPU source on CPU attached eDP */ 4985 - if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4986 - if (intel_panel_use_ssc(dev_priv)) 4987 - temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4988 - else 4989 - temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4990 - } else { 4991 - /* Enable SSC on PCH eDP if needed */ 4992 - if (intel_panel_use_ssc(dev_priv)) { 4993 - DRM_ERROR("enabling SSC on PCH\n"); 4994 - temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; 4995 - } 4996 - } 4754 + if (has_edp_encoder) { 4755 + if (intel_panel_use_ssc(dev_priv)) { 4756 + temp |= DREF_SSC1_ENABLE; 4997 4757 I915_WRITE(PCH_DREF_CONTROL, temp); 4758 + 4998 4759 POSTING_READ(PCH_DREF_CONTROL); 4999 4760 udelay(200); 5000 4761 } 4762 + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4763 + 4764 + /* Enable CPU source on CPU attached eDP */ 4765 + if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4766 + if (intel_panel_use_ssc(dev_priv)) 4767 + temp |= 
DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4768 + else 4769 + temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4770 + } else { 4771 + /* Enable SSC on PCH eDP if needed */ 4772 + if (intel_panel_use_ssc(dev_priv)) { 4773 + DRM_ERROR("enabling SSC on PCH\n"); 4774 + temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; 4775 + } 4776 + } 4777 + I915_WRITE(PCH_DREF_CONTROL, temp); 4778 + POSTING_READ(PCH_DREF_CONTROL); 4779 + udelay(200); 5001 4780 } 5002 4781 5003 - if (IS_PINEVIEW(dev)) { 5004 - fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 5005 - if (has_reduced_clock) 5006 - fp2 = (1 << reduced_clock.n) << 16 | 5007 - reduced_clock.m1 << 8 | reduced_clock.m2; 5008 - } else { 5009 - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5010 - if (has_reduced_clock) 5011 - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5012 - reduced_clock.m2; 5013 - } 4782 + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 4783 + if (has_reduced_clock) 4784 + fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 4785 + reduced_clock.m2; 5014 4786 5015 4787 /* Enable autotuning of the PLL clock (if permissible) */ 5016 - if (HAS_PCH_SPLIT(dev)) { 5017 - int factor = 21; 4788 + factor = 21; 4789 + if (is_lvds) { 4790 + if ((intel_panel_use_ssc(dev_priv) && 4791 + dev_priv->lvds_ssc_freq == 100) || 4792 + (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 4793 + factor = 25; 4794 + } else if (is_sdvo && is_tv) 4795 + factor = 20; 5018 4796 5019 - if (is_lvds) { 5020 - if ((intel_panel_use_ssc(dev_priv) && 5021 - dev_priv->lvds_ssc_freq == 100) || 5022 - (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 5023 - factor = 25; 5024 - } else if (is_sdvo && is_tv) 5025 - factor = 20; 5026 - 5027 - if (clock.m1 < factor * clock.n) 5028 - fp |= FP_CB_TUNE; 5029 - } 4797 + if (clock.m1 < factor * clock.n) 4798 + fp |= FP_CB_TUNE; 5030 4799 5031 4800 dpll = 0; 5032 - if (!HAS_PCH_SPLIT(dev)) 5033 - dpll = DPLL_VGA_MODE_DIS; 5034 4801 5035 - if (!IS_GEN2(dev)) { 5036 - if (is_lvds) 
5037 - dpll |= DPLLB_MODE_LVDS; 5038 - else 5039 - dpll |= DPLLB_MODE_DAC_SERIAL; 5040 - if (is_sdvo) { 5041 - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5042 - if (pixel_multiplier > 1) { 5043 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 5044 - dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 5045 - else if (HAS_PCH_SPLIT(dev)) 5046 - dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5047 - } 5048 - dpll |= DPLL_DVO_HIGH_SPEED; 4802 + if (is_lvds) 4803 + dpll |= DPLLB_MODE_LVDS; 4804 + else 4805 + dpll |= DPLLB_MODE_DAC_SERIAL; 4806 + if (is_sdvo) { 4807 + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4808 + if (pixel_multiplier > 1) { 4809 + dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5049 4810 } 5050 - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 5051 - dpll |= DPLL_DVO_HIGH_SPEED; 4811 + dpll |= DPLL_DVO_HIGH_SPEED; 4812 + } 4813 + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4814 + dpll |= DPLL_DVO_HIGH_SPEED; 5052 4815 5053 - /* compute bitmask from p1 value */ 5054 - if (IS_PINEVIEW(dev)) 5055 - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 5056 - else { 5057 - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5058 - /* also FPA1 */ 5059 - if (HAS_PCH_SPLIT(dev)) 5060 - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5061 - if (IS_G4X(dev) && has_reduced_clock) 5062 - dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5063 - } 5064 - switch (clock.p2) { 5065 - case 5: 5066 - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 5067 - break; 5068 - case 7: 5069 - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 5070 - break; 5071 - case 10: 5072 - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 5073 - break; 5074 - case 14: 5075 - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 5076 - break; 5077 - } 5078 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 5079 - dpll |= (6 
<< PLL_LOAD_PULSE_PHASE_SHIFT); 5080 - } else { 5081 - if (is_lvds) { 5082 - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5083 - } else { 5084 - if (clock.p1 == 2) 5085 - dpll |= PLL_P1_DIVIDE_BY_TWO; 5086 - else 5087 - dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5088 - if (clock.p2 == 4) 5089 - dpll |= PLL_P2_DIVIDE_BY_4; 5090 - } 4816 + /* compute bitmask from p1 value */ 4817 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4818 + /* also FPA1 */ 4819 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4820 + 4821 + switch (clock.p2) { 4822 + case 5: 4823 + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 4824 + break; 4825 + case 7: 4826 + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 4827 + break; 4828 + case 10: 4829 + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 4830 + break; 4831 + case 14: 4832 + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 4833 + break; 5091 4834 } 5092 4835 5093 4836 if (is_sdvo && is_tv) ··· 5068 4889 /* Set up the display plane register */ 5069 4890 dspcntr = DISPPLANE_GAMMA_ENABLE; 5070 4891 5071 - /* Ironlake's plane is forced to pipe, bit 24 is to 5072 - enable color space conversion */ 5073 - if (!HAS_PCH_SPLIT(dev)) { 5074 - if (pipe == 0) 5075 - dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 5076 - else 5077 - dspcntr |= DISPPLANE_SEL_PIPE_B; 5078 - } 5079 - 5080 - if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 5081 - /* Enable pixel doubling when the dot clock is > 90% of the (display) 5082 - * core speed. 5083 - * 5084 - * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 5085 - * pipe == 0 check? 5086 - */ 5087 - if (mode->clock > 5088 - dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 5089 - pipeconf |= PIPECONF_DOUBLE_WIDE; 5090 - else 5091 - pipeconf &= ~PIPECONF_DOUBLE_WIDE; 5092 - } 5093 - 5094 - if (!HAS_PCH_SPLIT(dev)) 5095 - dpll |= DPLL_VCO_ENABLE; 5096 - 5097 4892 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); 5098 4893 drm_mode_debug_printmodeline(mode); 5099 4894 5100 - /* assign to Ironlake registers */ 5101 - if (HAS_PCH_SPLIT(dev)) { 5102 - fp_reg = PCH_FP0(pipe); 5103 - dpll_reg = PCH_DPLL(pipe); 5104 - } else { 5105 - fp_reg = FP0(pipe); 5106 - dpll_reg = DPLL(pipe); 5107 - } 5108 - 5109 4895 /* PCH eDP needs FDI, but CPU eDP does not */ 5110 4896 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5111 - I915_WRITE(fp_reg, fp); 5112 - I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 4897 + I915_WRITE(PCH_FP0(pipe), fp); 4898 + I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 5113 4899 5114 - POSTING_READ(dpll_reg); 4900 + POSTING_READ(PCH_DPLL(pipe)); 5115 4901 udelay(150); 5116 4902 } 5117 4903 ··· 5108 4964 * things on. 5109 4965 */ 5110 4966 if (is_lvds) { 5111 - reg = LVDS; 5112 - if (HAS_PCH_SPLIT(dev)) 5113 - reg = PCH_LVDS; 5114 - 5115 - temp = I915_READ(reg); 4967 + temp = I915_READ(PCH_LVDS); 5116 4968 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5117 4969 if (pipe == 1) { 5118 4970 if (HAS_PCH_CPT(dev)) ··· 5135 4995 * appropriately here, but we need to look more thoroughly into how 5136 4996 * panels behave in the two modes. 5137 4997 */ 5138 - /* set the dithering flag on non-PCH LVDS as needed */ 5139 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 5140 - if (dev_priv->lvds_dither) 5141 - temp |= LVDS_ENABLE_DITHER; 5142 - else 5143 - temp &= ~LVDS_ENABLE_DITHER; 5144 - } 5145 4998 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5146 4999 lvds_sync |= LVDS_HSYNC_POLARITY; 5147 5000 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ··· 5151 5018 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5152 5019 temp |= lvds_sync; 5153 5020 } 5154 - I915_WRITE(reg, temp); 5021 + I915_WRITE(PCH_LVDS, temp); 5155 5022 } 5156 5023 5157 5024 /* set the dithering flag and clear for anything other than a panel. 
*/ 5158 - if (HAS_PCH_SPLIT(dev)) { 5159 - pipeconf &= ~PIPECONF_DITHER_EN; 5160 - pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5161 - if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5162 - pipeconf |= PIPECONF_DITHER_EN; 5163 - pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5164 - } 5025 + pipeconf &= ~PIPECONF_DITHER_EN; 5026 + pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5027 + if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5028 + pipeconf |= PIPECONF_DITHER_EN; 5029 + pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5165 5030 } 5166 5031 5167 5032 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5168 5033 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5169 - } else if (HAS_PCH_SPLIT(dev)) { 5034 + } else { 5170 5035 /* For non-DP output, clear any trans DP clock recovery setting.*/ 5171 5036 I915_WRITE(TRANSDATA_M1(pipe), 0); 5172 5037 I915_WRITE(TRANSDATA_N1(pipe), 0); ··· 5172 5041 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5173 5042 } 5174 5043 5175 - if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5176 - I915_WRITE(dpll_reg, dpll); 5044 + if (!has_edp_encoder || 5045 + intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5046 + I915_WRITE(PCH_DPLL(pipe), dpll); 5177 5047 5178 5048 /* Wait for the clocks to stabilize. */ 5179 - POSTING_READ(dpll_reg); 5049 + POSTING_READ(PCH_DPLL(pipe)); 5180 5050 udelay(150); 5181 5051 5182 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 5183 - temp = 0; 5184 - if (is_sdvo) { 5185 - temp = intel_mode_get_pixel_multiplier(adjusted_mode); 5186 - if (temp > 1) 5187 - temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5188 - else 5189 - temp = 0; 5190 - } 5191 - I915_WRITE(DPLL_MD(pipe), temp); 5192 - } else { 5193 - /* The pixel multiplier can only be updated once the 5194 - * DPLL is enabled and the clocks are stable. 5195 - * 5196 - * So write it again. 
5197 - */ 5198 - I915_WRITE(dpll_reg, dpll); 5199 - } 5052 + /* The pixel multiplier can only be updated once the 5053 + * DPLL is enabled and the clocks are stable. 5054 + * 5055 + * So write it again. 5056 + */ 5057 + I915_WRITE(PCH_DPLL(pipe), dpll); 5200 5058 } 5201 5059 5202 5060 intel_crtc->lowfreq_avail = false; 5203 5061 if (is_lvds && has_reduced_clock && i915_powersave) { 5204 - I915_WRITE(fp_reg + 4, fp2); 5062 + I915_WRITE(PCH_FP1(pipe), fp2); 5205 5063 intel_crtc->lowfreq_avail = true; 5206 5064 if (HAS_PIPE_CXSR(dev)) { 5207 5065 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5208 5066 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 5209 5067 } 5210 5068 } else { 5211 - I915_WRITE(fp_reg + 4, fp); 5069 + I915_WRITE(PCH_FP1(pipe), fp); 5212 5070 if (HAS_PIPE_CXSR(dev)) { 5213 5071 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 5214 5072 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; ··· 5236 5116 (adjusted_mode->crtc_vsync_start - 1) | 5237 5117 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 5238 5118 5239 - /* pipesrc and dspsize control the size that is scaled from, 5240 - * which should always be the user's requested size. 5119 + /* pipesrc controls the size that is scaled from, which should 5120 + * always be the user's requested size. 
5241 5121 */ 5242 - if (!HAS_PCH_SPLIT(dev)) { 5243 - I915_WRITE(DSPSIZE(plane), 5244 - ((mode->vdisplay - 1) << 16) | 5245 - (mode->hdisplay - 1)); 5246 - I915_WRITE(DSPPOS(plane), 0); 5247 - } 5248 5122 I915_WRITE(PIPESRC(pipe), 5249 5123 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 5250 5124 5251 - if (HAS_PCH_SPLIT(dev)) { 5252 - I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 5253 - I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 5254 - I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 5255 - I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 5125 + I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 5126 + I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 5127 + I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 5128 + I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 5256 5129 5257 - if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5258 - ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5259 - } 5130 + if (has_edp_encoder && 5131 + !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5132 + ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5260 5133 } 5261 5134 5262 5135 I915_WRITE(PIPECONF(pipe), pipeconf); 5263 5136 POSTING_READ(PIPECONF(pipe)); 5264 - if (!HAS_PCH_SPLIT(dev)) 5265 - intel_enable_pipe(dev_priv, pipe, false); 5266 5137 5267 5138 intel_wait_for_vblank(dev, pipe); 5268 5139 ··· 5271 5160 ret = intel_pipe_set_base(crtc, x, y, old_fb); 5272 5161 5273 5162 intel_update_watermarks(dev); 5163 + 5164 + return ret; 5165 + } 5166 + 5167 + static int intel_crtc_mode_set(struct drm_crtc *crtc, 5168 + struct drm_display_mode *mode, 5169 + struct drm_display_mode *adjusted_mode, 5170 + int x, int y, 5171 + struct drm_framebuffer *old_fb) 5172 + { 5173 + struct drm_device *dev = crtc->dev; 5174 + struct drm_i915_private *dev_priv = dev->dev_private; 5175 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5176 + int pipe = intel_crtc->pipe; 5177 + int ret; 5178 + 5179 + drm_vblank_pre_modeset(dev, pipe); 5180 + 5181 + ret = 
dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5182 + x, y, old_fb); 5274 5183 5275 5184 drm_vblank_post_modeset(dev, pipe); 5276 5185 ··· 5614 5483 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 5615 5484 }; 5616 5485 5617 - struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 5618 - struct drm_connector *connector, 5619 - struct drm_display_mode *mode, 5620 - int *dpms_mode) 5486 + static struct drm_framebuffer * 5487 + intel_framebuffer_create(struct drm_device *dev, 5488 + struct drm_mode_fb_cmd *mode_cmd, 5489 + struct drm_i915_gem_object *obj) 5490 + { 5491 + struct intel_framebuffer *intel_fb; 5492 + int ret; 5493 + 5494 + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 5495 + if (!intel_fb) { 5496 + drm_gem_object_unreference_unlocked(&obj->base); 5497 + return ERR_PTR(-ENOMEM); 5498 + } 5499 + 5500 + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 5501 + if (ret) { 5502 + drm_gem_object_unreference_unlocked(&obj->base); 5503 + kfree(intel_fb); 5504 + return ERR_PTR(ret); 5505 + } 5506 + 5507 + return &intel_fb->base; 5508 + } 5509 + 5510 + static u32 5511 + intel_framebuffer_pitch_for_width(int width, int bpp) 5512 + { 5513 + u32 pitch = DIV_ROUND_UP(width * bpp, 8); 5514 + return ALIGN(pitch, 64); 5515 + } 5516 + 5517 + static u32 5518 + intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 5519 + { 5520 + u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 5521 + return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); 5522 + } 5523 + 5524 + static struct drm_framebuffer * 5525 + intel_framebuffer_create_for_mode(struct drm_device *dev, 5526 + struct drm_display_mode *mode, 5527 + int depth, int bpp) 5528 + { 5529 + struct drm_i915_gem_object *obj; 5530 + struct drm_mode_fb_cmd mode_cmd; 5531 + 5532 + obj = i915_gem_alloc_object(dev, 5533 + intel_framebuffer_size_for_mode(mode, bpp)); 5534 + if (obj == NULL) 5535 + return ERR_PTR(-ENOMEM); 
5536 + 5537 + mode_cmd.width = mode->hdisplay; 5538 + mode_cmd.height = mode->vdisplay; 5539 + mode_cmd.depth = depth; 5540 + mode_cmd.bpp = bpp; 5541 + mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); 5542 + 5543 + return intel_framebuffer_create(dev, &mode_cmd, obj); 5544 + } 5545 + 5546 + static struct drm_framebuffer * 5547 + mode_fits_in_fbdev(struct drm_device *dev, 5548 + struct drm_display_mode *mode) 5549 + { 5550 + struct drm_i915_private *dev_priv = dev->dev_private; 5551 + struct drm_i915_gem_object *obj; 5552 + struct drm_framebuffer *fb; 5553 + 5554 + if (dev_priv->fbdev == NULL) 5555 + return NULL; 5556 + 5557 + obj = dev_priv->fbdev->ifb.obj; 5558 + if (obj == NULL) 5559 + return NULL; 5560 + 5561 + fb = &dev_priv->fbdev->ifb.base; 5562 + if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay, 5563 + fb->bits_per_pixel)) 5564 + return NULL; 5565 + 5566 + if (obj->base.size < mode->vdisplay * fb->pitch) 5567 + return NULL; 5568 + 5569 + return fb; 5570 + } 5571 + 5572 + bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 5573 + struct drm_connector *connector, 5574 + struct drm_display_mode *mode, 5575 + struct intel_load_detect_pipe *old) 5621 5576 { 5622 5577 struct intel_crtc *intel_crtc; 5623 5578 struct drm_crtc *possible_crtc; 5624 - struct drm_crtc *supported_crtc =NULL; 5625 5579 struct drm_encoder *encoder = &intel_encoder->base; 5626 5580 struct drm_crtc *crtc = NULL; 5627 5581 struct drm_device *dev = encoder->dev; 5628 - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5629 - struct drm_crtc_helper_funcs *crtc_funcs; 5582 + struct drm_framebuffer *old_fb; 5630 5583 int i = -1; 5584 + 5585 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 5586 + connector->base.id, drm_get_connector_name(connector), 5587 + encoder->base.id, drm_get_encoder_name(encoder)); 5631 5588 5632 5589 /* 5633 5590 * Algorithm gets a little messy: 5591 + * 5634 5592 * - if the connector 
already has an assigned crtc, use it (but make 5635 5593 * sure it's on first) 5594 + * 5636 5595 * - try to find the first unused crtc that can drive this connector, 5637 5596 * and use that if we find one 5638 - * - if there are no unused crtcs available, try to use the first 5639 - * one we found that supports the connector 5640 5597 */ 5641 5598 5642 5599 /* See if we already have a CRTC for this connector */ 5643 5600 if (encoder->crtc) { 5644 5601 crtc = encoder->crtc; 5645 - /* Make sure the crtc and connector are running */ 5602 + 5646 5603 intel_crtc = to_intel_crtc(crtc); 5647 - *dpms_mode = intel_crtc->dpms_mode; 5604 + old->dpms_mode = intel_crtc->dpms_mode; 5605 + old->load_detect_temp = false; 5606 + 5607 + /* Make sure the crtc and connector are running */ 5648 5608 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5609 + struct drm_encoder_helper_funcs *encoder_funcs; 5610 + struct drm_crtc_helper_funcs *crtc_funcs; 5611 + 5649 5612 crtc_funcs = crtc->helper_private; 5650 5613 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 5614 + 5615 + encoder_funcs = encoder->helper_private; 5651 5616 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 5652 5617 } 5653 - return crtc; 5618 + 5619 + return true; 5654 5620 } 5655 5621 5656 5622 /* Find an unused one (if possible) */ ··· 5759 5531 crtc = possible_crtc; 5760 5532 break; 5761 5533 } 5762 - if (!supported_crtc) 5763 - supported_crtc = possible_crtc; 5764 5534 } 5765 5535 5766 5536 /* 5767 5537 * If we didn't find an unused CRTC, don't use any. 
5768 5538 */ 5769 5539 if (!crtc) { 5770 - return NULL; 5540 + DRM_DEBUG_KMS("no pipe available for load-detect\n"); 5541 + return false; 5771 5542 } 5772 5543 5773 5544 encoder->crtc = crtc; 5774 5545 connector->encoder = encoder; 5775 - intel_encoder->load_detect_temp = true; 5776 5546 5777 5547 intel_crtc = to_intel_crtc(crtc); 5778 - *dpms_mode = intel_crtc->dpms_mode; 5548 + old->dpms_mode = intel_crtc->dpms_mode; 5549 + old->load_detect_temp = true; 5550 + old->release_fb = NULL; 5779 5551 5780 - if (!crtc->enabled) { 5781 - if (!mode) 5782 - mode = &load_detect_mode; 5783 - drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); 5784 - } else { 5785 - if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5786 - crtc_funcs = crtc->helper_private; 5787 - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 5788 - } 5552 + if (!mode) 5553 + mode = &load_detect_mode; 5789 5554 5790 - /* Add this connector to the crtc */ 5791 - encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); 5792 - encoder_funcs->commit(encoder); 5555 + old_fb = crtc->fb; 5556 + 5557 + /* We need a framebuffer large enough to accommodate all accesses 5558 + * that the plane may generate whilst we perform load detection. 5559 + * We can not rely on the fbcon either being present (we get called 5560 + * during its initialisation to detect all boot displays, or it may 5561 + * not even exist) or that it is large enough to satisfy the 5562 + * requested mode. 
5563 + */ 5564 + crtc->fb = mode_fits_in_fbdev(dev, mode); 5565 + if (crtc->fb == NULL) { 5566 + DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 5567 + crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 5568 + old->release_fb = crtc->fb; 5569 + } else 5570 + DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 5571 + if (IS_ERR(crtc->fb)) { 5572 + DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 5573 + crtc->fb = old_fb; 5574 + return false; 5793 5575 } 5576 + 5577 + if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { 5578 + DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 5579 + if (old->release_fb) 5580 + old->release_fb->funcs->destroy(old->release_fb); 5581 + crtc->fb = old_fb; 5582 + return false; 5583 + } 5584 + 5794 5585 /* let the connector get through one full cycle before testing */ 5795 5586 intel_wait_for_vblank(dev, intel_crtc->pipe); 5796 5587 5797 - return crtc; 5588 + return true; 5798 5589 } 5799 5590 5800 5591 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 5801 - struct drm_connector *connector, int dpms_mode) 5592 + struct drm_connector *connector, 5593 + struct intel_load_detect_pipe *old) 5802 5594 { 5803 5595 struct drm_encoder *encoder = &intel_encoder->base; 5804 5596 struct drm_device *dev = encoder->dev; ··· 5826 5578 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5827 5579 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 5828 5580 5829 - if (intel_encoder->load_detect_temp) { 5830 - encoder->crtc = NULL; 5581 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 5582 + connector->base.id, drm_get_connector_name(connector), 5583 + encoder->base.id, drm_get_encoder_name(encoder)); 5584 + 5585 + if (old->load_detect_temp) { 5831 5586 connector->encoder = NULL; 5832 - intel_encoder->load_detect_temp = false; 5833 - crtc->enabled = drm_helper_crtc_in_use(crtc); 5834 5587 
drm_helper_disable_unused_functions(dev); 5588 + 5589 + if (old->release_fb) 5590 + old->release_fb->funcs->destroy(old->release_fb); 5591 + 5592 + return; 5835 5593 } 5836 5594 5837 5595 /* Switch crtc and encoder back off if necessary */ 5838 - if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 5839 - if (encoder->crtc == crtc) 5840 - encoder_funcs->dpms(encoder, dpms_mode); 5841 - crtc_funcs->dpms(crtc, dpms_mode); 5596 + if (old->dpms_mode != DRM_MODE_DPMS_ON) { 5597 + encoder_funcs->dpms(encoder, old->dpms_mode); 5598 + crtc_funcs->dpms(crtc, old->dpms_mode); 5842 5599 } 5843 5600 } 5844 5601 ··· 6438 6185 break; 6439 6186 6440 6187 case 6: 6188 + case 7: 6441 6189 OUT_RING(MI_DISPLAY_FLIP | 6442 6190 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6443 6191 OUT_RING(fb->pitch | obj->tiling_mode); ··· 6758 6504 } 6759 6505 6760 6506 intel_panel_setup_backlight(dev); 6507 + 6508 + /* disable all the possible outputs/crtcs before entering KMS mode */ 6509 + drm_helper_disable_unused_functions(dev); 6761 6510 } 6762 6511 6763 6512 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) ··· 6828 6571 struct drm_mode_fb_cmd *mode_cmd) 6829 6572 { 6830 6573 struct drm_i915_gem_object *obj; 6831 - struct intel_framebuffer *intel_fb; 6832 - int ret; 6833 6574 6834 6575 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); 6835 6576 if (&obj->base == NULL) 6836 6577 return ERR_PTR(-ENOENT); 6837 6578 6838 - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6839 - if (!intel_fb) { 6840 - drm_gem_object_unreference_unlocked(&obj->base); 6841 - return ERR_PTR(-ENOMEM); 6842 - } 6843 - 6844 - ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 6845 - if (ret) { 6846 - drm_gem_object_unreference_unlocked(&obj->base); 6847 - kfree(intel_fb); 6848 - return ERR_PTR(ret); 6849 - } 6850 - 6851 - return &intel_fb->base; 6579 + return intel_framebuffer_create(dev, mode_cmd, obj); 6852 6580 } 6853 6581 6854 6582 static const struct 
drm_mode_config_funcs intel_mode_funcs = { ··· 6847 6605 struct drm_i915_gem_object *ctx; 6848 6606 int ret; 6849 6607 6608 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 6609 + 6850 6610 ctx = i915_gem_alloc_object(dev, 4096); 6851 6611 if (!ctx) { 6852 6612 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 6853 6613 return NULL; 6854 6614 } 6855 6615 6856 - mutex_lock(&dev->struct_mutex); 6857 6616 ret = i915_gem_object_pin(ctx, 4096, true); 6858 6617 if (ret) { 6859 6618 DRM_ERROR("failed to pin power context: %d\n", ret); ··· 6866 6623 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 6867 6624 goto err_unpin; 6868 6625 } 6869 - mutex_unlock(&dev->struct_mutex); 6870 6626 6871 6627 return ctx; 6872 6628 ··· 7000 6758 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 7001 6759 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 7002 6760 I915_WRITE(GEN6_PMIER, 0); 6761 + 6762 + spin_lock_irq(&dev_priv->rps_lock); 6763 + dev_priv->pm_iir = 0; 6764 + spin_unlock_irq(&dev_priv->rps_lock); 6765 + 7003 6766 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 7004 6767 } 7005 6768 ··· 7098 6851 { 7099 6852 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 7100 6853 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 7101 - u32 pcu_mbox; 6854 + u32 pcu_mbox, rc6_mask = 0; 7102 6855 int cur_freq, min_freq, max_freq; 7103 6856 int i; 7104 6857 ··· 7109 6862 * userspace... 
7110 6863 */ 7111 6864 I915_WRITE(GEN6_RC_STATE, 0); 7112 - __gen6_gt_force_wake_get(dev_priv); 6865 + mutex_lock(&dev_priv->dev->struct_mutex); 6866 + gen6_gt_force_wake_get(dev_priv); 7113 6867 7114 6868 /* disable the counters and set deterministic thresholds */ 7115 6869 I915_WRITE(GEN6_RC_CONTROL, 0); ··· 7130 6882 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 7131 6883 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 7132 6884 6885 + if (i915_enable_rc6) 6886 + rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | 6887 + GEN6_RC_CTL_RC6_ENABLE; 6888 + 7133 6889 I915_WRITE(GEN6_RC_CONTROL, 7134 - GEN6_RC_CTL_RC6p_ENABLE | 7135 - GEN6_RC_CTL_RC6_ENABLE | 6890 + rc6_mask | 7136 6891 GEN6_RC_CTL_EI_MODE(1) | 7137 6892 GEN6_RC_CTL_HW_ENABLE); 7138 6893 ··· 7207 6956 GEN6_PM_RP_DOWN_THRESHOLD | 7208 6957 GEN6_PM_RP_UP_EI_EXPIRED | 7209 6958 GEN6_PM_RP_DOWN_EI_EXPIRED); 6959 + spin_lock_irq(&dev_priv->rps_lock); 6960 + WARN_ON(dev_priv->pm_iir != 0); 7210 6961 I915_WRITE(GEN6_PMIMR, 0); 6962 + spin_unlock_irq(&dev_priv->rps_lock); 7211 6963 /* enable all PM interrupts */ 7212 6964 I915_WRITE(GEN6_PMINTRMSK, 0); 7213 6965 7214 - __gen6_gt_force_wake_put(dev_priv); 6966 + gen6_gt_force_wake_put(dev_priv); 6967 + mutex_unlock(&dev_priv->dev->struct_mutex); 7215 6968 } 7216 6969 7217 - void intel_enable_clock_gating(struct drm_device *dev) 6970 + static void ironlake_init_clock_gating(struct drm_device *dev) 6971 + { 6972 + struct drm_i915_private *dev_priv = dev->dev_private; 6973 + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 6974 + 6975 + /* Required for FBC */ 6976 + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 6977 + DPFCRUNIT_CLOCK_GATE_DISABLE | 6978 + DPFDUNIT_CLOCK_GATE_DISABLE; 6979 + /* Required for CxSR */ 6980 + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; 6981 + 6982 + I915_WRITE(PCH_3DCGDIS0, 6983 + MARIUNIT_CLOCK_GATE_DISABLE | 6984 + SVSMUNIT_CLOCK_GATE_DISABLE); 6985 + I915_WRITE(PCH_3DCGDIS1, 6986 + VFMUNIT_CLOCK_GATE_DISABLE); 6987 + 6988 + 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 6989 + 6990 + /* 6991 + * According to the spec the following bits should be set in 6992 + * order to enable memory self-refresh 6993 + * The bit 22/21 of 0x42004 6994 + * The bit 5 of 0x42020 6995 + * The bit 15 of 0x45000 6996 + */ 6997 + I915_WRITE(ILK_DISPLAY_CHICKEN2, 6998 + (I915_READ(ILK_DISPLAY_CHICKEN2) | 6999 + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 7000 + I915_WRITE(ILK_DSPCLK_GATE, 7001 + (I915_READ(ILK_DSPCLK_GATE) | 7002 + ILK_DPARB_CLK_GATE)); 7003 + I915_WRITE(DISP_ARB_CTL, 7004 + (I915_READ(DISP_ARB_CTL) | 7005 + DISP_FBC_WM_DIS)); 7006 + I915_WRITE(WM3_LP_ILK, 0); 7007 + I915_WRITE(WM2_LP_ILK, 0); 7008 + I915_WRITE(WM1_LP_ILK, 0); 7009 + 7010 + /* 7011 + * Based on the document from hardware guys the following bits 7012 + * should be set unconditionally in order to enable FBC. 7013 + * The bit 22 of 0x42000 7014 + * The bit 22 of 0x42004 7015 + * The bit 7,8,9 of 0x42020. 7016 + */ 7017 + if (IS_IRONLAKE_M(dev)) { 7018 + I915_WRITE(ILK_DISPLAY_CHICKEN1, 7019 + I915_READ(ILK_DISPLAY_CHICKEN1) | 7020 + ILK_FBCQ_DIS); 7021 + I915_WRITE(ILK_DISPLAY_CHICKEN2, 7022 + I915_READ(ILK_DISPLAY_CHICKEN2) | 7023 + ILK_DPARB_GATE); 7024 + I915_WRITE(ILK_DSPCLK_GATE, 7025 + I915_READ(ILK_DSPCLK_GATE) | 7026 + ILK_DPFC_DIS1 | 7027 + ILK_DPFC_DIS2 | 7028 + ILK_CLK_FBC); 7029 + } 7030 + 7031 + I915_WRITE(ILK_DISPLAY_CHICKEN2, 7032 + I915_READ(ILK_DISPLAY_CHICKEN2) | 7033 + ILK_ELPIN_409_SELECT); 7034 + I915_WRITE(_3D_CHICKEN2, 7035 + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 7036 + _3D_CHICKEN2_WM_READ_PIPELINED); 7037 + } 7038 + 7039 + static void gen6_init_clock_gating(struct drm_device *dev) 7218 7040 { 7219 7041 struct drm_i915_private *dev_priv = dev->dev_private; 7220 7042 int pipe; 7043 + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 7044 + 7045 + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 7046 + 7047 + I915_WRITE(ILK_DISPLAY_CHICKEN2, 7048 + I915_READ(ILK_DISPLAY_CHICKEN2) | 7049 + ILK_ELPIN_409_SELECT); 7050 + 
7051 + I915_WRITE(WM3_LP_ILK, 0); 7052 + I915_WRITE(WM2_LP_ILK, 0); 7053 + I915_WRITE(WM1_LP_ILK, 0); 7221 7054 7222 7055 /* 7223 - * Disable clock gating reported to work incorrectly according to the 7224 - * specs, but enable as much else as we can. 7056 + * According to the spec the following bits should be 7057 + * set in order to enable memory self-refresh and fbc: 7058 + * The bit21 and bit22 of 0x42000 7059 + * The bit21 and bit22 of 0x42004 7060 + * The bit5 and bit7 of 0x42020 7061 + * The bit14 of 0x70180 7062 + * The bit14 of 0x71180 7225 7063 */ 7226 - if (HAS_PCH_SPLIT(dev)) { 7227 - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 7064 + I915_WRITE(ILK_DISPLAY_CHICKEN1, 7065 + I915_READ(ILK_DISPLAY_CHICKEN1) | 7066 + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7067 + I915_WRITE(ILK_DISPLAY_CHICKEN2, 7068 + I915_READ(ILK_DISPLAY_CHICKEN2) | 7069 + ILK_DPARB_GATE | ILK_VSDPFD_FULL); 7070 + I915_WRITE(ILK_DSPCLK_GATE, 7071 + I915_READ(ILK_DSPCLK_GATE) | 7072 + ILK_DPARB_CLK_GATE | 7073 + ILK_DPFD_CLK_GATE); 7228 7074 7229 - if (IS_GEN5(dev)) { 7230 - /* Required for FBC */ 7231 - dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 7232 - DPFCRUNIT_CLOCK_GATE_DISABLE | 7233 - DPFDUNIT_CLOCK_GATE_DISABLE; 7234 - /* Required for CxSR */ 7235 - dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; 7075 + for_each_pipe(pipe) 7076 + I915_WRITE(DSPCNTR(pipe), 7077 + I915_READ(DSPCNTR(pipe)) | 7078 + DISPPLANE_TRICKLE_FEED_DISABLE); 7079 + } 7236 7080 7237 - I915_WRITE(PCH_3DCGDIS0, 7238 - MARIUNIT_CLOCK_GATE_DISABLE | 7239 - SVSMUNIT_CLOCK_GATE_DISABLE); 7240 - I915_WRITE(PCH_3DCGDIS1, 7241 - VFMUNIT_CLOCK_GATE_DISABLE); 7242 - } 7081 + static void ivybridge_init_clock_gating(struct drm_device *dev) 7082 + { 7083 + struct drm_i915_private *dev_priv = dev->dev_private; 7084 + int pipe; 7085 + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 7243 7086 7244 - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 7087 + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 7245 7088 7246 - /* 7247 - * On 
Ibex Peak and Cougar Point, we need to disable clock 7248 - * gating for the panel power sequencer or it will fail to 7249 - * start up when no ports are active. 7250 - */ 7251 - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 7089 + I915_WRITE(WM3_LP_ILK, 0); 7090 + I915_WRITE(WM2_LP_ILK, 0); 7091 + I915_WRITE(WM1_LP_ILK, 0); 7252 7092 7253 - /* 7254 - * According to the spec the following bits should be set in 7255 - * order to enable memory self-refresh 7256 - * The bit 22/21 of 0x42004 7257 - * The bit 5 of 0x42020 7258 - * The bit 15 of 0x45000 7259 - */ 7260 - if (IS_GEN5(dev)) { 7261 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 7262 - (I915_READ(ILK_DISPLAY_CHICKEN2) | 7263 - ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 7264 - I915_WRITE(ILK_DSPCLK_GATE, 7265 - (I915_READ(ILK_DSPCLK_GATE) | 7266 - ILK_DPARB_CLK_GATE)); 7267 - I915_WRITE(DISP_ARB_CTL, 7268 - (I915_READ(DISP_ARB_CTL) | 7269 - DISP_FBC_WM_DIS)); 7270 - I915_WRITE(WM3_LP_ILK, 0); 7271 - I915_WRITE(WM2_LP_ILK, 0); 7272 - I915_WRITE(WM1_LP_ILK, 0); 7273 - } 7274 - /* 7275 - * Based on the document from hardware guys the following bits 7276 - * should be set unconditionally in order to enable FBC. 7277 - * The bit 22 of 0x42000 7278 - * The bit 22 of 0x42004 7279 - * The bit 7,8,9 of 0x42020. 
7280 - */ 7281 - if (IS_IRONLAKE_M(dev)) { 7282 - I915_WRITE(ILK_DISPLAY_CHICKEN1, 7283 - I915_READ(ILK_DISPLAY_CHICKEN1) | 7284 - ILK_FBCQ_DIS); 7285 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 7286 - I915_READ(ILK_DISPLAY_CHICKEN2) | 7287 - ILK_DPARB_GATE); 7288 - I915_WRITE(ILK_DSPCLK_GATE, 7289 - I915_READ(ILK_DSPCLK_GATE) | 7290 - ILK_DPFC_DIS1 | 7291 - ILK_DPFC_DIS2 | 7292 - ILK_CLK_FBC); 7293 - } 7093 + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 7294 7094 7295 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 7296 - I915_READ(ILK_DISPLAY_CHICKEN2) | 7297 - ILK_ELPIN_409_SELECT); 7095 + for_each_pipe(pipe) 7096 + I915_WRITE(DSPCNTR(pipe), 7097 + I915_READ(DSPCNTR(pipe)) | 7098 + DISPPLANE_TRICKLE_FEED_DISABLE); 7099 + } 7298 7100 7299 - if (IS_GEN5(dev)) { 7300 - I915_WRITE(_3D_CHICKEN2, 7301 - _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 7302 - _3D_CHICKEN2_WM_READ_PIPELINED); 7303 - } 7101 + static void g4x_init_clock_gating(struct drm_device *dev) 7102 + { 7103 + struct drm_i915_private *dev_priv = dev->dev_private; 7104 + uint32_t dspclk_gate; 7304 7105 7305 - if (IS_GEN6(dev)) { 7306 - I915_WRITE(WM3_LP_ILK, 0); 7307 - I915_WRITE(WM2_LP_ILK, 0); 7308 - I915_WRITE(WM1_LP_ILK, 0); 7106 + I915_WRITE(RENCLK_GATE_D1, 0); 7107 + I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 7108 + GS_UNIT_CLOCK_GATE_DISABLE | 7109 + CL_UNIT_CLOCK_GATE_DISABLE); 7110 + I915_WRITE(RAMCLK_GATE_D, 0); 7111 + dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 7112 + OVRUNIT_CLOCK_GATE_DISABLE | 7113 + OVCUNIT_CLOCK_GATE_DISABLE; 7114 + if (IS_GM45(dev)) 7115 + dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 7116 + I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 7117 + } 7309 7118 7310 - /* 7311 - * According to the spec the following bits should be 7312 - * set in order to enable memory self-refresh and fbc: 7313 - * The bit21 and bit22 of 0x42000 7314 - * The bit21 and bit22 of 0x42004 7315 - * The bit5 and bit7 of 0x42020 7316 - * The bit14 of 0x70180 7317 - * The bit14 of 0x71180 7318 - */ 7319 - 
I915_WRITE(ILK_DISPLAY_CHICKEN1, 7320 - I915_READ(ILK_DISPLAY_CHICKEN1) | 7321 - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7322 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 7323 - I915_READ(ILK_DISPLAY_CHICKEN2) | 7324 - ILK_DPARB_GATE | ILK_VSDPFD_FULL); 7325 - I915_WRITE(ILK_DSPCLK_GATE, 7326 - I915_READ(ILK_DSPCLK_GATE) | 7327 - ILK_DPARB_CLK_GATE | 7328 - ILK_DPFD_CLK_GATE); 7119 + static void crestline_init_clock_gating(struct drm_device *dev) 7120 + { 7121 + struct drm_i915_private *dev_priv = dev->dev_private; 7329 7122 7330 - for_each_pipe(pipe) 7331 - I915_WRITE(DSPCNTR(pipe), 7332 - I915_READ(DSPCNTR(pipe)) | 7333 - DISPPLANE_TRICKLE_FEED_DISABLE); 7334 - } 7335 - } else if (IS_G4X(dev)) { 7336 - uint32_t dspclk_gate; 7337 - I915_WRITE(RENCLK_GATE_D1, 0); 7338 - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 7339 - GS_UNIT_CLOCK_GATE_DISABLE | 7340 - CL_UNIT_CLOCK_GATE_DISABLE); 7341 - I915_WRITE(RAMCLK_GATE_D, 0); 7342 - dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 7343 - OVRUNIT_CLOCK_GATE_DISABLE | 7344 - OVCUNIT_CLOCK_GATE_DISABLE; 7345 - if (IS_GM45(dev)) 7346 - dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 7347 - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 7348 - } else if (IS_CRESTLINE(dev)) { 7349 - I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 7350 - I915_WRITE(RENCLK_GATE_D2, 0); 7351 - I915_WRITE(DSPCLK_GATE_D, 0); 7352 - I915_WRITE(RAMCLK_GATE_D, 0); 7353 - I915_WRITE16(DEUC, 0); 7354 - } else if (IS_BROADWATER(dev)) { 7355 - I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7356 - I965_RCC_CLOCK_GATE_DISABLE | 7357 - I965_RCPB_CLOCK_GATE_DISABLE | 7358 - I965_ISC_CLOCK_GATE_DISABLE | 7359 - I965_FBC_CLOCK_GATE_DISABLE); 7360 - I915_WRITE(RENCLK_GATE_D2, 0); 7361 - } else if (IS_GEN3(dev)) { 7362 - u32 dstate = I915_READ(D_STATE); 7123 + I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 7124 + I915_WRITE(RENCLK_GATE_D2, 0); 7125 + I915_WRITE(DSPCLK_GATE_D, 0); 7126 + I915_WRITE(RAMCLK_GATE_D, 0); 7127 + I915_WRITE16(DEUC, 0); 7128 + } 
7363 7129 7364 - dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7365 - DSTATE_DOT_CLOCK_GATING; 7366 - I915_WRITE(D_STATE, dstate); 7367 - } else if (IS_I85X(dev) || IS_I865G(dev)) { 7368 - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7369 - } else if (IS_I830(dev)) { 7370 - I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7371 - } 7130 + static void broadwater_init_clock_gating(struct drm_device *dev) 7131 + { 7132 + struct drm_i915_private *dev_priv = dev->dev_private; 7133 + 7134 + I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7135 + I965_RCC_CLOCK_GATE_DISABLE | 7136 + I965_RCPB_CLOCK_GATE_DISABLE | 7137 + I965_ISC_CLOCK_GATE_DISABLE | 7138 + I965_FBC_CLOCK_GATE_DISABLE); 7139 + I915_WRITE(RENCLK_GATE_D2, 0); 7140 + } 7141 + 7142 + static void gen3_init_clock_gating(struct drm_device *dev) 7143 + { 7144 + struct drm_i915_private *dev_priv = dev->dev_private; 7145 + u32 dstate = I915_READ(D_STATE); 7146 + 7147 + dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7148 + DSTATE_DOT_CLOCK_GATING; 7149 + I915_WRITE(D_STATE, dstate); 7150 + } 7151 + 7152 + static void i85x_init_clock_gating(struct drm_device *dev) 7153 + { 7154 + struct drm_i915_private *dev_priv = dev->dev_private; 7155 + 7156 + I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7157 + } 7158 + 7159 + static void i830_init_clock_gating(struct drm_device *dev) 7160 + { 7161 + struct drm_i915_private *dev_priv = dev->dev_private; 7162 + 7163 + I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7164 + } 7165 + 7166 + static void ibx_init_clock_gating(struct drm_device *dev) 7167 + { 7168 + struct drm_i915_private *dev_priv = dev->dev_private; 7169 + 7170 + /* 7171 + * On Ibex Peak and Cougar Point, we need to disable clock 7172 + * gating for the panel power sequencer or it will fail to 7173 + * start up when no ports are active. 
7174 + */ 7175 + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 7176 + } 7177 + 7178 + static void cpt_init_clock_gating(struct drm_device *dev) 7179 + { 7180 + struct drm_i915_private *dev_priv = dev->dev_private; 7181 + 7182 + /* 7183 + * On Ibex Peak and Cougar Point, we need to disable clock 7184 + * gating for the panel power sequencer or it will fail to 7185 + * start up when no ports are active. 7186 + */ 7187 + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 7188 + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 7189 + DPLS_EDP_PPS_FIX_DIS); 7372 7190 } 7373 7191 7374 7192 static void ironlake_teardown_rc6(struct drm_device *dev) ··· 7507 7187 if (!i915_enable_rc6) 7508 7188 return; 7509 7189 7190 + mutex_lock(&dev->struct_mutex); 7510 7191 ret = ironlake_setup_rc6(dev); 7511 - if (ret) 7192 + if (ret) { 7193 + mutex_unlock(&dev->struct_mutex); 7512 7194 return; 7195 + } 7513 7196 7514 7197 /* 7515 7198 * GPU can automatically power down the render unit if given a page ··· 7521 7198 ret = BEGIN_LP_RING(6); 7522 7199 if (ret) { 7523 7200 ironlake_teardown_rc6(dev); 7201 + mutex_unlock(&dev->struct_mutex); 7524 7202 return; 7525 7203 } 7526 7204 ··· 7537 7213 OUT_RING(MI_FLUSH); 7538 7214 ADVANCE_LP_RING(); 7539 7215 7216 + /* 7217 + * Wait for the command parser to advance past MI_SET_CONTEXT. 
The HW 7218 + * does an implicit flush, combined with MI_FLUSH above, it should be 7219 + * safe to assume that renderctx is valid 7220 + */ 7221 + ret = intel_wait_ring_idle(LP_RING(dev_priv)); 7222 + if (ret) { 7223 + DRM_ERROR("failed to enable ironlake power power savings\n"); 7224 + ironlake_teardown_rc6(dev); 7225 + mutex_unlock(&dev->struct_mutex); 7226 + return; 7227 + } 7228 + 7540 7229 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 7541 7230 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 7231 + mutex_unlock(&dev->struct_mutex); 7542 7232 } 7543 7233 7234 + void intel_init_clock_gating(struct drm_device *dev) 7235 + { 7236 + struct drm_i915_private *dev_priv = dev->dev_private; 7237 + 7238 + dev_priv->display.init_clock_gating(dev); 7239 + 7240 + if (dev_priv->display.init_pch_clock_gating) 7241 + dev_priv->display.init_pch_clock_gating(dev); 7242 + } 7544 7243 7545 7244 /* Set up chip specific display functions */ 7546 7245 static void intel_init_display(struct drm_device *dev) ··· 7571 7224 struct drm_i915_private *dev_priv = dev->dev_private; 7572 7225 7573 7226 /* We always want a DPMS function */ 7574 - if (HAS_PCH_SPLIT(dev)) 7227 + if (HAS_PCH_SPLIT(dev)) { 7575 7228 dev_priv->display.dpms = ironlake_crtc_dpms; 7576 - else 7229 + dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 7230 + } else { 7577 7231 dev_priv->display.dpms = i9xx_crtc_dpms; 7232 + dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 7233 + } 7578 7234 7579 7235 if (I915_HAS_FBC(dev)) { 7580 7236 if (HAS_PCH_SPLIT(dev)) { ··· 7621 7271 7622 7272 /* For FIFO watermark updates */ 7623 7273 if (HAS_PCH_SPLIT(dev)) { 7274 + if (HAS_PCH_IBX(dev)) 7275 + dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 7276 + else if (HAS_PCH_CPT(dev)) 7277 + dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; 7278 + 7624 7279 if (IS_GEN5(dev)) { 7625 7280 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 7626 7281 dev_priv->display.update_wm = 
ironlake_update_wm; ··· 7634 7279 "Disable CxSR\n"); 7635 7280 dev_priv->display.update_wm = NULL; 7636 7281 } 7282 + dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 7283 + dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 7637 7284 } else if (IS_GEN6(dev)) { 7638 7285 if (SNB_READ_WM0_LATENCY()) { 7639 7286 dev_priv->display.update_wm = sandybridge_update_wm; ··· 7644 7287 "Disable CxSR\n"); 7645 7288 dev_priv->display.update_wm = NULL; 7646 7289 } 7290 + dev_priv->display.fdi_link_train = gen6_fdi_link_train; 7291 + dev_priv->display.init_clock_gating = gen6_init_clock_gating; 7292 + } else if (IS_IVYBRIDGE(dev)) { 7293 + /* FIXME: detect B0+ stepping and use auto training */ 7294 + dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 7295 + if (SNB_READ_WM0_LATENCY()) { 7296 + dev_priv->display.update_wm = sandybridge_update_wm; 7297 + } else { 7298 + DRM_DEBUG_KMS("Failed to read display plane latency. " 7299 + "Disable CxSR\n"); 7300 + dev_priv->display.update_wm = NULL; 7301 + } 7302 + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 7303 + 7647 7304 } else 7648 7305 dev_priv->display.update_wm = NULL; 7649 7306 } else if (IS_PINEVIEW(dev)) { ··· 7675 7304 dev_priv->display.update_wm = NULL; 7676 7305 } else 7677 7306 dev_priv->display.update_wm = pineview_update_wm; 7678 - } else if (IS_G4X(dev)) 7307 + } else if (IS_G4X(dev)) { 7679 7308 dev_priv->display.update_wm = g4x_update_wm; 7680 - else if (IS_GEN4(dev)) 7309 + dev_priv->display.init_clock_gating = g4x_init_clock_gating; 7310 + } else if (IS_GEN4(dev)) { 7681 7311 dev_priv->display.update_wm = i965_update_wm; 7682 - else if (IS_GEN3(dev)) { 7312 + if (IS_CRESTLINE(dev)) 7313 + dev_priv->display.init_clock_gating = crestline_init_clock_gating; 7314 + else if (IS_BROADWATER(dev)) 7315 + dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 7316 + } else if (IS_GEN3(dev)) { 7683 7317 dev_priv->display.update_wm = i9xx_update_wm; 7684 
7318 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7319 + dev_priv->display.init_clock_gating = gen3_init_clock_gating; 7320 + } else if (IS_I865G(dev)) { 7321 + dev_priv->display.update_wm = i830_update_wm; 7322 + dev_priv->display.init_clock_gating = i85x_init_clock_gating; 7323 + dev_priv->display.get_fifo_size = i830_get_fifo_size; 7685 7324 } else if (IS_I85X(dev)) { 7686 7325 dev_priv->display.update_wm = i9xx_update_wm; 7687 7326 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 7327 + dev_priv->display.init_clock_gating = i85x_init_clock_gating; 7688 7328 } else { 7689 7329 dev_priv->display.update_wm = i830_update_wm; 7330 + dev_priv->display.init_clock_gating = i830_init_clock_gating; 7690 7331 if (IS_845G(dev)) 7691 7332 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7692 7333 else ··· 7824 7441 intel_crtc_init(dev, i); 7825 7442 } 7826 7443 7827 - intel_setup_outputs(dev); 7828 - 7829 - intel_enable_clock_gating(dev); 7830 - 7831 7444 /* Just disable it once at startup */ 7832 7445 i915_disable_vga(dev); 7446 + intel_setup_outputs(dev); 7447 + 7448 + intel_init_clock_gating(dev); 7833 7449 7834 7450 if (IS_IRONLAKE_M(dev)) { 7835 7451 ironlake_enable_drps(dev); ··· 7838 7456 if (IS_GEN6(dev)) 7839 7457 gen6_enable_rps(dev_priv); 7840 7458 7841 - if (IS_IRONLAKE_M(dev)) 7842 - ironlake_enable_rc6(dev); 7843 - 7844 7459 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 7845 7460 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 7846 7461 (unsigned long)dev); 7462 + } 7463 + 7464 + void intel_modeset_gem_init(struct drm_device *dev) 7465 + { 7466 + if (IS_IRONLAKE_M(dev)) 7467 + ironlake_enable_rc6(dev); 7847 7468 7848 7469 intel_setup_overlay(dev); 7849 7470 }
+13 -6
drivers/gpu/drm/i915/intel_drv.h
··· 140 140 struct intel_encoder { 141 141 struct drm_encoder base; 142 142 int type; 143 - bool load_detect_temp; 144 143 bool needs_tv_clock; 145 144 void (*hot_plug)(struct intel_encoder *); 146 145 int crtc_mask; ··· 290 291 struct drm_file *file_priv); 291 292 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 292 293 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 293 - extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 294 - struct drm_connector *connector, 295 - struct drm_display_mode *mode, 296 - int *dpms_mode); 294 + 295 + struct intel_load_detect_pipe { 296 + struct drm_framebuffer *release_fb; 297 + bool load_detect_temp; 298 + int dpms_mode; 299 + }; 300 + extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 301 + struct drm_connector *connector, 302 + struct drm_display_mode *mode, 303 + struct intel_load_detect_pipe *old); 297 304 extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 298 305 struct drm_connector *connector, 299 - int dpms_mode); 306 + struct intel_load_detect_pipe *old); 300 307 301 308 extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 302 309 extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); ··· 344 339 345 340 extern void intel_fb_output_poll_changed(struct drm_device *dev); 346 341 extern void intel_fb_restore_mode(struct drm_device *dev); 342 + 343 + extern void intel_init_clock_gating(struct drm_device *dev); 347 344 #endif /* __INTEL_DRV_H__ */
+57 -39
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 236 236 ret = -ENOMEM; 237 237 goto err; 238 238 } 239 - obj->agp_type = AGP_USER_CACHED_MEMORY; 239 + obj->cache_level = I915_CACHE_LLC; 240 240 241 241 ret = i915_gem_object_pin(obj, 4096, true); 242 242 if (ret) ··· 286 286 287 287 if (INTEL_INFO(dev)->gen > 3) { 288 288 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 289 - if (IS_GEN6(dev)) 289 + if (IS_GEN6(dev) || IS_GEN7(dev)) 290 290 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 291 291 I915_WRITE(MI_MODE, mode); 292 292 } ··· 551 551 552 552 void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 553 553 { 554 + struct drm_device *dev = ring->dev; 554 555 drm_i915_private_t *dev_priv = ring->dev->dev_private; 555 - u32 mmio = IS_GEN6(ring->dev) ? 556 - RING_HWS_PGA_GEN6(ring->mmio_base) : 557 - RING_HWS_PGA(ring->mmio_base); 556 + u32 mmio = 0; 557 + 558 + /* The ring status page addresses are no longer next to the rest of 559 + * the ring registers as of gen7. 560 + */ 561 + if (IS_GEN7(dev)) { 562 + switch (ring->id) { 563 + case RING_RENDER: 564 + mmio = RENDER_HWS_PGA_GEN7; 565 + break; 566 + case RING_BLT: 567 + mmio = BLT_HWS_PGA_GEN7; 568 + break; 569 + case RING_BSD: 570 + mmio = BSD_HWS_PGA_GEN7; 571 + break; 572 + } 573 + } else if (IS_GEN6(ring->dev)) { 574 + mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 575 + } else { 576 + mmio = RING_HWS_PGA(ring->mmio_base); 577 + } 578 + 558 579 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 559 580 POSTING_READ(mmio); 560 581 } ··· 621 600 } 622 601 623 602 static bool 624 - ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 625 - { 626 - struct drm_device *dev = ring->dev; 627 - drm_i915_private_t *dev_priv = dev->dev_private; 628 - 629 - if (!dev->irq_enabled) 630 - return false; 631 - 632 - spin_lock(&ring->irq_lock); 633 - if (ring->irq_refcount++ == 0) 634 - ironlake_enable_irq(dev_priv, flag); 635 - spin_unlock(&ring->irq_lock); 636 - 637 - return true; 638 - } 639 - 640 - static void 641 - ring_put_irq(struct 
intel_ring_buffer *ring, u32 flag) 642 - { 643 - struct drm_device *dev = ring->dev; 644 - drm_i915_private_t *dev_priv = dev->dev_private; 645 - 646 - spin_lock(&ring->irq_lock); 647 - if (--ring->irq_refcount == 0) 648 - ironlake_disable_irq(dev_priv, flag); 649 - spin_unlock(&ring->irq_lock); 650 - } 651 - 652 - static bool 653 603 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 654 604 { 655 605 struct drm_device *dev = ring->dev; ··· 658 666 static bool 659 667 bsd_ring_get_irq(struct intel_ring_buffer *ring) 660 668 { 661 - return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); 669 + struct drm_device *dev = ring->dev; 670 + drm_i915_private_t *dev_priv = dev->dev_private; 671 + 672 + if (!dev->irq_enabled) 673 + return false; 674 + 675 + spin_lock(&ring->irq_lock); 676 + if (ring->irq_refcount++ == 0) { 677 + if (IS_G4X(dev)) 678 + i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); 679 + else 680 + ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); 681 + } 682 + spin_unlock(&ring->irq_lock); 683 + 684 + return true; 662 685 } 663 686 static void 664 687 bsd_ring_put_irq(struct intel_ring_buffer *ring) 665 688 { 666 - ring_put_irq(ring, GT_BSD_USER_INTERRUPT); 689 + struct drm_device *dev = ring->dev; 690 + drm_i915_private_t *dev_priv = dev->dev_private; 691 + 692 + spin_lock(&ring->irq_lock); 693 + if (--ring->irq_refcount == 0) { 694 + if (IS_G4X(dev)) 695 + i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); 696 + else 697 + ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); 698 + } 699 + spin_unlock(&ring->irq_lock); 667 700 } 668 701 669 702 static int ··· 776 759 ret = -ENOMEM; 777 760 goto err; 778 761 } 779 - obj->agp_type = AGP_USER_CACHED_MEMORY; 762 + obj->cache_level = I915_CACHE_LLC; 780 763 781 764 ret = i915_gem_object_pin(obj, 4096, true); 782 765 if (ret != 0) { ··· 817 800 INIT_LIST_HEAD(&ring->request_list); 818 801 INIT_LIST_HEAD(&ring->gpu_write_list); 819 802 803 + init_waitqueue_head(&ring->irq_queue); 820 
804 spin_lock_init(&ring->irq_lock); 821 805 ring->irq_mask = ~0; 822 806 ··· 890 872 891 873 /* Disable the ring buffer. The ring must be idle at this point */ 892 874 dev_priv = ring->dev->dev_private; 893 - ret = intel_wait_ring_buffer(ring, ring->size - 8); 875 + ret = intel_wait_ring_idle(ring); 894 876 if (ret) 895 877 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 896 878 ring->name, ret); ··· 1351 1333 drm_i915_private_t *dev_priv = dev->dev_private; 1352 1334 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 1353 1335 1354 - if (IS_GEN6(dev)) 1336 + if (IS_GEN6(dev) || IS_GEN7(dev)) 1355 1337 *ring = gen6_bsd_ring; 1356 1338 else 1357 1339 *ring = bsd_ring;
+19 -16
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 14 14 struct drm_i915_gem_object *obj; 15 15 }; 16 16 17 - #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) 18 - #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) 17 + #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) 18 + #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 19 19 20 - #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 21 - #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) 20 + #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) 21 + #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 22 22 23 - #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 24 - #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) 23 + #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) 24 + #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 25 25 26 - #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 27 - #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) 26 + #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) 27 + #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 28 28 29 - #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 30 - #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) 29 + #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) 30 + #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 31 31 32 - #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 33 - #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) 34 - 35 - #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 36 - #define I915_READ_SYNC_0(ring) 
I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 37 - #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base)) 32 + #define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) 33 + #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 34 + #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 38 35 39 36 struct intel_ring_buffer { 40 37 const char *name; ··· 161 164 #define I915_BREADCRUMB_INDEX 0x21 162 165 163 166 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 167 + 164 168 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); 169 + static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) 170 + { 171 + return intel_wait_ring_buffer(ring, ring->space - 8); 172 + } 173 + 165 174 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 166 175 167 176 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+4 -6
drivers/gpu/drm/i915/intel_sdvo.c
··· 2544 2544 if (!intel_sdvo) 2545 2545 return false; 2546 2546 2547 + intel_sdvo->sdvo_reg = sdvo_reg; 2548 + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; 2549 + intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2547 2550 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { 2548 2551 kfree(intel_sdvo); 2549 2552 return false; 2550 2553 } 2551 2554 2552 - intel_sdvo->sdvo_reg = sdvo_reg; 2553 - 2555 + /* encoder type will be decided later */ 2554 2556 intel_encoder = &intel_sdvo->base; 2555 2557 intel_encoder->type = INTEL_OUTPUT_SDVO; 2556 - /* encoder type will be decided later */ 2557 2558 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); 2558 - 2559 - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; 2560 - intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2561 2559 2562 2560 /* Read the regs to test if we can talk to the device */ 2563 2561 for (i = 0; i < 0x40; i++) {
+6 -7
drivers/gpu/drm/i915/intel_tv.c
··· 1361 1361 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1362 1362 type = intel_tv_detect_type(intel_tv, connector); 1363 1363 } else if (force) { 1364 - struct drm_crtc *crtc; 1365 - int dpms_mode; 1364 + struct intel_load_detect_pipe tmp; 1366 1365 1367 - crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, 1368 - &mode, &dpms_mode); 1369 - if (crtc) { 1366 + if (intel_get_load_detect_pipe(&intel_tv->base, connector, 1367 + &mode, &tmp)) { 1370 1368 type = intel_tv_detect_type(intel_tv, connector); 1371 - intel_release_load_detect_pipe(&intel_tv->base, connector, 1372 - dpms_mode); 1369 + intel_release_load_detect_pipe(&intel_tv->base, 1370 + connector, 1371 + &tmp); 1373 1372 } else 1374 1373 return connector_status_unknown; 1375 1374 } else
+2
drivers/gpu/drm/nouveau/Kconfig
··· 11 11 select FRAMEBUFFER_CONSOLE if !EXPERT 12 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 13 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT 14 + select ACPI_WMI if ACPI 15 + select MXM_WMI if ACPI 14 16 help 15 17 Choose this option for open-source nVidia support. 16 18
+2
drivers/gpu/drm/nouveau/Makefile
··· 20 20 nv40_graph.o nv50_graph.o nvc0_graph.o \ 21 21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ 22 22 nv84_crypt.o \ 23 + nva3_copy.o nvc0_copy.o \ 24 + nv40_mpeg.o nv50_mpeg.o \ 23 25 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 24 26 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ 25 27 nv50_cursor.o nv50_display.o \
+97 -11
drivers/gpu/drm/nouveau/nouveau_acpi.c
··· 4 4 #include <acpi/acpi_drivers.h> 5 5 #include <acpi/acpi_bus.h> 6 6 #include <acpi/video.h> 7 + #include <acpi/acpi.h> 8 + #include <linux/mxm-wmi.h> 7 9 8 10 #include "drmP.h" 9 11 #include "drm.h" ··· 37 35 38 36 static struct nouveau_dsm_priv { 39 37 bool dsm_detected; 38 + bool optimus_detected; 40 39 acpi_handle dhandle; 41 40 acpi_handle rom_handle; 42 41 } nouveau_dsm_priv; 42 + 43 + #define NOUVEAU_DSM_HAS_MUX 0x1 44 + #define NOUVEAU_DSM_HAS_OPT 0x2 43 45 44 46 static const char nouveau_dsm_muid[] = { 45 47 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 46 48 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, 47 49 }; 50 + 51 + static const char nouveau_op_dsm_muid[] = { 52 + 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, 53 + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, 54 + }; 55 + 56 + static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) 57 + { 58 + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 59 + struct acpi_object_list input; 60 + union acpi_object params[4]; 61 + union acpi_object *obj; 62 + int err; 63 + 64 + input.count = 4; 65 + input.pointer = params; 66 + params[0].type = ACPI_TYPE_BUFFER; 67 + params[0].buffer.length = sizeof(nouveau_op_dsm_muid); 68 + params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; 69 + params[1].type = ACPI_TYPE_INTEGER; 70 + params[1].integer.value = 0x00000100; 71 + params[2].type = ACPI_TYPE_INTEGER; 72 + params[2].integer.value = func; 73 + params[3].type = ACPI_TYPE_BUFFER; 74 + params[3].buffer.length = 0; 75 + 76 + err = acpi_evaluate_object(handle, "_DSM", &input, &output); 77 + if (err) { 78 + printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); 79 + return err; 80 + } 81 + 82 + obj = (union acpi_object *)output.pointer; 83 + 84 + if (obj->type == ACPI_TYPE_INTEGER) 85 + if (obj->integer.value == 0x80000002) { 86 + return -ENODEV; 87 + } 88 + 89 + if (obj->type == ACPI_TYPE_BUFFER) { 90 + if (obj->buffer.length == 4 && result) { 91 + *result = 0; 
92 + *result |= obj->buffer.pointer[0]; 93 + *result |= (obj->buffer.pointer[1] << 8); 94 + *result |= (obj->buffer.pointer[2] << 16); 95 + *result |= (obj->buffer.pointer[3] << 24); 96 + } 97 + } 98 + 99 + kfree(output.pointer); 100 + return 0; 101 + } 48 102 49 103 static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) 50 104 { ··· 150 92 151 93 static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) 152 94 { 95 + mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); 96 + mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); 153 97 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); 154 98 } 155 99 ··· 208 148 .get_client_id = nouveau_dsm_get_client_id, 209 149 }; 210 150 211 - static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) 151 + static int nouveau_dsm_pci_probe(struct pci_dev *pdev) 212 152 { 213 153 acpi_handle dhandle, nvidia_handle; 214 154 acpi_status status; 215 - int ret; 155 + int ret, retval = 0; 216 156 uint32_t result; 217 157 218 158 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); ··· 226 166 227 167 ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, 228 168 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); 229 - if (ret < 0) 230 - return false; 169 + if (ret == 0) 170 + retval |= NOUVEAU_DSM_HAS_MUX; 231 171 232 - nouveau_dsm_priv.dhandle = dhandle; 233 - return true; 172 + ret = nouveau_optimus_dsm(dhandle, 0, 0, &result); 173 + if (ret == 0) 174 + retval |= NOUVEAU_DSM_HAS_OPT; 175 + 176 + if (retval) 177 + nouveau_dsm_priv.dhandle = dhandle; 178 + 179 + return retval; 234 180 } 235 181 236 182 static bool nouveau_dsm_detect(void) ··· 245 179 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 246 180 struct pci_dev *pdev = NULL; 247 181 int has_dsm = 0; 182 + int has_optimus; 248 183 int vga_count = 0; 184 + bool guid_valid; 185 + int retval; 186 + bool ret = false; 249 187 188 + /* lookup the MXM GUID 
*/ 189 + guid_valid = mxm_wmi_supported(); 190 + 191 + if (guid_valid) 192 + printk("MXM: GUID detected in BIOS\n"); 193 + 194 + /* now do DSM detection */ 250 195 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 251 196 vga_count++; 252 197 253 - has_dsm |= (nouveau_dsm_pci_probe(pdev) == true); 198 + retval = nouveau_dsm_pci_probe(pdev); 199 + printk("ret val is %d\n", retval); 200 + if (retval & NOUVEAU_DSM_HAS_MUX) 201 + has_dsm |= 1; 202 + if (retval & NOUVEAU_DSM_HAS_OPT) 203 + has_optimus = 1; 254 204 } 255 205 256 - if (vga_count == 2 && has_dsm) { 206 + if (vga_count == 2 && has_dsm && guid_valid) { 257 207 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); 258 208 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 259 209 acpi_method_name); 260 210 nouveau_dsm_priv.dsm_detected = true; 261 - return true; 211 + ret = true; 262 212 } 263 - return false; 213 + 214 + if (has_optimus == 1) 215 + nouveau_dsm_priv.optimus_detected = true; 216 + 217 + return ret; 264 218 } 265 219 266 220 void nouveau_register_dsm_handler(void) ··· 333 247 acpi_status status; 334 248 acpi_handle dhandle, rom_handle; 335 249 336 - if (!nouveau_dsm_priv.dsm_detected) 250 + if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) 337 251 return false; 338 252 339 253 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+2 -5
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 5049 5049 pll_lim->vco1.max_n = record[11]; 5050 5050 pll_lim->min_p = record[12]; 5051 5051 pll_lim->max_p = record[13]; 5052 - /* where did this go to?? */ 5053 - if ((entry[0] & 0xf0) == 0x80) 5054 - pll_lim->refclk = 27000; 5055 - else 5056 - pll_lim->refclk = 100000; 5052 + pll_lim->refclk = ROM16(entry[9]) * 1000; 5057 5053 } 5058 5054 5059 5055 /* ··· 6031 6035 case DCB_CONNECTOR_DVI_I: 6032 6036 case DCB_CONNECTOR_DVI_D: 6033 6037 case DCB_CONNECTOR_LVDS: 6038 + case DCB_CONNECTOR_LVDS_SPWG: 6034 6039 case DCB_CONNECTOR_DP: 6035 6040 case DCB_CONNECTOR_eDP: 6036 6041 case DCB_CONNECTOR_HDMI_0:
+1
drivers/gpu/drm/nouveau/nouveau_bios.h
··· 82 82 DCB_CONNECTOR_DVI_I = 0x30, 83 83 DCB_CONNECTOR_DVI_D = 0x31, 84 84 DCB_CONNECTOR_LVDS = 0x40, 85 + DCB_CONNECTOR_LVDS_SPWG = 0x41, 85 86 DCB_CONNECTOR_DP = 0x46, 86 87 DCB_CONNECTOR_eDP = 0x47, 87 88 DCB_CONNECTOR_HDMI_0 = 0x60,
+6 -14
drivers/gpu/drm/nouveau/nouveau_channel.c
··· 268 268 struct drm_device *dev = chan->dev; 269 269 struct drm_nouveau_private *dev_priv = dev->dev_private; 270 270 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 271 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 272 - struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; 273 271 unsigned long flags; 272 + int i; 274 273 275 274 /* decrement the refcount, and we're done if there's still refs */ 276 275 if (likely(!atomic_dec_and_test(&chan->users))) { ··· 293 294 /* boot it off the hardware */ 294 295 pfifo->reassign(dev, false); 295 296 296 - /* We want to give pgraph a chance to idle and get rid of all 297 - * potential errors. We need to do this without the context 298 - * switch lock held, otherwise the irq handler is unable to 299 - * process them. 300 - */ 301 - if (pgraph->channel(dev) == chan) 302 - nouveau_wait_for_idle(dev); 303 - 304 297 /* destroy the engine specific contexts */ 305 298 pfifo->destroy_context(chan); 306 - pgraph->destroy_context(chan); 307 - if (pcrypt->destroy_context) 308 - pcrypt->destroy_context(chan); 299 + for (i = 0; i < NVOBJ_ENGINE_NR; i++) { 300 + if (chan->engctx[i]) 301 + dev_priv->eng[i]->context_del(chan, i); 302 + } 309 303 310 304 pfifo->reassign(dev, true); 311 305 ··· 406 414 struct nouveau_channel *chan; 407 415 int ret; 408 416 409 - if (dev_priv->engine.graph.accel_blocked) 417 + if (!dev_priv->eng[NVOBJ_ENGINE_GR]) 410 418 return -ENODEV; 411 419 412 420 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+5 -3
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 442 442 } 443 443 444 444 /* LVDS always needs gpu scaling */ 445 - if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS && 445 + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && 446 446 value == DRM_MODE_SCALE_NONE) 447 447 return -EINVAL; 448 448 ··· 650 650 ret = get_slave_funcs(encoder)->get_modes(encoder, connector); 651 651 652 652 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || 653 + nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG || 653 654 nv_connector->dcb->type == DCB_CONNECTOR_eDP) 654 655 ret += nouveau_connector_scaler_modes_add(connector); 655 656 ··· 811 810 type = DRM_MODE_CONNECTOR_HDMIA; 812 811 break; 813 812 case DCB_CONNECTOR_LVDS: 813 + case DCB_CONNECTOR_LVDS_SPWG: 814 814 type = DRM_MODE_CONNECTOR_LVDS; 815 815 funcs = &nouveau_connector_funcs_lvds; 816 816 break; ··· 840 838 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); 841 839 842 840 /* Check if we need dithering enabled */ 843 - if (dcb->type == DCB_CONNECTOR_LVDS) { 841 + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 844 842 bool dummy, is_24bit = false; 845 843 846 844 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); ··· 885 883 nv_connector->use_dithering ? 886 884 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 887 885 888 - if (dcb->type != DCB_CONNECTOR_LVDS) { 886 + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) { 889 887 if (dev_priv->card_type >= NV_50) 890 888 connector->polled = DRM_CONNECTOR_POLL_HPD; 891 889 else
+1 -1
drivers/gpu/drm/nouveau/nouveau_display.c
··· 276 276 struct nouveau_fence *fence; 277 277 int ret; 278 278 279 - if (dev_priv->engine.graph.accel_blocked) 279 + if (!dev_priv->channel) 280 280 return -ENODEV; 281 281 282 282 s = kzalloc(sizeof(*s), GFP_KERNEL);
+17 -8
drivers/gpu/drm/nouveau/nouveau_drv.c
··· 162 162 struct drm_device *dev = pci_get_drvdata(pdev); 163 163 struct drm_nouveau_private *dev_priv = dev->dev_private; 164 164 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 165 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 166 165 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 167 166 struct nouveau_channel *chan; 168 167 struct drm_crtc *crtc; 169 - int ret, i; 168 + int ret, i, e; 170 169 171 170 if (pm_state.event == PM_EVENT_PRETHAW) 172 171 return 0; ··· 205 206 nouveau_channel_idle(chan); 206 207 } 207 208 208 - pgraph->fifo_access(dev, false); 209 - nouveau_wait_for_idle(dev); 210 209 pfifo->reassign(dev, false); 211 210 pfifo->disable(dev); 212 211 pfifo->unload_context(dev); 213 - pgraph->unload_context(dev); 212 + 213 + for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 214 + if (dev_priv->eng[e]) { 215 + ret = dev_priv->eng[e]->fini(dev, e); 216 + if (ret) 217 + goto out_abort; 218 + } 219 + } 214 220 215 221 ret = pinstmem->suspend(dev); 216 222 if (ret) { ··· 246 242 247 243 out_abort: 248 244 NV_INFO(dev, "Re-enabling acceleration..\n"); 245 + for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) { 246 + if (dev_priv->eng[e]) 247 + dev_priv->eng[e]->init(dev, e); 248 + } 249 249 pfifo->enable(dev); 250 250 pfifo->reassign(dev, true); 251 - pgraph->fifo_access(dev, true); 252 251 return ret; 253 252 } 254 253 ··· 306 299 engine->mc.init(dev); 307 300 engine->timer.init(dev); 308 301 engine->fb.init(dev); 309 - engine->graph.init(dev); 310 - engine->crypt.init(dev); 302 + for (i = 0; i < NVOBJ_ENGINE_NR; i++) { 303 + if (dev_priv->eng[i]) 304 + dev_priv->eng[i]->init(dev, i); 305 + } 311 306 engine->fifo.init(dev); 312 307 313 308 nouveau_irq_postinstall(dev);
+83 -125
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 150 150 151 151 #define NVOBJ_ENGINE_SW 0 152 152 #define NVOBJ_ENGINE_GR 1 153 - #define NVOBJ_ENGINE_PPP 2 154 - #define NVOBJ_ENGINE_COPY 3 155 - #define NVOBJ_ENGINE_VP 4 156 - #define NVOBJ_ENGINE_CRYPT 5 157 - #define NVOBJ_ENGINE_BSP 6 158 - #define NVOBJ_ENGINE_DISPLAY 0xcafe0001 159 - #define NVOBJ_ENGINE_INT 0xdeadbeef 153 + #define NVOBJ_ENGINE_CRYPT 2 154 + #define NVOBJ_ENGINE_COPY0 3 155 + #define NVOBJ_ENGINE_COPY1 4 156 + #define NVOBJ_ENGINE_MPEG 5 157 + #define NVOBJ_ENGINE_DISPLAY 15 158 + #define NVOBJ_ENGINE_NR 16 160 159 161 160 #define NVOBJ_FLAG_DONT_MAP (1 << 0) 162 161 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) ··· 244 245 struct nouveau_gpuobj *cache; 245 246 void *fifo_priv; 246 247 247 - /* PGRAPH context */ 248 - /* XXX may be merge 2 pointers as private data ??? */ 249 - struct nouveau_gpuobj *ramin_grctx; 250 - struct nouveau_gpuobj *crypt_ctx; 251 - void *pgraph_ctx; 248 + /* Execution engine contexts */ 249 + void *engctx[NVOBJ_ENGINE_NR]; 252 250 253 251 /* NV50 VM */ 254 252 struct nouveau_vm *vm; ··· 292 296 char name[32]; 293 297 struct drm_info_list info; 294 298 } debugfs; 299 + }; 300 + 301 + struct nouveau_exec_engine { 302 + void (*destroy)(struct drm_device *, int engine); 303 + int (*init)(struct drm_device *, int engine); 304 + int (*fini)(struct drm_device *, int engine); 305 + int (*context_new)(struct nouveau_channel *, int engine); 306 + void (*context_del)(struct nouveau_channel *, int engine); 307 + int (*object_new)(struct nouveau_channel *, int engine, 308 + u32 handle, u16 class); 309 + void (*set_tile_region)(struct drm_device *dev, int i); 310 + void (*tlb_flush)(struct drm_device *, int engine); 295 311 }; 296 312 297 313 struct nouveau_instmem_engine { ··· 372 364 void (*tlb_flush)(struct drm_device *dev); 373 365 }; 374 366 375 - struct nouveau_pgraph_engine { 376 - bool accel_blocked; 377 - bool registered; 378 - int grctx_size; 379 - void *priv; 380 - 381 - /* NV2x/NV3x context table (0x400780) */ 382 - 
struct nouveau_gpuobj *ctx_table; 383 - 384 - int (*init)(struct drm_device *); 385 - void (*takedown)(struct drm_device *); 386 - 387 - void (*fifo_access)(struct drm_device *, bool); 388 - 389 - struct nouveau_channel *(*channel)(struct drm_device *); 390 - int (*create_context)(struct nouveau_channel *); 391 - void (*destroy_context)(struct nouveau_channel *); 392 - int (*load_context)(struct nouveau_channel *); 393 - int (*unload_context)(struct drm_device *); 394 - void (*tlb_flush)(struct drm_device *dev); 395 - 396 - void (*set_tile_region)(struct drm_device *dev, int i); 397 - }; 398 - 399 367 struct nouveau_display_engine { 400 368 void *priv; 401 369 int (*early_init)(struct drm_device *); ··· 410 426 int nr_level; 411 427 }; 412 428 429 + struct nouveau_pm_memtiming { 430 + int id; 431 + u32 reg_100220; 432 + u32 reg_100224; 433 + u32 reg_100228; 434 + u32 reg_10022c; 435 + u32 reg_100230; 436 + u32 reg_100234; 437 + u32 reg_100238; 438 + u32 reg_10023c; 439 + u32 reg_100240; 440 + }; 441 + 413 442 #define NOUVEAU_PM_MAX_LEVEL 8 414 443 struct nouveau_pm_level { 415 444 struct device_attribute dev_attr; ··· 433 436 u32 memory; 434 437 u32 shader; 435 438 u32 unk05; 439 + u32 unk0a; 436 440 437 441 u8 voltage; 438 442 u8 fanspeed; 439 443 440 444 u16 memscript; 445 + struct nouveau_pm_memtiming *timing; 441 446 }; 442 447 443 448 struct nouveau_pm_temp_sensor_constants { ··· 454 455 s16 critical; 455 456 s16 down_clock; 456 457 s16 fan_boost; 457 - }; 458 - 459 - struct nouveau_pm_memtiming { 460 - u32 reg_100220; 461 - u32 reg_100224; 462 - u32 reg_100228; 463 - u32 reg_10022c; 464 - u32 reg_100230; 465 - u32 reg_100234; 466 - u32 reg_100238; 467 - u32 reg_10023c; 468 458 }; 469 459 470 460 struct nouveau_pm_memtimings { ··· 487 499 int (*temp_get)(struct drm_device *); 488 500 }; 489 501 490 - struct nouveau_crypt_engine { 491 - bool registered; 492 - 493 - int (*init)(struct drm_device *); 494 - void (*takedown)(struct drm_device *); 495 - int 
(*create_context)(struct nouveau_channel *); 496 - void (*destroy_context)(struct nouveau_channel *); 497 - void (*tlb_flush)(struct drm_device *dev); 498 - }; 499 - 500 502 struct nouveau_vram_engine { 501 503 int (*init)(struct drm_device *); 502 504 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, ··· 501 523 struct nouveau_mc_engine mc; 502 524 struct nouveau_timer_engine timer; 503 525 struct nouveau_fb_engine fb; 504 - struct nouveau_pgraph_engine graph; 505 526 struct nouveau_fifo_engine fifo; 506 527 struct nouveau_display_engine display; 507 528 struct nouveau_gpio_engine gpio; 508 529 struct nouveau_pm_engine pm; 509 - struct nouveau_crypt_engine crypt; 510 530 struct nouveau_vram_engine vram; 511 531 }; 512 532 ··· 613 637 enum nouveau_card_type card_type; 614 638 /* exact chipset, derived from NV_PMC_BOOT_0 */ 615 639 int chipset; 640 + int stepping; 616 641 int flags; 617 642 618 643 void __iomem *mmio; ··· 624 647 u32 ramin_base; 625 648 bool ramin_available; 626 649 struct drm_mm ramin_heap; 650 + struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR]; 627 651 struct list_head gpuobj_list; 628 652 struct list_head classes; 629 653 ··· 723 745 uint32_t crtc_owner; 724 746 uint32_t dac_users[4]; 725 747 726 - struct nouveau_suspend_resume { 727 - uint32_t *ramin_copy; 728 - } susres; 729 - 730 748 struct backlight_device *backlight; 731 749 732 750 struct { ··· 731 757 732 758 struct nouveau_fbdev *nfbdev; 733 759 struct apertures_struct *apertures; 734 - 735 - bool powered_down; 736 760 }; 737 761 738 762 static inline struct drm_nouveau_private * ··· 855 883 extern void nouveau_channel_idle(struct nouveau_channel *chan); 856 884 857 885 /* nouveau_object.c */ 858 - #define NVOBJ_CLASS(d,c,e) do { \ 886 + #define NVOBJ_ENGINE_ADD(d, e, p) do { \ 887 + struct drm_nouveau_private *dev_priv = (d)->dev_private; \ 888 + dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \ 889 + } while (0) 890 + 891 + #define NVOBJ_ENGINE_DEL(d, e) do { \ 892 + struct 
drm_nouveau_private *dev_priv = (d)->dev_private; \ 893 + dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \ 894 + } while (0) 895 + 896 + #define NVOBJ_CLASS(d, c, e) do { \ 859 897 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ 860 898 if (ret) \ 861 899 return ret; \ 862 - } while(0) 900 + } while (0) 863 901 864 - #define NVOBJ_MTHD(d,c,m,e) do { \ 902 + #define NVOBJ_MTHD(d, c, m, e) do { \ 865 903 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ 866 904 if (ret) \ 867 905 return ret; \ 868 - } while(0) 906 + } while (0) 869 907 870 908 extern int nouveau_gpuobj_early_init(struct drm_device *); 871 909 extern int nouveau_gpuobj_init(struct drm_device *); ··· 885 903 extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); 886 904 extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, 887 905 int (*exec)(struct nouveau_channel *, 888 - u32 class, u32 mthd, u32 data)); 906 + u32 class, u32 mthd, u32 data)); 889 907 extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); 890 908 extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); 891 909 extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, ··· 1119 1137 extern int nvc0_fifo_unload_context(struct drm_device *); 1120 1138 1121 1139 /* nv04_graph.c */ 1122 - extern int nv04_graph_init(struct drm_device *); 1123 - extern void nv04_graph_takedown(struct drm_device *); 1140 + extern int nv04_graph_create(struct drm_device *); 1124 1141 extern void nv04_graph_fifo_access(struct drm_device *, bool); 1125 - extern struct nouveau_channel *nv04_graph_channel(struct drm_device *); 1126 - extern int nv04_graph_create_context(struct nouveau_channel *); 1127 - extern void nv04_graph_destroy_context(struct nouveau_channel *); 1128 - extern int nv04_graph_load_context(struct nouveau_channel *); 1129 - extern int nv04_graph_unload_context(struct drm_device *); 1142 + extern int nv04_graph_object_new(struct 
nouveau_channel *, int, u32, u16); 1130 1143 extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, 1131 1144 u32 class, u32 mthd, u32 data); 1132 1145 extern struct nouveau_bitfield nv04_graph_nsource[]; 1133 1146 1134 1147 /* nv10_graph.c */ 1135 - extern int nv10_graph_init(struct drm_device *); 1136 - extern void nv10_graph_takedown(struct drm_device *); 1148 + extern int nv10_graph_create(struct drm_device *); 1137 1149 extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); 1138 - extern int nv10_graph_create_context(struct nouveau_channel *); 1139 - extern void nv10_graph_destroy_context(struct nouveau_channel *); 1140 - extern int nv10_graph_load_context(struct nouveau_channel *); 1141 - extern int nv10_graph_unload_context(struct drm_device *); 1142 - extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); 1143 1150 extern struct nouveau_bitfield nv10_graph_intr[]; 1144 1151 extern struct nouveau_bitfield nv10_graph_nstatus[]; 1145 1152 1146 1153 /* nv20_graph.c */ 1147 - extern int nv20_graph_create_context(struct nouveau_channel *); 1148 - extern void nv20_graph_destroy_context(struct nouveau_channel *); 1149 - extern int nv20_graph_load_context(struct nouveau_channel *); 1150 - extern int nv20_graph_unload_context(struct drm_device *); 1151 - extern int nv20_graph_init(struct drm_device *); 1152 - extern void nv20_graph_takedown(struct drm_device *); 1153 - extern int nv30_graph_init(struct drm_device *); 1154 - extern void nv20_graph_set_tile_region(struct drm_device *dev, int i); 1154 + extern int nv20_graph_create(struct drm_device *); 1155 1155 1156 1156 /* nv40_graph.c */ 1157 - extern int nv40_graph_init(struct drm_device *); 1158 - extern void nv40_graph_takedown(struct drm_device *); 1159 - extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); 1160 - extern int nv40_graph_create_context(struct nouveau_channel *); 1161 - extern void nv40_graph_destroy_context(struct nouveau_channel *); 
1162 - extern int nv40_graph_load_context(struct nouveau_channel *); 1163 - extern int nv40_graph_unload_context(struct drm_device *); 1157 + extern int nv40_graph_create(struct drm_device *); 1164 1158 extern void nv40_grctx_init(struct nouveau_grctx *); 1165 - extern void nv40_graph_set_tile_region(struct drm_device *dev, int i); 1166 1159 1167 1160 /* nv50_graph.c */ 1168 - extern int nv50_graph_init(struct drm_device *); 1169 - extern void nv50_graph_takedown(struct drm_device *); 1170 - extern void nv50_graph_fifo_access(struct drm_device *, bool); 1171 - extern struct nouveau_channel *nv50_graph_channel(struct drm_device *); 1172 - extern int nv50_graph_create_context(struct nouveau_channel *); 1173 - extern void nv50_graph_destroy_context(struct nouveau_channel *); 1174 - extern int nv50_graph_load_context(struct nouveau_channel *); 1175 - extern int nv50_graph_unload_context(struct drm_device *); 1161 + extern int nv50_graph_create(struct drm_device *); 1176 1162 extern int nv50_grctx_init(struct nouveau_grctx *); 1177 - extern void nv50_graph_tlb_flush(struct drm_device *dev); 1178 - extern void nv84_graph_tlb_flush(struct drm_device *dev); 1179 1163 extern struct nouveau_enum nv50_data_error_names[]; 1164 + extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst); 1180 1165 1181 1166 /* nvc0_graph.c */ 1182 - extern int nvc0_graph_init(struct drm_device *); 1183 - extern void nvc0_graph_takedown(struct drm_device *); 1184 - extern void nvc0_graph_fifo_access(struct drm_device *, bool); 1185 - extern struct nouveau_channel *nvc0_graph_channel(struct drm_device *); 1186 - extern int nvc0_graph_create_context(struct nouveau_channel *); 1187 - extern void nvc0_graph_destroy_context(struct nouveau_channel *); 1188 - extern int nvc0_graph_load_context(struct nouveau_channel *); 1189 - extern int nvc0_graph_unload_context(struct drm_device *); 1167 + extern int nvc0_graph_create(struct drm_device *); 1168 + extern int nvc0_graph_isr_chid(struct 
drm_device *dev, u64 inst); 1190 1169 1191 1170 /* nv84_crypt.c */ 1192 - extern int nv84_crypt_init(struct drm_device *dev); 1193 - extern void nv84_crypt_fini(struct drm_device *dev); 1194 - extern int nv84_crypt_create_context(struct nouveau_channel *); 1195 - extern void nv84_crypt_destroy_context(struct nouveau_channel *); 1196 - extern void nv84_crypt_tlb_flush(struct drm_device *dev); 1171 + extern int nv84_crypt_create(struct drm_device *); 1172 + 1173 + /* nva3_copy.c */ 1174 + extern int nva3_copy_create(struct drm_device *dev); 1175 + 1176 + /* nvc0_copy.c */ 1177 + extern int nvc0_copy_create(struct drm_device *dev, int engine); 1178 + 1179 + /* nv40_mpeg.c */ 1180 + extern int nv40_mpeg_create(struct drm_device *dev); 1181 + 1182 + /* nv50_mpeg.c */ 1183 + extern int nv50_mpeg_create(struct drm_device *dev); 1197 1184 1198 1185 /* nv04_instmem.c */ 1199 1186 extern int nv04_instmem_init(struct drm_device *); ··· 1353 1402 /* nv50_calc. */ 1354 1403 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, 1355 1404 int *N1, int *M1, int *N2, int *M2, int *P); 1356 - int nv50_calc_pll2(struct drm_device *, struct pll_lims *, 1357 - int clk, int *N, int *fN, int *M, int *P); 1405 + int nva3_calc_pll(struct drm_device *, struct pll_lims *, 1406 + int clk, int *N, int *fN, int *M, int *P); 1358 1407 1359 1408 #ifndef ioread32_native 1360 1409 #ifdef __BIG_ENDIAN ··· 1528 1577 return dev->pdev->device == device && 1529 1578 dev->pdev->subsystem_vendor == sub_vendor && 1530 1579 dev->pdev->subsystem_device == sub_device; 1580 + } 1581 + 1582 + static inline void * 1583 + nv_engine(struct drm_device *dev, int engine) 1584 + { 1585 + struct drm_nouveau_private *dev_priv = dev->dev_private; 1586 + return (void *)dev_priv->eng[engine]; 1531 1587 } 1532 1588 1533 1589 /* returns 1 if device is one of the nv4x using the 0x4497 object class,
+5 -5
drivers/gpu/drm/nouveau/nouveau_grctx.h
··· 87 87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | 88 88 (state ? 0 : CP_BRA_IF_CLEAR)); 89 89 } 90 - #define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 90 + #define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 91 91 #ifdef CP_BRA_MOD 92 - #define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 93 - #define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) 92 + #define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 93 + #define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) 94 94 #endif 95 95 96 96 static inline void ··· 98 98 { 99 99 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0)); 100 100 } 101 - #define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 101 + #define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 102 102 103 103 static inline void 104 104 _cp_set(struct nouveau_grctx *ctx, int flag, int state) 105 105 { 106 106 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0)); 107 107 } 108 - #define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 108 + #define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 109 109 110 110 static inline void 111 111 cp_pos(struct nouveau_grctx *ctx, int offset)
+43 -23
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 51 51 struct drm_nouveau_private *dev_priv = dev->dev_private; 52 52 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 53 53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 54 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 55 - int i = tile - dev_priv->tile.reg; 54 + int i = tile - dev_priv->tile.reg, j; 56 55 unsigned long save; 57 56 58 57 nouveau_fence_unref(&tile->fence); ··· 69 70 nouveau_wait_for_idle(dev); 70 71 71 72 pfb->set_tile_region(dev, i); 72 - pgraph->set_tile_region(dev, i); 73 + for (j = 0; j < NVOBJ_ENGINE_NR; j++) { 74 + if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region) 75 + dev_priv->eng[j]->set_tile_region(dev, i); 76 + } 73 77 74 78 pfifo->cache_pull(dev, true); 75 79 pfifo->reassign(dev, true); ··· 597 595 if (!memtimings->timing) 598 596 return; 599 597 600 - /* Get "some number" from the timing reg for NV_40 598 + /* Get "some number" from the timing reg for NV_40 and NV_50 601 599 * Used in calculations later */ 602 - if(dev_priv->card_type == NV_40) { 603 - magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; 600 + if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) { 601 + magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24; 604 602 } 605 603 606 604 entry = mem + mem[1]; ··· 643 641 /* XXX: I don't trust the -1's and +1's... they must come 644 642 * from somewhere! 
*/ 645 643 timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | 646 - tUNK_18 << 16 | 644 + max(tUNK_18, (u8) 1) << 16 | 647 645 (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; 648 - if(dev_priv->chipset == 0xa8) { 646 + if (dev_priv->chipset == 0xa8) { 649 647 timing->reg_100224 |= (tUNK_2 - 1); 650 648 } else { 651 649 timing->reg_100224 |= (tUNK_2 + 2 - magic_number); 652 650 } 653 651 654 652 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 655 - if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { 653 + if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) 656 654 timing->reg_100228 |= (tUNK_19 - 1) << 24; 657 - } 655 + else 656 + timing->reg_100228 |= magic_number << 24; 658 657 659 - if(dev_priv->card_type == NV_40) { 658 + if (dev_priv->card_type == NV_40) { 660 659 /* NV40: don't know what the rest of the regs are.. 661 660 * And don't need to know either */ 662 - timing->reg_100228 |= 0x20200000 | magic_number << 24; 663 - } else if(dev_priv->card_type >= NV_50) { 664 - /* XXX: reg_10022c */ 665 - timing->reg_10022c = tUNK_2 - 1; 661 + timing->reg_100228 |= 0x20200000; 662 + } else if (dev_priv->card_type >= NV_50) { 663 + if (dev_priv->chipset < 0x98 || 664 + (dev_priv->chipset == 0x98 && 665 + dev_priv->stepping <= 0xa1)) { 666 + timing->reg_10022c = (0x14 + tUNK_2) << 24 | 667 + 0x16 << 16 | 668 + (tUNK_2 - 1) << 8 | 669 + (tUNK_2 - 1); 670 + } else { 671 + /* XXX: reg_10022c for recentish cards */ 672 + timing->reg_10022c = tUNK_2 - 1; 673 + } 666 674 667 675 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 668 676 tUNK_13 << 8 | tUNK_13); 669 677 670 678 timing->reg_100234 = (tRAS << 24 | tRC); 671 - timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; 679 + timing->reg_100234 += max(tUNK_10, tUNK_11) << 16; 672 680 673 - if(dev_priv->chipset < 0xa3) { 681 + if (dev_priv->chipset < 0x98 || 682 + (dev_priv->chipset == 0x98 && 683 + dev_priv->stepping <= 0xa1)) { 674 684 timing->reg_100234 |= (tUNK_2 + 2) << 8; 675 
685 } else { 676 686 /* XXX: +6? */ 677 687 timing->reg_100234 |= (tUNK_19 + 6) << 8; 678 688 } 679 689 680 - /* XXX; reg_100238, reg_10023c 681 - * reg_100238: 0x00?????? 682 - * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ 690 + /* XXX; reg_100238 691 + * reg_100238: 0x00?????? */ 683 692 timing->reg_10023c = 0x202; 684 - if(dev_priv->chipset < 0xa3) { 693 + if (dev_priv->chipset < 0x98 || 694 + (dev_priv->chipset == 0x98 && 695 + dev_priv->stepping <= 0xa1)) { 685 696 timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; 686 697 } else { 687 - /* currently unknown 698 + /* XXX: reg_10023c 699 + * currently unknown 688 700 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ 689 701 } 702 + 703 + /* XXX: reg_100240? */ 690 704 } 705 + timing->id = i; 691 706 692 707 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 693 708 timing->reg_100220, timing->reg_100224, ··· 712 693 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 713 694 timing->reg_100230, timing->reg_100234, 714 695 timing->reg_100238, timing->reg_10023c); 696 + NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240); 715 697 } 716 698 717 699 memtimings->nr_timing = entries; 718 - memtimings->supported = true; 700 + memtimings->supported = (dev_priv->chipset <= 0x98); 719 701 } 720 702 721 703 void
+21 -107
drivers/gpu/drm/nouveau/nouveau_object.c
··· 361 361 return 0; 362 362 } 363 363 364 - 365 - static uint32_t 366 - nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) 367 - { 368 - struct drm_nouveau_private *dev_priv = dev->dev_private; 369 - 370 - /*XXX: dodgy hack for now */ 371 - if (dev_priv->card_type >= NV_50) 372 - return 24; 373 - if (dev_priv->card_type >= NV_40) 374 - return 32; 375 - return 16; 376 - } 377 - 378 364 /* 379 365 DMA objects are used to reference a piece of memory in the 380 366 framebuffer, PCI or AGP address space. Each object is 16 bytes big ··· 592 606 set to 0? 593 607 */ 594 608 static int 595 - nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 596 - struct nouveau_gpuobj **gpuobj_ret) 609 + nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class) 597 610 { 598 611 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 599 612 struct nouveau_gpuobj *gpuobj; 613 + int ret; 600 614 601 615 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 602 616 if (!gpuobj) ··· 610 624 spin_lock(&dev_priv->ramin_lock); 611 625 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 612 626 spin_unlock(&dev_priv->ramin_lock); 613 - *gpuobj_ret = gpuobj; 614 - return 0; 627 + 628 + ret = nouveau_ramht_insert(chan, handle, gpuobj); 629 + nouveau_gpuobj_ref(NULL, &gpuobj); 630 + return ret; 615 631 } 616 632 617 633 int ··· 622 634 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 623 635 struct drm_device *dev = chan->dev; 624 636 struct nouveau_gpuobj_class *oc; 625 - struct nouveau_gpuobj *gpuobj; 626 637 int ret; 627 638 628 639 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); 629 640 630 641 list_for_each_entry(oc, &dev_priv->classes, head) { 631 - if (oc->id == class) 632 - goto found; 642 + struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine]; 643 + 644 + if (oc->id != class) 645 + continue; 646 + 647 + if (oc->engine == NVOBJ_ENGINE_SW) 648 + return nouveau_gpuobj_sw_new(chan, handle, class); 649 + 650 + if 
(!chan->engctx[oc->engine]) { 651 + ret = eng->context_new(chan, oc->engine); 652 + if (ret) 653 + return ret; 654 + } 655 + 656 + return eng->object_new(chan, oc->engine, handle, class); 633 657 } 634 658 635 659 NV_ERROR(dev, "illegal object class: 0x%x\n", class); 636 660 return -EINVAL; 637 - 638 - found: 639 - switch (oc->engine) { 640 - case NVOBJ_ENGINE_SW: 641 - if (dev_priv->card_type < NV_C0) { 642 - ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); 643 - if (ret) 644 - return ret; 645 - goto insert; 646 - } 647 - break; 648 - case NVOBJ_ENGINE_GR: 649 - if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) || 650 - (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) { 651 - struct nouveau_pgraph_engine *pgraph = 652 - &dev_priv->engine.graph; 653 - 654 - ret = pgraph->create_context(chan); 655 - if (ret) 656 - return ret; 657 - } 658 - break; 659 - case NVOBJ_ENGINE_CRYPT: 660 - if (!chan->crypt_ctx) { 661 - struct nouveau_crypt_engine *pcrypt = 662 - &dev_priv->engine.crypt; 663 - 664 - ret = pcrypt->create_context(chan); 665 - if (ret) 666 - return ret; 667 - } 668 - break; 669 - } 670 - 671 - /* we're done if this is fermi */ 672 - if (dev_priv->card_type >= NV_C0) 673 - return 0; 674 - 675 - ret = nouveau_gpuobj_new(dev, chan, 676 - nouveau_gpuobj_class_instmem_size(dev, class), 677 - 16, 678 - NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 679 - &gpuobj); 680 - if (ret) { 681 - NV_ERROR(dev, "error creating gpuobj: %d\n", ret); 682 - return ret; 683 - } 684 - 685 - if (dev_priv->card_type >= NV_50) { 686 - nv_wo32(gpuobj, 0, class); 687 - nv_wo32(gpuobj, 20, 0x00010000); 688 - } else { 689 - switch (class) { 690 - case NV_CLASS_NULL: 691 - nv_wo32(gpuobj, 0, 0x00001030); 692 - nv_wo32(gpuobj, 4, 0xFFFFFFFF); 693 - break; 694 - default: 695 - if (dev_priv->card_type >= NV_40) { 696 - nv_wo32(gpuobj, 0, class); 697 - #ifdef __BIG_ENDIAN 698 - nv_wo32(gpuobj, 8, 0x01000000); 699 - #endif 700 - } else { 701 - #ifdef __BIG_ENDIAN 702 - nv_wo32(gpuobj, 
0, class | 0x00080000); 703 - #else 704 - nv_wo32(gpuobj, 0, class); 705 - #endif 706 - } 707 - } 708 - } 709 - dev_priv->engine.instmem.flush(dev); 710 - 711 - gpuobj->engine = oc->engine; 712 - gpuobj->class = oc->id; 713 - 714 - insert: 715 - ret = nouveau_ramht_insert(chan, handle, gpuobj); 716 - if (ret) 717 - NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret); 718 - nouveau_gpuobj_ref(NULL, &gpuobj); 719 - return ret; 720 661 } 721 662 722 663 static int ··· 662 745 /* Base amount for object storage (4KiB enough?) */ 663 746 size = 0x2000; 664 747 base = 0; 665 - 666 - /* PGRAPH context */ 667 - size += dev_priv->engine.graph.grctx_size; 668 748 669 749 if (dev_priv->card_type == NV_50) { 670 750 /* Various fixed table thingos */
+89 -3
drivers/gpu/drm/nouveau/nouveau_perf.c
··· 72 72 pm->nr_perflvl = 1; 73 73 } 74 74 75 + static struct nouveau_pm_memtiming * 76 + nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P, 77 + u16 memclk, u8 *entry, u8 recordlen, u8 entries) 78 + { 79 + struct drm_nouveau_private *dev_priv = dev->dev_private; 80 + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 81 + struct nvbios *bios = &dev_priv->vbios; 82 + u8 ramcfg; 83 + int i; 84 + 85 + /* perf v2 has a separate "timing map" table, we have to match 86 + * the target memory clock to a specific entry, *then* use 87 + * ramcfg to select the correct subentry 88 + */ 89 + if (P->version == 2) { 90 + u8 *tmap = ROMPTR(bios, P->data[4]); 91 + if (!tmap) { 92 + NV_DEBUG(dev, "no timing map pointer\n"); 93 + return NULL; 94 + } 95 + 96 + if (tmap[0] != 0x10) { 97 + NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]); 98 + return NULL; 99 + } 100 + 101 + entry = tmap + tmap[1]; 102 + recordlen = tmap[2] + (tmap[4] * tmap[3]); 103 + for (i = 0; i < tmap[5]; i++, entry += recordlen) { 104 + if (memclk >= ROM16(entry[0]) && 105 + memclk <= ROM16(entry[2])) 106 + break; 107 + } 108 + 109 + if (i == tmap[5]) { 110 + NV_WARN(dev, "no match in timing map table\n"); 111 + return NULL; 112 + } 113 + 114 + entry += tmap[2]; 115 + recordlen = tmap[3]; 116 + entries = tmap[4]; 117 + } 118 + 119 + ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2; 120 + if (bios->ram_restrict_tbl_ptr) 121 + ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg]; 122 + 123 + if (ramcfg >= entries) { 124 + NV_WARN(dev, "ramcfg strap out of bounds!\n"); 125 + return NULL; 126 + } 127 + 128 + entry += ramcfg * recordlen; 129 + if (entry[1] >= pm->memtimings.nr_timing) { 130 + NV_WARN(dev, "timingset %d does not exist\n", entry[1]); 131 + return NULL; 132 + } 133 + 134 + return &pm->memtimings.timing[entry[1]]; 135 + } 136 + 75 137 void 76 138 nouveau_perf_init(struct drm_device *dev) 77 139 { ··· 186 124 for (i = 0; i < entries; i++) { 187 125 struct 
nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 188 126 127 + perflvl->timing = NULL; 128 + 189 129 if (entry[0] == 0xff) { 190 130 entry += recordlen; 191 131 continue; ··· 238 174 #define subent(n) entry[perf[2] + ((n) * perf[3])] 239 175 perflvl->fanspeed = 0; /*XXX*/ 240 176 perflvl->voltage = entry[2]; 241 - perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; 242 - perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; 243 - perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; 177 + if (dev_priv->card_type == NV_50) { 178 + perflvl->core = ROM16(subent(0)) & 0xfff; 179 + perflvl->shader = ROM16(subent(1)) & 0xfff; 180 + perflvl->memory = ROM16(subent(2)) & 0xfff; 181 + } else { 182 + perflvl->shader = ROM16(subent(3)) & 0xfff; 183 + perflvl->core = perflvl->shader / 2; 184 + perflvl->unk0a = ROM16(subent(4)) & 0xfff; 185 + perflvl->memory = ROM16(subent(5)) & 0xfff; 186 + } 187 + 188 + perflvl->core *= 1000; 189 + perflvl->shader *= 1000; 190 + perflvl->memory *= 1000; 191 + perflvl->unk0a *= 1000; 244 192 break; 245 193 } 246 194 ··· 264 188 entry += recordlen; 265 189 continue; 266 190 } 191 + } 192 + 193 + /* get the corresponding memory timings */ 194 + if (version > 0x15) { 195 + /* last 3 args are for < 0x40, ignored for >= 0x40 */ 196 + perflvl->timing = 197 + nouveau_perf_timing(dev, &P, 198 + perflvl->memory / 1000, 199 + entry + perf[3], 200 + perf[5], perf[4]); 267 201 } 268 202 269 203 snprintf(perflvl->name, sizeof(perflvl->name),
+13 -8
drivers/gpu/drm/nouveau/nouveau_pm.c
··· 156 156 static void 157 157 nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) 158 158 { 159 - char c[16], s[16], v[16], f[16]; 159 + char c[16], s[16], v[16], f[16], t[16]; 160 160 161 161 c[0] = '\0'; 162 162 if (perflvl->core) ··· 174 174 if (perflvl->fanspeed) 175 175 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); 176 176 177 - snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000, 178 - c, s, v, f); 177 + t[0] = '\0'; 178 + if (perflvl->timing) 179 + snprintf(t, sizeof(t), " timing %d", perflvl->timing->id); 180 + 181 + snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000, 182 + c, s, v, f, t); 179 183 } 180 184 181 185 static ssize_t ··· 453 449 #endif 454 450 } 455 451 456 - #ifdef CONFIG_ACPI 452 + #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) 457 453 static int 458 454 nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) 459 455 { ··· 480 476 char info[256]; 481 477 int ret, i; 482 478 479 + nouveau_mem_timing_init(dev); 483 480 nouveau_volt_init(dev); 484 481 nouveau_perf_init(dev); 485 482 nouveau_temp_init(dev); 486 - nouveau_mem_timing_init(dev); 487 483 488 484 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 489 485 for (i = 0; i < pm->nr_perflvl; i++) { ··· 494 490 /* determine current ("boot") performance level */ 495 491 ret = nouveau_pm_perflvl_get(dev, &pm->boot); 496 492 if (ret == 0) { 493 + strncpy(pm->boot.name, "boot", 4); 497 494 pm->cur = &pm->boot; 498 495 499 496 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); ··· 512 507 513 508 nouveau_sysfs_init(dev); 514 509 nouveau_hwmon_init(dev); 515 - #ifdef CONFIG_ACPI 510 + #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) 516 511 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; 517 512 register_acpi_notifier(&pm->acpi_nb); 518 513 #endif ··· 529 524 if (pm->cur != &pm->boot) 530 525 nouveau_pm_perflvl_set(dev, &pm->boot); 531 526 532 - 
nouveau_mem_timing_fini(dev); 533 527 nouveau_temp_fini(dev); 534 528 nouveau_perf_fini(dev); 535 529 nouveau_volt_fini(dev); 530 + nouveau_mem_timing_fini(dev); 536 531 537 - #ifdef CONFIG_ACPI 532 + #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) 538 533 unregister_acpi_notifier(&pm->acpi_nb); 539 534 #endif 540 535 nouveau_hwmon_fini(dev);
+7 -7
drivers/gpu/drm/nouveau/nouveau_reg.h
··· 639 639 # define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240 640 640 # define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258 641 641 642 - #define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) 642 + #define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) 643 643 #define NV50_AUXCH_DATA_OUT__SIZE 4 644 - #define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) 644 + #define NV50_AUXCH_DATA_IN(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) 645 645 #define NV50_AUXCH_DATA_IN__SIZE 4 646 646 #define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0) 647 647 #define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4) ··· 829 829 #define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 830 830 #define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 831 831 #define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff 832 - #define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) 832 + #define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) 833 833 #define NV50_SOR_DP_CTRL_ENABLED 0x00000001 834 834 #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 835 835 #define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 ··· 841 841 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000 842 842 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000 843 843 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 844 - #define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 845 - #define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 846 - #define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) 847 - #define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 844 + #define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 845 + #define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 846 + #define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) 847 + #define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 848 848 849 849 #define 
NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) 850 850 #define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
+92 -120
drivers/gpu/drm/nouveau/nouveau_state.c
··· 65 65 engine->timer.takedown = nv04_timer_takedown; 66 66 engine->fb.init = nv04_fb_init; 67 67 engine->fb.takedown = nv04_fb_takedown; 68 - engine->graph.init = nv04_graph_init; 69 - engine->graph.takedown = nv04_graph_takedown; 70 - engine->graph.fifo_access = nv04_graph_fifo_access; 71 - engine->graph.channel = nv04_graph_channel; 72 - engine->graph.create_context = nv04_graph_create_context; 73 - engine->graph.destroy_context = nv04_graph_destroy_context; 74 - engine->graph.load_context = nv04_graph_load_context; 75 - engine->graph.unload_context = nv04_graph_unload_context; 76 68 engine->fifo.channels = 16; 77 69 engine->fifo.init = nv04_fifo_init; 78 70 engine->fifo.takedown = nv04_fifo_fini; ··· 90 98 engine->pm.clock_get = nv04_pm_clock_get; 91 99 engine->pm.clock_pre = nv04_pm_clock_pre; 92 100 engine->pm.clock_set = nv04_pm_clock_set; 93 - engine->crypt.init = nouveau_stub_init; 94 - engine->crypt.takedown = nouveau_stub_takedown; 95 101 engine->vram.init = nouveau_mem_detect; 96 102 engine->vram.flags_valid = nouveau_mem_flags_valid; 97 103 break; ··· 113 123 engine->fb.init_tile_region = nv10_fb_init_tile_region; 114 124 engine->fb.set_tile_region = nv10_fb_set_tile_region; 115 125 engine->fb.free_tile_region = nv10_fb_free_tile_region; 116 - engine->graph.init = nv10_graph_init; 117 - engine->graph.takedown = nv10_graph_takedown; 118 - engine->graph.channel = nv10_graph_channel; 119 - engine->graph.create_context = nv10_graph_create_context; 120 - engine->graph.destroy_context = nv10_graph_destroy_context; 121 - engine->graph.fifo_access = nv04_graph_fifo_access; 122 - engine->graph.load_context = nv10_graph_load_context; 123 - engine->graph.unload_context = nv10_graph_unload_context; 124 - engine->graph.set_tile_region = nv10_graph_set_tile_region; 125 126 engine->fifo.channels = 32; 126 127 engine->fifo.init = nv10_fifo_init; 127 128 engine->fifo.takedown = nv04_fifo_fini; ··· 138 157 engine->pm.clock_get = nv04_pm_clock_get; 139 158 
engine->pm.clock_pre = nv04_pm_clock_pre; 140 159 engine->pm.clock_set = nv04_pm_clock_set; 141 - engine->crypt.init = nouveau_stub_init; 142 - engine->crypt.takedown = nouveau_stub_takedown; 143 160 engine->vram.init = nouveau_mem_detect; 144 161 engine->vram.flags_valid = nouveau_mem_flags_valid; 145 162 break; ··· 161 182 engine->fb.init_tile_region = nv10_fb_init_tile_region; 162 183 engine->fb.set_tile_region = nv10_fb_set_tile_region; 163 184 engine->fb.free_tile_region = nv10_fb_free_tile_region; 164 - engine->graph.init = nv20_graph_init; 165 - engine->graph.takedown = nv20_graph_takedown; 166 - engine->graph.channel = nv10_graph_channel; 167 - engine->graph.create_context = nv20_graph_create_context; 168 - engine->graph.destroy_context = nv20_graph_destroy_context; 169 - engine->graph.fifo_access = nv04_graph_fifo_access; 170 - engine->graph.load_context = nv20_graph_load_context; 171 - engine->graph.unload_context = nv20_graph_unload_context; 172 - engine->graph.set_tile_region = nv20_graph_set_tile_region; 173 185 engine->fifo.channels = 32; 174 186 engine->fifo.init = nv10_fifo_init; 175 187 engine->fifo.takedown = nv04_fifo_fini; ··· 186 216 engine->pm.clock_get = nv04_pm_clock_get; 187 217 engine->pm.clock_pre = nv04_pm_clock_pre; 188 218 engine->pm.clock_set = nv04_pm_clock_set; 189 - engine->crypt.init = nouveau_stub_init; 190 - engine->crypt.takedown = nouveau_stub_takedown; 191 219 engine->vram.init = nouveau_mem_detect; 192 220 engine->vram.flags_valid = nouveau_mem_flags_valid; 193 221 break; ··· 209 241 engine->fb.init_tile_region = nv30_fb_init_tile_region; 210 242 engine->fb.set_tile_region = nv10_fb_set_tile_region; 211 243 engine->fb.free_tile_region = nv30_fb_free_tile_region; 212 - engine->graph.init = nv30_graph_init; 213 - engine->graph.takedown = nv20_graph_takedown; 214 - engine->graph.fifo_access = nv04_graph_fifo_access; 215 - engine->graph.channel = nv10_graph_channel; 216 - engine->graph.create_context = nv20_graph_create_context; 
217 - engine->graph.destroy_context = nv20_graph_destroy_context; 218 - engine->graph.load_context = nv20_graph_load_context; 219 - engine->graph.unload_context = nv20_graph_unload_context; 220 - engine->graph.set_tile_region = nv20_graph_set_tile_region; 221 244 engine->fifo.channels = 32; 222 245 engine->fifo.init = nv10_fifo_init; 223 246 engine->fifo.takedown = nv04_fifo_fini; ··· 236 277 engine->pm.clock_set = nv04_pm_clock_set; 237 278 engine->pm.voltage_get = nouveau_voltage_gpio_get; 238 279 engine->pm.voltage_set = nouveau_voltage_gpio_set; 239 - engine->crypt.init = nouveau_stub_init; 240 - engine->crypt.takedown = nouveau_stub_takedown; 241 280 engine->vram.init = nouveau_mem_detect; 242 281 engine->vram.flags_valid = nouveau_mem_flags_valid; 243 282 break; ··· 260 303 engine->fb.init_tile_region = nv30_fb_init_tile_region; 261 304 engine->fb.set_tile_region = nv40_fb_set_tile_region; 262 305 engine->fb.free_tile_region = nv30_fb_free_tile_region; 263 - engine->graph.init = nv40_graph_init; 264 - engine->graph.takedown = nv40_graph_takedown; 265 - engine->graph.fifo_access = nv04_graph_fifo_access; 266 - engine->graph.channel = nv40_graph_channel; 267 - engine->graph.create_context = nv40_graph_create_context; 268 - engine->graph.destroy_context = nv40_graph_destroy_context; 269 - engine->graph.load_context = nv40_graph_load_context; 270 - engine->graph.unload_context = nv40_graph_unload_context; 271 - engine->graph.set_tile_region = nv40_graph_set_tile_region; 272 306 engine->fifo.channels = 32; 273 307 engine->fifo.init = nv40_fifo_init; 274 308 engine->fifo.takedown = nv04_fifo_fini; ··· 288 340 engine->pm.voltage_get = nouveau_voltage_gpio_get; 289 341 engine->pm.voltage_set = nouveau_voltage_gpio_set; 290 342 engine->pm.temp_get = nv40_temp_get; 291 - engine->crypt.init = nouveau_stub_init; 292 - engine->crypt.takedown = nouveau_stub_takedown; 293 343 engine->vram.init = nouveau_mem_detect; 294 344 engine->vram.flags_valid = nouveau_mem_flags_valid; 
295 345 break; ··· 314 368 engine->timer.takedown = nv04_timer_takedown; 315 369 engine->fb.init = nv50_fb_init; 316 370 engine->fb.takedown = nv50_fb_takedown; 317 - engine->graph.init = nv50_graph_init; 318 - engine->graph.takedown = nv50_graph_takedown; 319 - engine->graph.fifo_access = nv50_graph_fifo_access; 320 - engine->graph.channel = nv50_graph_channel; 321 - engine->graph.create_context = nv50_graph_create_context; 322 - engine->graph.destroy_context = nv50_graph_destroy_context; 323 - engine->graph.load_context = nv50_graph_load_context; 324 - engine->graph.unload_context = nv50_graph_unload_context; 325 - if (dev_priv->chipset == 0x50 || 326 - dev_priv->chipset == 0xac) 327 - engine->graph.tlb_flush = nv50_graph_tlb_flush; 328 - else 329 - engine->graph.tlb_flush = nv84_graph_tlb_flush; 330 371 engine->fifo.channels = 128; 331 372 engine->fifo.init = nv50_fifo_init; 332 373 engine->fifo.takedown = nv50_fifo_takedown; ··· 365 432 engine->pm.temp_get = nv84_temp_get; 366 433 else 367 434 engine->pm.temp_get = nv40_temp_get; 368 - switch (dev_priv->chipset) { 369 - case 0x84: 370 - case 0x86: 371 - case 0x92: 372 - case 0x94: 373 - case 0x96: 374 - case 0xa0: 375 - engine->crypt.init = nv84_crypt_init; 376 - engine->crypt.takedown = nv84_crypt_fini; 377 - engine->crypt.create_context = nv84_crypt_create_context; 378 - engine->crypt.destroy_context = nv84_crypt_destroy_context; 379 - engine->crypt.tlb_flush = nv84_crypt_tlb_flush; 380 - break; 381 - default: 382 - engine->crypt.init = nouveau_stub_init; 383 - engine->crypt.takedown = nouveau_stub_takedown; 384 - break; 385 - } 386 435 engine->vram.init = nv50_vram_init; 387 436 engine->vram.get = nv50_vram_new; 388 437 engine->vram.put = nv50_vram_del; ··· 387 472 engine->timer.takedown = nv04_timer_takedown; 388 473 engine->fb.init = nvc0_fb_init; 389 474 engine->fb.takedown = nvc0_fb_takedown; 390 - engine->graph.init = nvc0_graph_init; 391 - engine->graph.takedown = nvc0_graph_takedown; 392 - 
engine->graph.fifo_access = nvc0_graph_fifo_access; 393 - engine->graph.channel = nvc0_graph_channel; 394 - engine->graph.create_context = nvc0_graph_create_context; 395 - engine->graph.destroy_context = nvc0_graph_destroy_context; 396 - engine->graph.load_context = nvc0_graph_load_context; 397 - engine->graph.unload_context = nvc0_graph_unload_context; 398 475 engine->fifo.channels = 128; 399 476 engine->fifo.init = nvc0_fifo_init; 400 477 engine->fifo.takedown = nvc0_fifo_takedown; ··· 410 503 engine->gpio.irq_register = nv50_gpio_irq_register; 411 504 engine->gpio.irq_unregister = nv50_gpio_irq_unregister; 412 505 engine->gpio.irq_enable = nv50_gpio_irq_enable; 413 - engine->crypt.init = nouveau_stub_init; 414 - engine->crypt.takedown = nouveau_stub_takedown; 415 506 engine->vram.init = nvc0_vram_init; 416 507 engine->vram.get = nvc0_vram_new; 417 508 engine->vram.put = nv50_vram_del; ··· 498 593 { 499 594 struct drm_nouveau_private *dev_priv = dev->dev_private; 500 595 struct nouveau_engine *engine; 501 - int ret; 596 + int ret, e = 0; 502 597 503 598 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 504 599 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, ··· 563 658 if (ret) 564 659 goto out_timer; 565 660 566 - if (nouveau_noaccel) 567 - engine->graph.accel_blocked = true; 568 - else { 569 - /* PGRAPH */ 570 - ret = engine->graph.init(dev); 571 - if (ret) 572 - goto out_fb; 661 + switch (dev_priv->card_type) { 662 + case NV_04: 663 + nv04_graph_create(dev); 664 + break; 665 + case NV_10: 666 + nv10_graph_create(dev); 667 + break; 668 + case NV_20: 669 + case NV_30: 670 + nv20_graph_create(dev); 671 + break; 672 + case NV_40: 673 + nv40_graph_create(dev); 674 + break; 675 + case NV_50: 676 + nv50_graph_create(dev); 677 + break; 678 + case NV_C0: 679 + nvc0_graph_create(dev); 680 + break; 681 + default: 682 + break; 683 + } 573 684 574 - /* PCRYPT */ 575 - ret = engine->crypt.init(dev); 576 - if (ret) 577 - goto 
out_graph; 685 + switch (dev_priv->chipset) { 686 + case 0x84: 687 + case 0x86: 688 + case 0x92: 689 + case 0x94: 690 + case 0x96: 691 + case 0xa0: 692 + nv84_crypt_create(dev); 693 + break; 694 + } 695 + 696 + switch (dev_priv->card_type) { 697 + case NV_50: 698 + switch (dev_priv->chipset) { 699 + case 0xa3: 700 + case 0xa5: 701 + case 0xa8: 702 + case 0xaf: 703 + nva3_copy_create(dev); 704 + break; 705 + } 706 + break; 707 + case NV_C0: 708 + nvc0_copy_create(dev, 0); 709 + nvc0_copy_create(dev, 1); 710 + break; 711 + default: 712 + break; 713 + } 714 + 715 + if (dev_priv->card_type == NV_40) 716 + nv40_mpeg_create(dev); 717 + else 718 + if (dev_priv->card_type == NV_50 && 719 + (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) 720 + nv50_mpeg_create(dev); 721 + 722 + if (!nouveau_noaccel) { 723 + for (e = 0; e < NVOBJ_ENGINE_NR; e++) { 724 + if (dev_priv->eng[e]) { 725 + ret = dev_priv->eng[e]->init(dev, e); 726 + if (ret) 727 + goto out_engine; 728 + } 729 + } 578 730 579 731 /* PFIFO */ 580 732 ret = engine->fifo.init(dev); 581 733 if (ret) 582 - goto out_crypt; 734 + goto out_engine; 583 735 } 584 736 585 737 ret = engine->display.create(dev); ··· 653 691 654 692 /* what about PVIDEO/PCRTC/PRAMDAC etc? 
*/ 655 693 656 - if (!engine->graph.accel_blocked) { 694 + if (dev_priv->eng[NVOBJ_ENGINE_GR]) { 657 695 ret = nouveau_fence_init(dev); 658 696 if (ret) 659 697 goto out_irq; ··· 677 715 out_fifo: 678 716 if (!nouveau_noaccel) 679 717 engine->fifo.takedown(dev); 680 - out_crypt: 681 - if (!nouveau_noaccel) 682 - engine->crypt.takedown(dev); 683 - out_graph: 684 - if (!nouveau_noaccel) 685 - engine->graph.takedown(dev); 686 - out_fb: 718 + out_engine: 719 + if (!nouveau_noaccel) { 720 + for (e = e - 1; e >= 0; e--) { 721 + if (!dev_priv->eng[e]) 722 + continue; 723 + dev_priv->eng[e]->fini(dev, e); 724 + dev_priv->eng[e]->destroy(dev,e ); 725 + } 726 + } 727 + 687 728 engine->fb.takedown(dev); 688 729 out_timer: 689 730 engine->timer.takedown(dev); ··· 716 751 { 717 752 struct drm_nouveau_private *dev_priv = dev->dev_private; 718 753 struct nouveau_engine *engine = &dev_priv->engine; 754 + int e; 719 755 720 - if (!engine->graph.accel_blocked) { 756 + if (dev_priv->channel) { 721 757 nouveau_fence_fini(dev); 722 758 nouveau_channel_put_unlocked(&dev_priv->channel); 723 759 } 724 760 725 761 if (!nouveau_noaccel) { 726 762 engine->fifo.takedown(dev); 727 - engine->crypt.takedown(dev); 728 - engine->graph.takedown(dev); 763 + for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 764 + if (dev_priv->eng[e]) { 765 + dev_priv->eng[e]->fini(dev, e); 766 + dev_priv->eng[e]->destroy(dev,e ); 767 + } 768 + } 729 769 } 730 770 engine->fb.takedown(dev); 731 771 engine->timer.takedown(dev); ··· 836 866 #ifdef CONFIG_X86 837 867 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 838 868 #endif 839 - 869 + 840 870 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); 841 871 return 0; 842 872 } ··· 888 918 889 919 /* Time to determine the card architecture */ 890 920 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); 921 + dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? 
*/ 891 922 892 923 /* We're dealing with >=NV10 */ 893 924 if ((reg0 & 0x0f000000) > 0) { 894 925 /* Bit 27-20 contain the architecture in hex */ 895 926 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 927 + dev_priv->stepping = (reg0 & 0xff); 896 928 /* NV04 or NV05 */ 897 929 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 898 930 if (reg0 & 0x00f00000)
+1 -2
drivers/gpu/drm/nouveau/nouveau_vm.h
··· 53 53 int refcount; 54 54 55 55 struct list_head pgd_list; 56 - atomic_t pgraph_refs; 57 - atomic_t pcrypt_refs; 56 + atomic_t engref[16]; 58 57 59 58 struct nouveau_vm_pgt *pgt; 60 59 u32 fpde;
+9 -1
drivers/gpu/drm/nouveau/nouveau_volt.c
··· 159 159 headerlen = volt[1]; 160 160 recordlen = volt[2]; 161 161 entries = volt[3]; 162 - vidshift = hweight8(volt[5]); 163 162 vidmask = volt[4]; 163 + /* no longer certain what volt[5] is, if it's related to 164 + * the vid shift then it's definitely not a function of 165 + * how many bits are set. 166 + * 167 + * after looking at a number of nva3+ vbios images, they 168 + * all seem likely to have a static shift of 2.. lets 169 + * go with that for now until proven otherwise. 170 + */ 171 + vidshift = 2; 164 172 break; 165 173 default: 166 174 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
+4 -5
drivers/gpu/drm/nouveau/nv04_crtc.c
··· 790 790 if (atomic) { 791 791 drm_fb = passed_fb; 792 792 fb = nouveau_framebuffer(passed_fb); 793 - } 794 - else { 793 + } else { 795 794 /* If not atomic, we can go ahead and pin, and unpin the 796 795 * old fb we were passed. 797 796 */ ··· 943 944 struct drm_gem_object *gem; 944 945 int ret = 0; 945 946 946 - if (width != 64 || height != 64) 947 - return -EINVAL; 948 - 949 947 if (!buffer_handle) { 950 948 nv_crtc->cursor.hide(nv_crtc, true); 951 949 return 0; 952 950 } 951 + 952 + if (width != 64 || height != 64) 953 + return -EINVAL; 953 954 954 955 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 955 956 if (!gem)
+214 -169
drivers/gpu/drm/nouveau/nv04_graph.c
··· 28 28 #include "nouveau_drv.h" 29 29 #include "nouveau_hw.h" 30 30 #include "nouveau_util.h" 31 + #include "nouveau_ramht.h" 31 32 32 - static int nv04_graph_register(struct drm_device *dev); 33 - static void nv04_graph_isr(struct drm_device *dev); 33 + struct nv04_graph_engine { 34 + struct nouveau_exec_engine base; 35 + }; 34 36 35 37 static uint32_t nv04_graph_ctx_regs[] = { 36 38 0x0040053c, ··· 352 350 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; 353 351 }; 354 352 355 - struct nouveau_channel * 353 + static struct nouveau_channel * 356 354 nv04_graph_channel(struct drm_device *dev) 357 355 { 358 356 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 367 365 return dev_priv->channels.ptr[chid]; 368 366 } 369 367 370 - static void 371 - nv04_graph_context_switch(struct drm_device *dev) 372 - { 373 - struct drm_nouveau_private *dev_priv = dev->dev_private; 374 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 375 - struct nouveau_channel *chan = NULL; 376 - int chid; 377 - 378 - nouveau_wait_for_idle(dev); 379 - 380 - /* If previous context is valid, we need to save it */ 381 - pgraph->unload_context(dev); 382 - 383 - /* Load context for next channel */ 384 - chid = dev_priv->engine.fifo.channel_id(dev); 385 - chan = dev_priv->channels.ptr[chid]; 386 - if (chan) 387 - nv04_graph_load_context(chan); 388 - } 389 - 390 368 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) 391 369 { 392 370 int i; ··· 379 397 return NULL; 380 398 } 381 399 382 - int nv04_graph_create_context(struct nouveau_channel *chan) 400 + static int 401 + nv04_graph_load_context(struct nouveau_channel *chan) 383 402 { 384 - struct graph_state *pgraph_ctx; 385 - NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id); 386 - 387 - chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), 388 - GFP_KERNEL); 389 - if (pgraph_ctx == NULL) 390 - return -ENOMEM; 391 - 392 - *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; 393 - 394 - 
return 0; 395 - } 396 - 397 - void nv04_graph_destroy_context(struct nouveau_channel *chan) 398 - { 403 + struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR]; 399 404 struct drm_device *dev = chan->dev; 400 - struct drm_nouveau_private *dev_priv = dev->dev_private; 401 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 402 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 403 - unsigned long flags; 404 - 405 - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 406 - pgraph->fifo_access(dev, false); 407 - 408 - /* Unload the context if it's the currently active one */ 409 - if (pgraph->channel(dev) == chan) 410 - pgraph->unload_context(dev); 411 - 412 - /* Free the context resources */ 413 - kfree(pgraph_ctx); 414 - chan->pgraph_ctx = NULL; 415 - 416 - pgraph->fifo_access(dev, true); 417 - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 418 - } 419 - 420 - int nv04_graph_load_context(struct nouveau_channel *chan) 421 - { 422 - struct drm_device *dev = chan->dev; 423 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 424 405 uint32_t tmp; 425 406 int i; 426 407 ··· 401 456 return 0; 402 457 } 403 458 404 - int 459 + static int 405 460 nv04_graph_unload_context(struct drm_device *dev) 406 461 { 407 462 struct drm_nouveau_private *dev_priv = dev->dev_private; 408 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 409 463 struct nouveau_channel *chan = NULL; 410 464 struct graph_state *ctx; 411 465 uint32_t tmp; 412 466 int i; 413 467 414 - chan = pgraph->channel(dev); 468 + chan = nv04_graph_channel(dev); 415 469 if (!chan) 416 470 return 0; 417 - ctx = chan->pgraph_ctx; 471 + ctx = chan->engctx[NVOBJ_ENGINE_GR]; 418 472 419 473 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) 420 474 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]); ··· 425 481 return 0; 426 482 } 427 483 428 - int nv04_graph_init(struct drm_device *dev) 484 + static int 485 + nv04_graph_context_new(struct nouveau_channel 
*chan, int engine) 486 + { 487 + struct graph_state *pgraph_ctx; 488 + NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id); 489 + 490 + pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL); 491 + if (pgraph_ctx == NULL) 492 + return -ENOMEM; 493 + 494 + *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; 495 + 496 + chan->engctx[engine] = pgraph_ctx; 497 + return 0; 498 + } 499 + 500 + static void 501 + nv04_graph_context_del(struct nouveau_channel *chan, int engine) 502 + { 503 + struct drm_device *dev = chan->dev; 504 + struct drm_nouveau_private *dev_priv = dev->dev_private; 505 + struct graph_state *pgraph_ctx = chan->engctx[engine]; 506 + unsigned long flags; 507 + 508 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 509 + nv04_graph_fifo_access(dev, false); 510 + 511 + /* Unload the context if it's the currently active one */ 512 + if (nv04_graph_channel(dev) == chan) 513 + nv04_graph_unload_context(dev); 514 + 515 + nv04_graph_fifo_access(dev, true); 516 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 517 + 518 + /* Free the context resources */ 519 + kfree(pgraph_ctx); 520 + chan->engctx[engine] = NULL; 521 + } 522 + 523 + int 524 + nv04_graph_object_new(struct nouveau_channel *chan, int engine, 525 + u32 handle, u16 class) 526 + { 527 + struct drm_device *dev = chan->dev; 528 + struct nouveau_gpuobj *obj = NULL; 529 + int ret; 530 + 531 + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 532 + if (ret) 533 + return ret; 534 + obj->engine = 1; 535 + obj->class = class; 536 + 537 + #ifdef __BIG_ENDIAN 538 + nv_wo32(obj, 0x00, 0x00080000 | class); 539 + #else 540 + nv_wo32(obj, 0x00, class); 541 + #endif 542 + nv_wo32(obj, 0x04, 0x00000000); 543 + nv_wo32(obj, 0x08, 0x00000000); 544 + nv_wo32(obj, 0x0c, 0x00000000); 545 + 546 + ret = nouveau_ramht_insert(chan, handle, obj); 547 + nouveau_gpuobj_ref(NULL, &obj); 548 + return ret; 549 + } 550 + 551 + static int 552 + nv04_graph_init(struct 
drm_device *dev, int engine) 429 553 { 430 554 struct drm_nouveau_private *dev_priv = dev->dev_private; 431 555 uint32_t tmp; 432 - int ret; 433 556 434 557 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 435 558 ~NV_PMC_ENABLE_PGRAPH); 436 559 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 437 560 NV_PMC_ENABLE_PGRAPH); 438 561 439 - ret = nv04_graph_register(dev); 440 - if (ret) 441 - return ret; 442 - 443 562 /* Enable PGRAPH interrupts */ 444 - nouveau_irq_register(dev, 12, nv04_graph_isr); 445 563 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); 446 564 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 447 565 ··· 513 507 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ 514 508 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000); 515 509 /*1231C000 blob, 001 haiku*/ 516 - //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ 510 + /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ 517 511 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100); 518 512 /*0x72111100 blob , 01 haiku*/ 519 513 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ ··· 537 531 return 0; 538 532 } 539 533 540 - void nv04_graph_takedown(struct drm_device *dev) 534 + static int 535 + nv04_graph_fini(struct drm_device *dev, int engine) 541 536 { 537 + nv04_graph_unload_context(dev); 542 538 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 543 - nouveau_irq_unregister(dev, 12); 539 + return 0; 544 540 } 545 541 546 542 void ··· 977 969 return 1; 978 970 } 979 971 980 - static int 981 - nv04_graph_register(struct drm_device *dev) 972 + static struct nouveau_bitfield nv04_graph_intr[] = { 973 + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, 974 + {} 975 + }; 976 + 977 + static struct nouveau_bitfield nv04_graph_nstatus[] = { 978 + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, 979 + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, 980 + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, 981 + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, 982 + {} 983 + }; 984 + 985 + struct 
nouveau_bitfield nv04_graph_nsource[] = { 986 + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, 987 + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, 988 + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, 989 + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, 990 + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, 991 + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, 992 + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, 993 + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, 994 + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, 995 + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, 996 + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, 997 + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, 998 + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, 999 + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, 1000 + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, 1001 + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, 1002 + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, 1003 + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, 1004 + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, 1005 + {} 1006 + }; 1007 + 1008 + static void 1009 + nv04_graph_context_switch(struct drm_device *dev) 982 1010 { 983 1011 struct drm_nouveau_private *dev_priv = dev->dev_private; 1012 + struct nouveau_channel *chan = NULL; 1013 + int chid; 984 1014 985 - if (dev_priv->engine.graph.registered) 986 - return 0; 1015 + nouveau_wait_for_idle(dev); 1016 + 1017 + /* If previous context is valid, we need to save it */ 1018 + nv04_graph_unload_context(dev); 1019 + 1020 + /* Load context for next channel */ 1021 + chid = dev_priv->engine.fifo.channel_id(dev); 1022 + chan = dev_priv->channels.ptr[chid]; 1023 + if (chan) 1024 + nv04_graph_load_context(chan); 1025 + } 1026 + 1027 + static void 1028 + nv04_graph_isr(struct drm_device *dev) 1029 + { 1030 + u32 
stat; 1031 + 1032 + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { 1033 + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 1034 + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); 1035 + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); 1036 + u32 chid = (addr & 0x0f000000) >> 24; 1037 + u32 subc = (addr & 0x0000e000) >> 13; 1038 + u32 mthd = (addr & 0x00001ffc); 1039 + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); 1040 + u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; 1041 + u32 show = stat; 1042 + 1043 + if (stat & NV_PGRAPH_INTR_NOTIFY) { 1044 + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 1045 + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) 1046 + show &= ~NV_PGRAPH_INTR_NOTIFY; 1047 + } 1048 + } 1049 + 1050 + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { 1051 + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 1052 + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1053 + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1054 + nv04_graph_context_switch(dev); 1055 + } 1056 + 1057 + nv_wr32(dev, NV03_PGRAPH_INTR, stat); 1058 + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); 1059 + 1060 + if (show && nouveau_ratelimit()) { 1061 + NV_INFO(dev, "PGRAPH -"); 1062 + nouveau_bitfield_print(nv04_graph_intr, show); 1063 + printk(" nsource:"); 1064 + nouveau_bitfield_print(nv04_graph_nsource, nsource); 1065 + printk(" nstatus:"); 1066 + nouveau_bitfield_print(nv04_graph_nstatus, nstatus); 1067 + printk("\n"); 1068 + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " 1069 + "mthd 0x%04x data 0x%08x\n", 1070 + chid, subc, class, mthd, data); 1071 + } 1072 + } 1073 + } 1074 + 1075 + static void 1076 + nv04_graph_destroy(struct drm_device *dev, int engine) 1077 + { 1078 + struct nv04_graph_engine *pgraph = nv_engine(dev, engine); 1079 + 1080 + nouveau_irq_unregister(dev, 12); 1081 + 1082 + NVOBJ_ENGINE_DEL(dev, GR); 1083 + kfree(pgraph); 1084 + } 1085 + 1086 + int 1087 + nv04_graph_create(struct drm_device *dev) 1088 + { 1089 + struct nv04_graph_engine 
*pgraph; 1090 + 1091 + pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL); 1092 + if (!pgraph) 1093 + return -ENOMEM; 1094 + 1095 + pgraph->base.destroy = nv04_graph_destroy; 1096 + pgraph->base.init = nv04_graph_init; 1097 + pgraph->base.fini = nv04_graph_fini; 1098 + pgraph->base.context_new = nv04_graph_context_new; 1099 + pgraph->base.context_del = nv04_graph_context_del; 1100 + pgraph->base.object_new = nv04_graph_object_new; 1101 + 1102 + NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 1103 + nouveau_irq_register(dev, 12, nv04_graph_isr); 987 1104 988 1105 /* dvd subpicture */ 989 1106 NVOBJ_CLASS(dev, 0x0038, GR); ··· 1355 1222 NVOBJ_CLASS(dev, 0x506e, SW); 1356 1223 NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); 1357 1224 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 1358 - 1359 - dev_priv->engine.graph.registered = true; 1360 1225 return 0; 1361 - }; 1362 - 1363 - static struct nouveau_bitfield nv04_graph_intr[] = { 1364 - { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, 1365 - {} 1366 - }; 1367 - 1368 - static struct nouveau_bitfield nv04_graph_nstatus[] = 1369 - { 1370 - { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, 1371 - { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, 1372 - { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, 1373 - { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, 1374 - {} 1375 - }; 1376 - 1377 - struct nouveau_bitfield nv04_graph_nsource[] = 1378 - { 1379 - { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, 1380 - { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, 1381 - { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, 1382 - { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, 1383 - { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, 1384 - { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, 1385 - { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, 1386 - { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, 1387 - { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, 
"DMA_W_PROTECTION" }, 1388 - { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, 1389 - { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, 1390 - { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, 1391 - { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, 1392 - { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, 1393 - { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, 1394 - { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, 1395 - { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, 1396 - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, 1397 - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, 1398 - {} 1399 - }; 1400 - 1401 - static void 1402 - nv04_graph_isr(struct drm_device *dev) 1403 - { 1404 - u32 stat; 1405 - 1406 - while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { 1407 - u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 1408 - u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); 1409 - u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); 1410 - u32 chid = (addr & 0x0f000000) >> 24; 1411 - u32 subc = (addr & 0x0000e000) >> 13; 1412 - u32 mthd = (addr & 0x00001ffc); 1413 - u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); 1414 - u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; 1415 - u32 show = stat; 1416 - 1417 - if (stat & NV_PGRAPH_INTR_NOTIFY) { 1418 - if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 1419 - if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) 1420 - show &= ~NV_PGRAPH_INTR_NOTIFY; 1421 - } 1422 - } 1423 - 1424 - if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { 1425 - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 1426 - stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1427 - show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1428 - nv04_graph_context_switch(dev); 1429 - } 1430 - 1431 - nv_wr32(dev, NV03_PGRAPH_INTR, stat); 1432 - nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); 1433 - 1434 - if (show && nouveau_ratelimit()) { 1435 - NV_INFO(dev, "PGRAPH -"); 1436 - 
nouveau_bitfield_print(nv04_graph_intr, show); 1437 - printk(" nsource:"); 1438 - nouveau_bitfield_print(nv04_graph_nsource, nsource); 1439 - printk(" nstatus:"); 1440 - nouveau_bitfield_print(nv04_graph_nstatus, nstatus); 1441 - printk("\n"); 1442 - NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " 1443 - "mthd 0x%04x data 0x%08x\n", 1444 - chid, subc, class, mthd, data); 1445 - } 1446 - } 1447 1226 }
+3
drivers/gpu/drm/nouveau/nv04_instmem.c
··· 95 95 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); 96 96 nouveau_gpuobj_ref(NULL, &dev_priv->ramro); 97 97 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc); 98 + 99 + if (drm_mm_initialized(&dev_priv->ramin_heap)) 100 + drm_mm_takedown(&dev_priv->ramin_heap); 98 101 } 99 102 100 103 int
+113 -99
drivers/gpu/drm/nouveau/nv10_graph.c
··· 28 28 #include "nouveau_drv.h" 29 29 #include "nouveau_util.h" 30 30 31 - static int nv10_graph_register(struct drm_device *); 32 - static void nv10_graph_isr(struct drm_device *); 33 - 34 - #define NV10_FIFO_NUMBER 32 31 + struct nv10_graph_engine { 32 + struct nouveau_exec_engine base; 33 + }; 35 34 36 35 struct pipe_state { 37 36 uint32_t pipe_0x0000[0x040/4]; ··· 413 414 414 415 static void nv10_graph_save_pipe(struct nouveau_channel *chan) 415 416 { 416 - struct drm_device *dev = chan->dev; 417 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 417 + struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR]; 418 418 struct pipe_state *pipe = &pgraph_ctx->pipe_state; 419 + struct drm_device *dev = chan->dev; 419 420 420 421 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); 421 422 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); ··· 431 432 432 433 static void nv10_graph_load_pipe(struct nouveau_channel *chan) 433 434 { 434 - struct drm_device *dev = chan->dev; 435 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 435 + struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR]; 436 436 struct pipe_state *pipe = &pgraph_ctx->pipe_state; 437 + struct drm_device *dev = chan->dev; 437 438 uint32_t xfmode0, xfmode1; 438 439 int i; 439 440 ··· 481 482 482 483 static void nv10_graph_create_pipe(struct nouveau_channel *chan) 483 484 { 484 - struct drm_device *dev = chan->dev; 485 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 485 + struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR]; 486 486 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; 487 + struct drm_device *dev = chan->dev; 487 488 uint32_t *fifo_pipe_state_addr; 488 489 int i; 489 490 #define PIPE_INIT(addr) \ ··· 660 661 uint32_t inst) 661 662 { 662 663 struct drm_device *dev = chan->dev; 663 - struct drm_nouveau_private *dev_priv = dev->dev_private; 664 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 665 664 uint32_t st2, st2_dl, st2_dh, fifo_ptr, 
fifo[0x60/4]; 666 665 uint32_t ctx_user, ctx_switch[5]; 667 666 int i, subchan = -1; ··· 708 711 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); 709 712 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); 710 713 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); 711 - pgraph->fifo_access(dev, true); 712 - pgraph->fifo_access(dev, false); 714 + nv04_graph_fifo_access(dev, true); 715 + nv04_graph_fifo_access(dev, false); 713 716 714 717 /* Restore the FIFO state */ 715 718 for (i = 0; i < ARRAY_SIZE(fifo); i++) ··· 726 729 nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user); 727 730 } 728 731 729 - int nv10_graph_load_context(struct nouveau_channel *chan) 732 + static int 733 + nv10_graph_load_context(struct nouveau_channel *chan) 730 734 { 731 735 struct drm_device *dev = chan->dev; 732 736 struct drm_nouveau_private *dev_priv = dev->dev_private; 733 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 737 + struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR]; 734 738 uint32_t tmp; 735 739 int i; 736 740 ··· 755 757 return 0; 756 758 } 757 759 758 - int 760 + static int 759 761 nv10_graph_unload_context(struct drm_device *dev) 760 762 { 761 763 struct drm_nouveau_private *dev_priv = dev->dev_private; 762 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 763 764 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 764 765 struct nouveau_channel *chan; 765 766 struct graph_state *ctx; 766 767 uint32_t tmp; 767 768 int i; 768 769 769 - chan = pgraph->channel(dev); 770 + chan = nv10_graph_channel(dev); 770 771 if (!chan) 771 772 return 0; 772 - ctx = chan->pgraph_ctx; 773 + ctx = chan->engctx[NVOBJ_ENGINE_GR]; 773 774 774 775 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) 775 776 ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]); ··· 802 805 /* Load context for next channel */ 803 806 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; 804 807 chan = dev_priv->channels.ptr[chid]; 805 - if (chan && chan->pgraph_ctx) 808 + 
if (chan && chan->engctx[NVOBJ_ENGINE_GR]) 806 809 nv10_graph_load_context(chan); 807 810 } 808 811 ··· 833 836 return dev_priv->channels.ptr[chid]; 834 837 } 835 838 836 - int nv10_graph_create_context(struct nouveau_channel *chan) 839 + static int 840 + nv10_graph_context_new(struct nouveau_channel *chan, int engine) 837 841 { 838 842 struct drm_device *dev = chan->dev; 839 843 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 842 844 843 845 NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id); 844 846 845 - chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), 846 - GFP_KERNEL); 847 + pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL); 847 848 if (pgraph_ctx == NULL) 848 849 return -ENOMEM; 849 - 850 + chan->engctx[engine] = pgraph_ctx; 850 851 851 852 NV_WRITE_CTX(0x00400e88, 0x08000000); 852 853 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); ··· 870 873 return 0; 871 874 } 872 875 873 - void nv10_graph_destroy_context(struct nouveau_channel *chan) 876 + static void 877 + nv10_graph_context_del(struct nouveau_channel *chan, int engine) 874 878 { 875 879 struct drm_device *dev = chan->dev; 876 880 struct drm_nouveau_private *dev_priv = dev->dev_private; 877 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 878 - struct graph_state *pgraph_ctx = chan->pgraph_ctx; 881 + struct graph_state *pgraph_ctx = chan->engctx[engine]; 879 882 unsigned long flags; 880 883 881 884 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 882 - pgraph->fifo_access(dev, false); 885 + nv04_graph_fifo_access(dev, false); 883 886 884 887 /* Unload the context if it's the currently active one */ 885 - if (pgraph->channel(dev) == chan) 886 - pgraph->unload_context(dev); 888 + if (nv10_graph_channel(dev) == chan) 889 + nv10_graph_unload_context(dev); 890 + 891 + nv04_graph_fifo_access(dev, true); 892 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 887 893 888 894 /* Free the context resources */ 895 + chan->engctx[engine] = NULL; 889 
896 kfree(pgraph_ctx); 890 - chan->pgraph_ctx = NULL; 891 - 892 - pgraph->fifo_access(dev, true); 893 - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 894 897 } 895 898 896 - void 899 + static void 897 900 nv10_graph_set_tile_region(struct drm_device *dev, int i) 898 901 { 899 902 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 904 907 nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); 905 908 } 906 909 907 - int nv10_graph_init(struct drm_device *dev) 910 + static int 911 + nv10_graph_init(struct drm_device *dev, int engine) 908 912 { 909 913 struct drm_nouveau_private *dev_priv = dev->dev_private; 910 - uint32_t tmp; 911 - int ret, i; 914 + u32 tmp; 915 + int i; 912 916 913 917 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 914 918 ~NV_PMC_ENABLE_PGRAPH); 915 919 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 916 920 NV_PMC_ENABLE_PGRAPH); 917 921 918 - ret = nv10_graph_register(dev); 919 - if (ret) 920 - return ret; 921 - 922 - nouveau_irq_register(dev, 12, nv10_graph_isr); 923 922 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 924 923 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 925 924 ··· 956 963 return 0; 957 964 } 958 965 959 - void nv10_graph_takedown(struct drm_device *dev) 966 + static int 967 + nv10_graph_fini(struct drm_device *dev, int engine) 960 968 { 969 + nv10_graph_unload_context(dev); 961 970 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 962 - nouveau_irq_unregister(dev, 12); 971 + return 0; 963 972 } 964 973 965 974 static int 966 975 nv17_graph_mthd_lma_window(struct nouveau_channel *chan, 967 976 u32 class, u32 mthd, u32 data) 968 977 { 978 + struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR]; 969 979 struct drm_device *dev = chan->dev; 970 - struct graph_state *ctx = chan->pgraph_ctx; 971 980 struct pipe_state *pipe = &ctx->pipe_state; 972 981 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; 973 982 uint32_t xfmode0, xfmode1; ··· 1056 1061 return 0; 1057 
1062 } 1058 1063 1059 - static int 1060 - nv10_graph_register(struct drm_device *dev) 1061 - { 1062 - struct drm_nouveau_private *dev_priv = dev->dev_private; 1063 - 1064 - if (dev_priv->engine.graph.registered) 1065 - return 0; 1066 - 1067 - NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 1068 - NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 1069 - NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 1070 - NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 1071 - NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ 1072 - NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 1073 - NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 1074 - NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 1075 - NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 1076 - NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 1077 - NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 1078 - NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 1079 - NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 1080 - NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 1081 - NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ 1082 - NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ 1083 - NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ 1084 - NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ 1085 - 1086 - /* celcius */ 1087 - if (dev_priv->chipset <= 0x10) { 1088 - NVOBJ_CLASS(dev, 0x0056, GR); 1089 - } else 1090 - if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { 1091 - NVOBJ_CLASS(dev, 0x0096, GR); 1092 - } else { 1093 - NVOBJ_CLASS(dev, 0x0099, GR); 1094 - NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); 1095 - NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); 1096 - NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); 1097 - NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); 1098 - NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); 1099 - } 1100 - 1101 - /* nvsw */ 1102 - NVOBJ_CLASS(dev, 0x506e, SW); 1103 - NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 1104 - 1105 - dev_priv->engine.graph.registered = true; 1106 - return 0; 1107 - } 1108 - 1109 
1064 struct nouveau_bitfield nv10_graph_intr[] = { 1110 1065 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, 1111 1066 { NV_PGRAPH_INTR_ERROR, "ERROR" }, 1112 1067 {} 1113 1068 }; 1114 1069 1115 - struct nouveau_bitfield nv10_graph_nstatus[] = 1116 - { 1070 + struct nouveau_bitfield nv10_graph_nstatus[] = { 1117 1071 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, 1118 1072 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, 1119 1073 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ··· 1116 1172 chid, subc, class, mthd, data); 1117 1173 } 1118 1174 } 1175 + } 1176 + 1177 + static void 1178 + nv10_graph_destroy(struct drm_device *dev, int engine) 1179 + { 1180 + struct nv10_graph_engine *pgraph = nv_engine(dev, engine); 1181 + 1182 + nouveau_irq_unregister(dev, 12); 1183 + kfree(pgraph); 1184 + } 1185 + 1186 + int 1187 + nv10_graph_create(struct drm_device *dev) 1188 + { 1189 + struct drm_nouveau_private *dev_priv = dev->dev_private; 1190 + struct nv10_graph_engine *pgraph; 1191 + 1192 + pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL); 1193 + if (!pgraph) 1194 + return -ENOMEM; 1195 + 1196 + pgraph->base.destroy = nv10_graph_destroy; 1197 + pgraph->base.init = nv10_graph_init; 1198 + pgraph->base.fini = nv10_graph_fini; 1199 + pgraph->base.context_new = nv10_graph_context_new; 1200 + pgraph->base.context_del = nv10_graph_context_del; 1201 + pgraph->base.object_new = nv04_graph_object_new; 1202 + pgraph->base.set_tile_region = nv10_graph_set_tile_region; 1203 + 1204 + NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 1205 + nouveau_irq_register(dev, 12, nv10_graph_isr); 1206 + 1207 + /* nvsw */ 1208 + NVOBJ_CLASS(dev, 0x506e, SW); 1209 + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 1210 + 1211 + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 1212 + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 1213 + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 1214 + NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ 1215 + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 1216 + 
NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 1217 + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 1218 + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 1219 + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 1220 + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 1221 + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 1222 + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 1223 + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 1224 + NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ 1225 + NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ 1226 + NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ 1227 + NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ 1228 + 1229 + /* celcius */ 1230 + if (dev_priv->chipset <= 0x10) { 1231 + NVOBJ_CLASS(dev, 0x0056, GR); 1232 + } else 1233 + if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { 1234 + NVOBJ_CLASS(dev, 0x0096, GR); 1235 + } else { 1236 + NVOBJ_CLASS(dev, 0x0099, GR); 1237 + NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); 1238 + NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); 1239 + NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); 1240 + NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); 1241 + NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); 1242 + } 1243 + 1244 + return 0; 1119 1245 }
+223 -287
drivers/gpu/drm/nouveau/nv20_graph.c
··· 24 24 * 25 25 */ 26 26 27 + struct nv20_graph_engine { 28 + struct nouveau_exec_engine base; 29 + struct nouveau_gpuobj *ctxtab; 30 + void (*grctx_init)(struct nouveau_gpuobj *); 31 + u32 grctx_size; 32 + u32 grctx_user; 33 + }; 34 + 27 35 #define NV20_GRCTX_SIZE (3580*4) 28 36 #define NV25_GRCTX_SIZE (3529*4) 29 37 #define NV2A_GRCTX_SIZE (3500*4) ··· 40 32 #define NV34_GRCTX_SIZE (18140) 41 33 #define NV35_36_GRCTX_SIZE (22396) 42 34 43 - static int nv20_graph_register(struct drm_device *); 44 - static int nv30_graph_register(struct drm_device *); 45 - static void nv20_graph_isr(struct drm_device *); 35 + int 36 + nv20_graph_unload_context(struct drm_device *dev) 37 + { 38 + struct drm_nouveau_private *dev_priv = dev->dev_private; 39 + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 40 + struct nouveau_channel *chan; 41 + struct nouveau_gpuobj *grctx; 42 + u32 tmp; 43 + 44 + chan = nv10_graph_channel(dev); 45 + if (!chan) 46 + return 0; 47 + grctx = chan->engctx[NVOBJ_ENGINE_GR]; 48 + 49 + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4); 50 + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 51 + NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); 52 + 53 + nouveau_wait_for_idle(dev); 54 + 55 + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); 56 + tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; 57 + tmp |= (pfifo->channels - 1) << 24; 58 + nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); 59 + return 0; 60 + } 46 61 47 62 static void 48 - nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 63 + nv20_graph_rdi(struct drm_device *dev) 64 + { 65 + struct drm_nouveau_private *dev_priv = dev->dev_private; 66 + int i, writecount = 32; 67 + uint32_t rdi_index = 0x2c80000; 68 + 69 + if (dev_priv->chipset == 0x20) { 70 + rdi_index = 0x3d0000; 71 + writecount = 15; 72 + } 73 + 74 + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index); 75 + for (i = 0; i < writecount; i++) 76 + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0); 77 + 78 + 
nouveau_wait_for_idle(dev); 79 + } 80 + 81 + static void 82 + nv20_graph_context_init(struct nouveau_gpuobj *ctx) 49 83 { 50 84 int i; 51 85 ··· 137 87 } 138 88 139 89 static void 140 - nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 90 + nv25_graph_context_init(struct nouveau_gpuobj *ctx) 141 91 { 142 92 int i; 143 93 ··· 196 146 } 197 147 198 148 static void 199 - nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 149 + nv2a_graph_context_init(struct nouveau_gpuobj *ctx) 200 150 { 201 151 int i; 202 152 ··· 246 196 } 247 197 248 198 static void 249 - nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 199 + nv30_31_graph_context_init(struct nouveau_gpuobj *ctx) 250 200 { 251 201 int i; 252 202 ··· 304 254 } 305 255 306 256 static void 307 - nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 257 + nv34_graph_context_init(struct nouveau_gpuobj *ctx) 308 258 { 309 259 int i; 310 260 ··· 362 312 } 363 313 364 314 static void 365 - nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 315 + nv35_36_graph_context_init(struct nouveau_gpuobj *ctx) 366 316 { 367 317 int i; 368 318 ··· 420 370 } 421 371 422 372 int 423 - nv20_graph_create_context(struct nouveau_channel *chan) 373 + nv20_graph_context_new(struct nouveau_channel *chan, int engine) 424 374 { 375 + struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine); 376 + struct nouveau_gpuobj *grctx = NULL; 425 377 struct drm_device *dev = chan->dev; 426 - struct drm_nouveau_private *dev_priv = dev->dev_private; 427 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 428 - void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); 429 - unsigned int idoffs = 0x28; 430 378 int ret; 431 379 432 - switch (dev_priv->chipset) { 433 - case 0x20: 434 - ctx_init = nv20_graph_context_init; 435 - idoffs = 0; 436 - break; 437 - case 0x25: 438 - case 0x28: 439 - ctx_init = 
nv25_graph_context_init; 440 - break; 441 - case 0x2a: 442 - ctx_init = nv2a_graph_context_init; 443 - idoffs = 0; 444 - break; 445 - case 0x30: 446 - case 0x31: 447 - ctx_init = nv30_31_graph_context_init; 448 - break; 449 - case 0x34: 450 - ctx_init = nv34_graph_context_init; 451 - break; 452 - case 0x35: 453 - case 0x36: 454 - ctx_init = nv35_36_graph_context_init; 455 - break; 456 - default: 457 - BUG_ON(1); 458 - } 459 - 460 - ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, 461 - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); 380 + ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16, 381 + NVOBJ_FLAG_ZERO_ALLOC, &grctx); 462 382 if (ret) 463 383 return ret; 464 384 465 385 /* Initialise default context values */ 466 - ctx_init(dev, chan->ramin_grctx); 386 + pgraph->grctx_init(grctx); 467 387 468 388 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ 469 - nv_wo32(chan->ramin_grctx, idoffs, 470 - (chan->id << 24) | 0x1); /* CTX_USER */ 389 + /* CTX_USER */ 390 + nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1); 471 391 472 - nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4); 392 + nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4); 393 + chan->engctx[engine] = grctx; 473 394 return 0; 474 395 } 475 396 476 397 void 477 - nv20_graph_destroy_context(struct nouveau_channel *chan) 398 + nv20_graph_context_del(struct nouveau_channel *chan, int engine) 478 399 { 400 + struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine); 401 + struct nouveau_gpuobj *grctx = chan->engctx[engine]; 479 402 struct drm_device *dev = chan->dev; 480 403 struct drm_nouveau_private *dev_priv = dev->dev_private; 481 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 482 404 unsigned long flags; 483 405 484 406 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 485 - pgraph->fifo_access(dev, false); 407 + nv04_graph_fifo_access(dev, false); 486 408 487 409 /* Unload the context if it's the 
currently active one */ 488 - if (pgraph->channel(dev) == chan) 489 - pgraph->unload_context(dev); 410 + if (nv10_graph_channel(dev) == chan) 411 + nv20_graph_unload_context(dev); 490 412 491 - pgraph->fifo_access(dev, true); 413 + nv04_graph_fifo_access(dev, true); 492 414 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 493 415 494 416 /* Free the context resources */ 495 - nv_wo32(pgraph->ctx_table, chan->id * 4, 0); 496 - nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 497 - } 417 + nv_wo32(pgraph->ctxtab, chan->id * 4, 0); 498 418 499 - int 500 - nv20_graph_load_context(struct nouveau_channel *chan) 501 - { 502 - struct drm_device *dev = chan->dev; 503 - uint32_t inst; 504 - 505 - if (!chan->ramin_grctx) 506 - return -EINVAL; 507 - inst = chan->ramin_grctx->pinst >> 4; 508 - 509 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 510 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 511 - NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); 512 - nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); 513 - 514 - nouveau_wait_for_idle(dev); 515 - return 0; 516 - } 517 - 518 - int 519 - nv20_graph_unload_context(struct drm_device *dev) 520 - { 521 - struct drm_nouveau_private *dev_priv = dev->dev_private; 522 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 523 - struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 524 - struct nouveau_channel *chan; 525 - uint32_t inst, tmp; 526 - 527 - chan = pgraph->channel(dev); 528 - if (!chan) 529 - return 0; 530 - inst = chan->ramin_grctx->pinst >> 4; 531 - 532 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 533 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 534 - NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); 535 - 536 - nouveau_wait_for_idle(dev); 537 - 538 - nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); 539 - tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; 540 - tmp |= (pfifo->channels - 1) << 24; 541 - nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); 542 - return 0; 419 + nouveau_gpuobj_ref(NULL, &grctx); 
420 + chan->engctx[engine] = NULL; 543 421 } 544 422 545 423 static void 546 - nv20_graph_rdi(struct drm_device *dev) 547 - { 548 - struct drm_nouveau_private *dev_priv = dev->dev_private; 549 - int i, writecount = 32; 550 - uint32_t rdi_index = 0x2c80000; 551 - 552 - if (dev_priv->chipset == 0x20) { 553 - rdi_index = 0x3d0000; 554 - writecount = 15; 555 - } 556 - 557 - nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index); 558 - for (i = 0; i < writecount; i++) 559 - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0); 560 - 561 - nouveau_wait_for_idle(dev); 562 - } 563 - 564 - void 565 424 nv20_graph_set_tile_region(struct drm_device *dev, int i) 566 425 { 567 426 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 495 536 } 496 537 497 538 int 498 - nv20_graph_init(struct drm_device *dev) 539 + nv20_graph_init(struct drm_device *dev, int engine) 499 540 { 541 + struct nv20_graph_engine *pgraph = nv_engine(dev, engine); 500 542 struct drm_nouveau_private *dev_priv = dev->dev_private; 501 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 502 543 uint32_t tmp, vramsz; 503 - int ret, i; 504 - 505 - switch (dev_priv->chipset) { 506 - case 0x20: 507 - pgraph->grctx_size = NV20_GRCTX_SIZE; 508 - break; 509 - case 0x25: 510 - case 0x28: 511 - pgraph->grctx_size = NV25_GRCTX_SIZE; 512 - break; 513 - case 0x2a: 514 - pgraph->grctx_size = NV2A_GRCTX_SIZE; 515 - break; 516 - default: 517 - NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); 518 - pgraph->accel_blocked = true; 519 - return 0; 520 - } 544 + int i; 521 545 522 546 nv_wr32(dev, NV03_PMC_ENABLE, 523 547 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); 524 548 nv_wr32(dev, NV03_PMC_ENABLE, 525 549 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); 526 550 527 - if (!pgraph->ctx_table) { 528 - /* Create Context Pointer Table */ 529 - ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, 530 - NVOBJ_FLAG_ZERO_ALLOC, 531 - &pgraph->ctx_table); 532 - if (ret) 533 - return ret; 534 - } 535 - 536 - 
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 537 - pgraph->ctx_table->pinst >> 4); 551 + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4); 538 552 539 553 nv20_graph_rdi(dev); 540 554 541 - ret = nv20_graph_register(dev); 542 - if (ret) { 543 - nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 544 - return ret; 545 - } 546 - 547 - nouveau_irq_register(dev, 12, nv20_graph_isr); 548 555 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 549 556 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 550 557 ··· 582 657 return 0; 583 658 } 584 659 585 - void 586 - nv20_graph_takedown(struct drm_device *dev) 587 - { 588 - struct drm_nouveau_private *dev_priv = dev->dev_private; 589 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 590 - 591 - nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 592 - nouveau_irq_unregister(dev, 12); 593 - 594 - nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 595 - } 596 - 597 660 int 598 - nv30_graph_init(struct drm_device *dev) 661 + nv30_graph_init(struct drm_device *dev, int engine) 599 662 { 663 + struct nv20_graph_engine *pgraph = nv_engine(dev, engine); 600 664 struct drm_nouveau_private *dev_priv = dev->dev_private; 601 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 602 - int ret, i; 603 - 604 - switch (dev_priv->chipset) { 605 - case 0x30: 606 - case 0x31: 607 - pgraph->grctx_size = NV30_31_GRCTX_SIZE; 608 - break; 609 - case 0x34: 610 - pgraph->grctx_size = NV34_GRCTX_SIZE; 611 - break; 612 - case 0x35: 613 - case 0x36: 614 - pgraph->grctx_size = NV35_36_GRCTX_SIZE; 615 - break; 616 - default: 617 - NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); 618 - pgraph->accel_blocked = true; 619 - return 0; 620 - } 665 + int i; 621 666 622 667 nv_wr32(dev, NV03_PMC_ENABLE, 623 668 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); 624 669 nv_wr32(dev, NV03_PMC_ENABLE, 625 670 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); 626 671 627 - if (!pgraph->ctx_table) { 628 - /* Create 
Context Pointer Table */ 629 - ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, 630 - NVOBJ_FLAG_ZERO_ALLOC, 631 - &pgraph->ctx_table); 632 - if (ret) 633 - return ret; 634 - } 672 + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4); 635 673 636 - ret = nv30_graph_register(dev); 637 - if (ret) { 638 - nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 639 - return ret; 640 - } 641 - 642 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 643 - pgraph->ctx_table->pinst >> 4); 644 - 645 - nouveau_irq_register(dev, 12, nv20_graph_isr); 646 674 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 647 675 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 648 676 ··· 653 775 return 0; 654 776 } 655 777 656 - static int 657 - nv20_graph_register(struct drm_device *dev) 778 + int 779 + nv20_graph_fini(struct drm_device *dev, int engine) 658 780 { 659 - struct drm_nouveau_private *dev_priv = dev->dev_private; 660 - 661 - if (dev_priv->engine.graph.registered) 662 - return 0; 663 - 664 - NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 665 - NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 666 - NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 667 - NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 668 - NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 669 - NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 670 - NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 671 - NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 672 - NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 673 - NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 674 - NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 675 - NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 676 - NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 677 - NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ 678 - NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ 679 - 680 - /* kelvin */ 681 - if (dev_priv->chipset < 0x25) 682 - NVOBJ_CLASS(dev, 0x0097, GR); 683 - else 684 - NVOBJ_CLASS(dev, 0x0597, GR); 685 - 686 - /* nvsw */ 687 - NVOBJ_CLASS(dev, 0x506e, SW); 688 - NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 689 - 
690 - dev_priv->engine.graph.registered = true; 691 - return 0; 692 - } 693 - 694 - static int 695 - nv30_graph_register(struct drm_device *dev) 696 - { 697 - struct drm_nouveau_private *dev_priv = dev->dev_private; 698 - 699 - if (dev_priv->engine.graph.registered) 700 - return 0; 701 - 702 - NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 703 - NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 704 - NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 705 - NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 706 - NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 707 - NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 708 - NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ 709 - NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 710 - NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ 711 - NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 712 - NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ 713 - NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 714 - NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 715 - NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 716 - NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 717 - NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 718 - NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ 719 - 720 - /* rankine */ 721 - if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) 722 - NVOBJ_CLASS(dev, 0x0397, GR); 723 - else 724 - if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) 725 - NVOBJ_CLASS(dev, 0x0697, GR); 726 - else 727 - if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) 728 - NVOBJ_CLASS(dev, 0x0497, GR); 729 - 730 - /* nvsw */ 731 - NVOBJ_CLASS(dev, 0x506e, SW); 732 - NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 733 - 734 - dev_priv->engine.graph.registered = true; 781 + nv20_graph_unload_context(dev); 782 + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 735 783 return 0; 736 784 } 737 785 ··· 700 896 chid, subc, class, mthd, data); 701 897 } 702 898 } 899 + } 900 + 901 + static void 902 + nv20_graph_destroy(struct drm_device *dev, int engine) 903 + { 904 + struct nv20_graph_engine *pgraph = nv_engine(dev, 
engine); 905 + 906 + nouveau_irq_unregister(dev, 12); 907 + nouveau_gpuobj_ref(NULL, &pgraph->ctxtab); 908 + 909 + NVOBJ_ENGINE_DEL(dev, GR); 910 + kfree(pgraph); 911 + } 912 + 913 + int 914 + nv20_graph_create(struct drm_device *dev) 915 + { 916 + struct drm_nouveau_private *dev_priv = dev->dev_private; 917 + struct nv20_graph_engine *pgraph; 918 + int ret; 919 + 920 + pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL); 921 + if (!pgraph) 922 + return -ENOMEM; 923 + 924 + pgraph->base.destroy = nv20_graph_destroy; 925 + pgraph->base.fini = nv20_graph_fini; 926 + pgraph->base.context_new = nv20_graph_context_new; 927 + pgraph->base.context_del = nv20_graph_context_del; 928 + pgraph->base.object_new = nv04_graph_object_new; 929 + pgraph->base.set_tile_region = nv20_graph_set_tile_region; 930 + 931 + pgraph->grctx_user = 0x0028; 932 + if (dev_priv->card_type == NV_20) { 933 + pgraph->base.init = nv20_graph_init; 934 + switch (dev_priv->chipset) { 935 + case 0x20: 936 + pgraph->grctx_init = nv20_graph_context_init; 937 + pgraph->grctx_size = NV20_GRCTX_SIZE; 938 + pgraph->grctx_user = 0x0000; 939 + break; 940 + case 0x25: 941 + case 0x28: 942 + pgraph->grctx_init = nv25_graph_context_init; 943 + pgraph->grctx_size = NV25_GRCTX_SIZE; 944 + break; 945 + case 0x2a: 946 + pgraph->grctx_init = nv2a_graph_context_init; 947 + pgraph->grctx_size = NV2A_GRCTX_SIZE; 948 + pgraph->grctx_user = 0x0000; 949 + break; 950 + default: 951 + NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 952 + return 0; 953 + } 954 + } else { 955 + pgraph->base.init = nv30_graph_init; 956 + switch (dev_priv->chipset) { 957 + case 0x30: 958 + case 0x31: 959 + pgraph->grctx_init = nv30_31_graph_context_init; 960 + pgraph->grctx_size = NV30_31_GRCTX_SIZE; 961 + break; 962 + case 0x34: 963 + pgraph->grctx_init = nv34_graph_context_init; 964 + pgraph->grctx_size = NV34_GRCTX_SIZE; 965 + break; 966 + case 0x35: 967 + case 0x36: 968 + pgraph->grctx_init = nv35_36_graph_context_init; 969 + pgraph->grctx_size = 
NV35_36_GRCTX_SIZE; 970 + break; 971 + default: 972 + NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 973 + return 0; 974 + } 975 + } 976 + 977 + /* Create Context Pointer Table */ 978 + ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC, 979 + &pgraph->ctxtab); 980 + if (ret) { 981 + kfree(pgraph); 982 + return ret; 983 + } 984 + 985 + NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 986 + nouveau_irq_register(dev, 12, nv20_graph_isr); 987 + 988 + /* nvsw */ 989 + NVOBJ_CLASS(dev, 0x506e, SW); 990 + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 991 + 992 + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 993 + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 994 + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 995 + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 996 + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 997 + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 998 + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 999 + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 1000 + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 1001 + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 1002 + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 1003 + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 1004 + if (dev_priv->card_type == NV_20) { 1005 + NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ 1006 + NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ 1007 + 1008 + /* kelvin */ 1009 + if (dev_priv->chipset < 0x25) 1010 + NVOBJ_CLASS(dev, 0x0097, GR); 1011 + else 1012 + NVOBJ_CLASS(dev, 0x0597, GR); 1013 + } else { 1014 + NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ 1015 + NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ 1016 + NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ 1017 + NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ 1018 + 1019 + /* rankine */ 1020 + if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) 1021 + NVOBJ_CLASS(dev, 0x0397, GR); 1022 + else 1023 + if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) 1024 + NVOBJ_CLASS(dev, 0x0697, GR); 1025 + else 1026 + if (0x000001e0 & (1 << (dev_priv->chipset & 
0x0f))) 1027 + NVOBJ_CLASS(dev, 0x0497, GR); 1028 + } 1029 + 1030 + return 0; 703 1031 }
+2
drivers/gpu/drm/nouveau/nv40_fifo.c
··· 115 115 nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68)); 116 116 nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); 117 117 nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); 118 + nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84)); 118 119 119 120 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); 120 121 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); ··· 187 186 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); 188 187 nv_wi32(dev, fc + 72, tmp); 189 188 #endif 189 + nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c)); 190 190 191 191 nv40_fifo_do_load_context(dev, pfifo->channels - 1); 192 192 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+176 -153
drivers/gpu/drm/nouveau/nv40_graph.c
··· 28 28 #include "drm.h" 29 29 #include "nouveau_drv.h" 30 30 #include "nouveau_grctx.h" 31 + #include "nouveau_ramht.h" 31 32 32 - static int nv40_graph_register(struct drm_device *); 33 - static void nv40_graph_isr(struct drm_device *); 33 + struct nv40_graph_engine { 34 + struct nouveau_exec_engine base; 35 + u32 grctx_size; 36 + }; 34 37 35 - struct nouveau_channel * 38 + static struct nouveau_channel * 36 39 nv40_graph_channel(struct drm_device *dev) 37 40 { 38 41 struct drm_nouveau_private *dev_priv = dev->dev_private; 42 + struct nouveau_gpuobj *grctx; 39 43 uint32_t inst; 40 44 int i; 41 45 ··· 49 45 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; 50 46 51 47 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 52 - struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 48 + if (!dev_priv->channels.ptr[i]) 49 + continue; 53 50 54 - if (chan && chan->ramin_grctx && 55 - chan->ramin_grctx->pinst == inst) 56 - return chan; 51 + grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; 52 + if (grctx && grctx->pinst == inst) 53 + return dev_priv->channels.ptr[i]; 57 54 } 58 55 59 56 return NULL; 60 - } 61 - 62 - int 63 - nv40_graph_create_context(struct nouveau_channel *chan) 64 - { 65 - struct drm_device *dev = chan->dev; 66 - struct drm_nouveau_private *dev_priv = dev->dev_private; 67 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 68 - struct nouveau_grctx ctx = {}; 69 - unsigned long flags; 70 - int ret; 71 - 72 - ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, 73 - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); 74 - if (ret) 75 - return ret; 76 - 77 - /* Initialise default context values */ 78 - ctx.dev = chan->dev; 79 - ctx.mode = NOUVEAU_GRCTX_VALS; 80 - ctx.data = chan->ramin_grctx; 81 - nv40_grctx_init(&ctx); 82 - 83 - nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst); 84 - 85 - /* init grctx pointer in ramfc, and on PFIFO if channel is 86 - * already active there 87 - */ 88 - 
spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 89 - nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4); 90 - nv_mask(dev, 0x002500, 0x00000001, 0x00000000); 91 - if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id) 92 - nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4); 93 - nv_mask(dev, 0x002500, 0x00000001, 0x00000001); 94 - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 95 - return 0; 96 - } 97 - 98 - void 99 - nv40_graph_destroy_context(struct nouveau_channel *chan) 100 - { 101 - struct drm_device *dev = chan->dev; 102 - struct drm_nouveau_private *dev_priv = dev->dev_private; 103 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 104 - unsigned long flags; 105 - 106 - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 107 - pgraph->fifo_access(dev, false); 108 - 109 - /* Unload the context if it's the currently active one */ 110 - if (pgraph->channel(dev) == chan) 111 - pgraph->unload_context(dev); 112 - 113 - pgraph->fifo_access(dev, true); 114 - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 115 - 116 - /* Free the context resources */ 117 - nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 118 57 } 119 58 120 59 static int ··· 101 154 return 0; 102 155 } 103 156 104 - /* Restore the context for a specific channel into PGRAPH */ 105 - int 106 - nv40_graph_load_context(struct nouveau_channel *chan) 107 - { 108 - struct drm_device *dev = chan->dev; 109 - uint32_t inst; 110 - int ret; 111 - 112 - if (!chan->ramin_grctx) 113 - return -EINVAL; 114 - inst = chan->ramin_grctx->pinst >> 4; 115 - 116 - ret = nv40_graph_transfer_context(dev, inst, 0); 117 - if (ret) 118 - return ret; 119 - 120 - /* 0x40032C, no idea of it's exact function. Could simply be a 121 - * record of the currently active PGRAPH context. It's currently 122 - * unknown as to what bit 24 does. The nv ddx has it set, so we will 123 - * set it here too. 
124 - */ 125 - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 126 - nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 127 - (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) | 128 - NV40_PGRAPH_CTXCTL_CUR_LOADED); 129 - /* 0x32E0 records the instance address of the active FIFO's PGRAPH 130 - * context. If at any time this doesn't match 0x40032C, you will 131 - * receive PGRAPH_INTR_CONTEXT_SWITCH 132 - */ 133 - nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst); 134 - return 0; 135 - } 136 - 137 - int 157 + static int 138 158 nv40_graph_unload_context(struct drm_device *dev) 139 159 { 140 160 uint32_t inst; ··· 118 204 return ret; 119 205 } 120 206 121 - void 207 + static int 208 + nv40_graph_context_new(struct nouveau_channel *chan, int engine) 209 + { 210 + struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine); 211 + struct drm_device *dev = chan->dev; 212 + struct drm_nouveau_private *dev_priv = dev->dev_private; 213 + struct nouveau_gpuobj *grctx = NULL; 214 + struct nouveau_grctx ctx = {}; 215 + unsigned long flags; 216 + int ret; 217 + 218 + ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16, 219 + NVOBJ_FLAG_ZERO_ALLOC, &grctx); 220 + if (ret) 221 + return ret; 222 + 223 + /* Initialise default context values */ 224 + ctx.dev = chan->dev; 225 + ctx.mode = NOUVEAU_GRCTX_VALS; 226 + ctx.data = grctx; 227 + nv40_grctx_init(&ctx); 228 + 229 + nv_wo32(grctx, 0, grctx->vinst); 230 + 231 + /* init grctx pointer in ramfc, and on PFIFO if channel is 232 + * already active there 233 + */ 234 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 235 + nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4); 236 + nv_mask(dev, 0x002500, 0x00000001, 0x00000000); 237 + if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id) 238 + nv_wr32(dev, 0x0032e0, grctx->vinst >> 4); 239 + nv_mask(dev, 0x002500, 0x00000001, 0x00000001); 240 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 241 + 242 + chan->engctx[engine] = grctx; 243 + return 0; 244 + } 245 + 246 + static 
void 247 + nv40_graph_context_del(struct nouveau_channel *chan, int engine) 248 + { 249 + struct nouveau_gpuobj *grctx = chan->engctx[engine]; 250 + struct drm_device *dev = chan->dev; 251 + struct drm_nouveau_private *dev_priv = dev->dev_private; 252 + unsigned long flags; 253 + 254 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 255 + nv04_graph_fifo_access(dev, false); 256 + 257 + /* Unload the context if it's the currently active one */ 258 + if (nv40_graph_channel(dev) == chan) 259 + nv40_graph_unload_context(dev); 260 + 261 + nv04_graph_fifo_access(dev, true); 262 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 263 + 264 + /* Free the context resources */ 265 + nouveau_gpuobj_ref(NULL, &grctx); 266 + chan->engctx[engine] = NULL; 267 + } 268 + 269 + int 270 + nv40_graph_object_new(struct nouveau_channel *chan, int engine, 271 + u32 handle, u16 class) 272 + { 273 + struct drm_device *dev = chan->dev; 274 + struct nouveau_gpuobj *obj = NULL; 275 + int ret; 276 + 277 + ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 278 + if (ret) 279 + return ret; 280 + obj->engine = 1; 281 + obj->class = class; 282 + 283 + nv_wo32(obj, 0x00, class); 284 + nv_wo32(obj, 0x04, 0x00000000); 285 + #ifndef __BIG_ENDIAN 286 + nv_wo32(obj, 0x08, 0x00000000); 287 + #else 288 + nv_wo32(obj, 0x08, 0x01000000); 289 + #endif 290 + nv_wo32(obj, 0x0c, 0x00000000); 291 + nv_wo32(obj, 0x10, 0x00000000); 292 + 293 + ret = nouveau_ramht_insert(chan, handle, obj); 294 + nouveau_gpuobj_ref(NULL, &obj); 295 + return ret; 296 + } 297 + 298 + static void 122 299 nv40_graph_set_tile_region(struct drm_device *dev, int i) 123 300 { 124 301 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 262 257 * C51 0x4e 263 258 */ 264 259 int 265 - nv40_graph_init(struct drm_device *dev) 260 + nv40_graph_init(struct drm_device *dev, int engine) 266 261 { 267 - struct drm_nouveau_private *dev_priv = 268 - (struct drm_nouveau_private *)dev->dev_private; 
262 + struct nv40_graph_engine *pgraph = nv_engine(dev, engine); 263 + struct drm_nouveau_private *dev_priv = dev->dev_private; 269 264 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 270 265 struct nouveau_grctx ctx = {}; 271 266 uint32_t vramsz, *cp; 272 - int ret, i, j; 267 + int i, j; 273 268 274 269 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 275 270 ~NV_PMC_ENABLE_PGRAPH); ··· 285 280 ctx.data = cp; 286 281 ctx.ctxprog_max = 256; 287 282 nv40_grctx_init(&ctx); 288 - dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; 283 + pgraph->grctx_size = ctx.ctxvals_pos * 4; 289 284 290 285 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 291 286 for (i = 0; i < ctx.ctxprog_len; i++) ··· 293 288 294 289 kfree(cp); 295 290 296 - ret = nv40_graph_register(dev); 297 - if (ret) 298 - return ret; 299 - 300 291 /* No context present currently */ 301 292 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 302 293 303 - nouveau_irq_register(dev, 12, nv40_graph_isr); 304 294 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 305 295 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 306 296 ··· 428 428 return 0; 429 429 } 430 430 431 - void nv40_graph_takedown(struct drm_device *dev) 432 - { 433 - nouveau_irq_unregister(dev, 12); 434 - } 435 - 436 431 static int 437 - nv40_graph_register(struct drm_device *dev) 432 + nv40_graph_fini(struct drm_device *dev, int engine) 438 433 { 439 - struct drm_nouveau_private *dev_priv = dev->dev_private; 440 - 441 - if (dev_priv->engine.graph.registered) 442 - return 0; 443 - 444 - NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 445 - NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 446 - NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 447 - NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 448 - NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 449 - NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 450 - NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 451 - NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ 452 - NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 453 - 
NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ 454 - NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 455 - NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 456 - NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 457 - NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 458 - NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 459 - NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 460 - 461 - /* curie */ 462 - if (nv44_graph_class(dev)) 463 - NVOBJ_CLASS(dev, 0x4497, GR); 464 - else 465 - NVOBJ_CLASS(dev, 0x4097, GR); 466 - 467 - /* nvsw */ 468 - NVOBJ_CLASS(dev, 0x506e, SW); 469 - NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 470 - 471 - dev_priv->engine.graph.registered = true; 434 + nv40_graph_unload_context(dev); 472 435 return 0; 473 436 } 474 437 ··· 439 476 nv40_graph_isr_chid(struct drm_device *dev, u32 inst) 440 477 { 441 478 struct drm_nouveau_private *dev_priv = dev->dev_private; 442 - struct nouveau_channel *chan; 479 + struct nouveau_gpuobj *grctx; 443 480 unsigned long flags; 444 481 int i; 445 482 446 483 spin_lock_irqsave(&dev_priv->channels.lock, flags); 447 484 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 448 - chan = dev_priv->channels.ptr[i]; 449 - if (!chan || !chan->ramin_grctx) 485 + if (!dev_priv->channels.ptr[i]) 450 486 continue; 487 + grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; 451 488 452 - if (inst == chan->ramin_grctx->pinst) 489 + if (grctx && grctx->pinst == inst) 453 490 break; 454 491 } 455 492 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ··· 499 536 chid, inst, subc, class, mthd, data); 500 537 } 501 538 } 539 + } 540 + 541 + static void 542 + nv40_graph_destroy(struct drm_device *dev, int engine) 543 + { 544 + struct nv40_graph_engine *pgraph = nv_engine(dev, engine); 545 + 546 + nouveau_irq_unregister(dev, 12); 547 + 548 + NVOBJ_ENGINE_DEL(dev, GR); 549 + kfree(pgraph); 550 + } 551 + 552 + int 553 + nv40_graph_create(struct drm_device *dev) 554 + { 555 + struct nv40_graph_engine *pgraph; 556 + 557 + pgraph = 
kzalloc(sizeof(*pgraph), GFP_KERNEL); 558 + if (!pgraph) 559 + return -ENOMEM; 560 + 561 + pgraph->base.destroy = nv40_graph_destroy; 562 + pgraph->base.init = nv40_graph_init; 563 + pgraph->base.fini = nv40_graph_fini; 564 + pgraph->base.context_new = nv40_graph_context_new; 565 + pgraph->base.context_del = nv40_graph_context_del; 566 + pgraph->base.object_new = nv40_graph_object_new; 567 + pgraph->base.set_tile_region = nv40_graph_set_tile_region; 568 + 569 + NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 570 + nouveau_irq_register(dev, 12, nv40_graph_isr); 571 + 572 + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 573 + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 574 + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 575 + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 576 + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 577 + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 578 + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 579 + NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ 580 + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ 581 + NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ 582 + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ 583 + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ 584 + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ 585 + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ 586 + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ 587 + NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 588 + 589 + /* curie */ 590 + if (nv44_graph_class(dev)) 591 + NVOBJ_CLASS(dev, 0x4497, GR); 592 + else 593 + NVOBJ_CLASS(dev, 0x4097, GR); 594 + 595 + /* nvsw */ 596 + NVOBJ_CLASS(dev, 0x506e, SW); 597 + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 598 + return 0; 502 599 }
+311
drivers/gpu/drm/nouveau/nv40_mpeg.c
··· 1 + /* 2 + * Copyright 2011 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: Ben Skeggs 23 + */ 24 + 25 + #include "drmP.h" 26 + #include "nouveau_drv.h" 27 + #include "nouveau_ramht.h" 28 + 29 + struct nv40_mpeg_engine { 30 + struct nouveau_exec_engine base; 31 + }; 32 + 33 + static int 34 + nv40_mpeg_context_new(struct nouveau_channel *chan, int engine) 35 + { 36 + struct drm_device *dev = chan->dev; 37 + struct drm_nouveau_private *dev_priv = dev->dev_private; 38 + struct nouveau_gpuobj *ctx = NULL; 39 + unsigned long flags; 40 + int ret; 41 + 42 + NV_DEBUG(dev, "ch%d\n", chan->id); 43 + 44 + ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC | 45 + NVOBJ_FLAG_ZERO_FREE, &ctx); 46 + if (ret) 47 + return ret; 48 + 49 + nv_wo32(ctx, 0x78, 0x02001ec1); 50 + 51 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 52 + nv_mask(dev, 0x002500, 0x00000001, 0x00000000); 53 + if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id) 54 + nv_wr32(dev, 0x00330c, ctx->pinst >> 4); 55 + nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4); 56 + nv_mask(dev, 0x002500, 0x00000001, 0x00000001); 57 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 58 + 59 + chan->engctx[engine] = ctx; 60 + return 0; 61 + } 62 + 63 + static void 64 + nv40_mpeg_context_del(struct nouveau_channel *chan, int engine) 65 + { 66 + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 67 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 68 + struct drm_device *dev = chan->dev; 69 + unsigned long flags; 70 + u32 inst = 0x80000000 | (ctx->pinst >> 4); 71 + 72 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 73 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 74 + if (nv_rd32(dev, 0x00b318) == inst) 75 + nv_mask(dev, 0x00b318, 0x80000000, 0x00000000); 76 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); 77 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 78 + 79 + nouveau_gpuobj_ref(NULL, &ctx); 80 + chan->engctx[engine] = NULL; 81 + } 82 + 83 + static int 84 + nv40_mpeg_object_new(struct 
nouveau_channel *chan, int engine, 85 + u32 handle, u16 class) 86 + { 87 + struct drm_device *dev = chan->dev; 88 + struct nouveau_gpuobj *obj = NULL; 89 + int ret; 90 + 91 + ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC | 92 + NVOBJ_FLAG_ZERO_FREE, &obj); 93 + if (ret) 94 + return ret; 95 + obj->engine = 2; 96 + obj->class = class; 97 + 98 + nv_wo32(obj, 0x00, class); 99 + 100 + ret = nouveau_ramht_insert(chan, handle, obj); 101 + nouveau_gpuobj_ref(NULL, &obj); 102 + return ret; 103 + } 104 + 105 + static int 106 + nv40_mpeg_init(struct drm_device *dev, int engine) 107 + { 108 + struct drm_nouveau_private *dev_priv = dev->dev_private; 109 + struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); 110 + int i; 111 + 112 + /* VPE init */ 113 + nv_mask(dev, 0x000200, 0x00000002, 0x00000000); 114 + nv_mask(dev, 0x000200, 0x00000002, 0x00000002); 115 + nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ 116 + nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ 117 + 118 + for (i = 0; i < dev_priv->engine.fb.num_tiles; i++) 119 + pmpeg->base.set_tile_region(dev, i); 120 + 121 + /* PMPEG init */ 122 + nv_wr32(dev, 0x00b32c, 0x00000000); 123 + nv_wr32(dev, 0x00b314, 0x00000100); 124 + nv_wr32(dev, 0x00b220, 0x00000044); 125 + nv_wr32(dev, 0x00b300, 0x02001ec1); 126 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); 127 + 128 + nv_wr32(dev, 0x00b100, 0xffffffff); 129 + nv_wr32(dev, 0x00b140, 0xffffffff); 130 + 131 + if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) { 132 + NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200)); 133 + return -EBUSY; 134 + } 135 + 136 + return 0; 137 + } 138 + 139 + static int 140 + nv40_mpeg_fini(struct drm_device *dev, int engine) 141 + { 142 + /*XXX: context save? 
*/ 143 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 144 + nv_wr32(dev, 0x00b140, 0x00000000); 145 + return 0; 146 + } 147 + 148 + static int 149 + nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) 150 + { 151 + struct drm_device *dev = chan->dev; 152 + u32 inst = data << 4; 153 + u32 dma0 = nv_ri32(dev, inst + 0); 154 + u32 dma1 = nv_ri32(dev, inst + 4); 155 + u32 dma2 = nv_ri32(dev, inst + 8); 156 + u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); 157 + u32 size = dma1 + 1; 158 + 159 + /* only allow linear DMA objects */ 160 + if (!(dma0 & 0x00002000)) 161 + return -EINVAL; 162 + 163 + if (mthd == 0x0190) { 164 + /* DMA_CMD */ 165 + nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000)); 166 + nv_wr32(dev, 0x00b334, base); 167 + nv_wr32(dev, 0x00b324, size); 168 + } else 169 + if (mthd == 0x01a0) { 170 + /* DMA_DATA */ 171 + nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); 172 + nv_wr32(dev, 0x00b360, base); 173 + nv_wr32(dev, 0x00b364, size); 174 + } else { 175 + /* DMA_IMAGE, VRAM only */ 176 + if (dma0 & 0x000c0000) 177 + return -EINVAL; 178 + 179 + nv_wr32(dev, 0x00b370, base); 180 + nv_wr32(dev, 0x00b374, size); 181 + } 182 + 183 + return 0; 184 + } 185 + 186 + static int 187 + nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst) 188 + { 189 + struct drm_nouveau_private *dev_priv = dev->dev_private; 190 + struct nouveau_gpuobj *ctx; 191 + unsigned long flags; 192 + int i; 193 + 194 + spin_lock_irqsave(&dev_priv->channels.lock, flags); 195 + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 196 + if (!dev_priv->channels.ptr[i]) 197 + continue; 198 + 199 + ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG]; 200 + if (ctx && ctx->pinst == inst) 201 + break; 202 + } 203 + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 204 + return i; 205 + } 206 + 207 + static void 208 + nv40_vpe_set_tile_region(struct drm_device *dev, int i) 209 + { 210 + struct drm_nouveau_private *dev_priv = 
dev->dev_private; 211 + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 212 + 213 + nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch); 214 + nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit); 215 + nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr); 216 + } 217 + 218 + static void 219 + nv40_mpeg_isr(struct drm_device *dev) 220 + { 221 + u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4; 222 + u32 chid = nv40_mpeg_isr_chid(dev, inst); 223 + u32 stat = nv_rd32(dev, 0x00b100); 224 + u32 type = nv_rd32(dev, 0x00b230); 225 + u32 mthd = nv_rd32(dev, 0x00b234); 226 + u32 data = nv_rd32(dev, 0x00b238); 227 + u32 show = stat; 228 + 229 + if (stat & 0x01000000) { 230 + /* happens on initial binding of the object */ 231 + if (type == 0x00000020 && mthd == 0x0000) { 232 + nv_mask(dev, 0x00b308, 0x00000000, 0x00000000); 233 + show &= ~0x01000000; 234 + } 235 + 236 + if (type == 0x00000010) { 237 + if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data)) 238 + show &= ~0x01000000; 239 + } 240 + } 241 + 242 + nv_wr32(dev, 0x00b100, stat); 243 + nv_wr32(dev, 0x00b230, 0x00000001); 244 + 245 + if (show && nouveau_ratelimit()) { 246 + NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 247 + chid, inst, stat, type, mthd, data); 248 + } 249 + } 250 + 251 + static void 252 + nv40_vpe_isr(struct drm_device *dev) 253 + { 254 + if (nv_rd32(dev, 0x00b100)) 255 + nv40_mpeg_isr(dev); 256 + 257 + if (nv_rd32(dev, 0x00b800)) { 258 + u32 stat = nv_rd32(dev, 0x00b800); 259 + NV_INFO(dev, "PMSRCH: 0x%08x\n", stat); 260 + nv_wr32(dev, 0xb800, stat); 261 + } 262 + } 263 + 264 + static void 265 + nv40_mpeg_destroy(struct drm_device *dev, int engine) 266 + { 267 + struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); 268 + 269 + nouveau_irq_unregister(dev, 0); 270 + 271 + NVOBJ_ENGINE_DEL(dev, MPEG); 272 + kfree(pmpeg); 273 + } 274 + 275 + int 276 + nv40_mpeg_create(struct drm_device *dev) 277 + { 278 + struct nv40_mpeg_engine *pmpeg; 279 + 280 + pmpeg = 
kzalloc(sizeof(*pmpeg), GFP_KERNEL); 281 + if (!pmpeg) 282 + return -ENOMEM; 283 + 284 + pmpeg->base.destroy = nv40_mpeg_destroy; 285 + pmpeg->base.init = nv40_mpeg_init; 286 + pmpeg->base.fini = nv40_mpeg_fini; 287 + pmpeg->base.context_new = nv40_mpeg_context_new; 288 + pmpeg->base.context_del = nv40_mpeg_context_del; 289 + pmpeg->base.object_new = nv40_mpeg_object_new; 290 + 291 + /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between 292 + * all VPE engines, for this driver's purposes the PMPEG engine 293 + * will be treated as the "master" and handle the global VPE 294 + * bits too 295 + */ 296 + pmpeg->base.set_tile_region = nv40_vpe_set_tile_region; 297 + nouveau_irq_register(dev, 0, nv40_vpe_isr); 298 + 299 + NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); 300 + NVOBJ_CLASS(dev, 0x3174, MPEG); 301 + NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma); 302 + NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma); 303 + NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma); 304 + 305 + #if 0 306 + NVOBJ_ENGINE_ADD(dev, ME, &pme->base); 307 + NVOBJ_CLASS(dev, 0x4075, ME); 308 + #endif 309 + return 0; 310 + 311 + }
+37 -31
drivers/gpu/drm/nouveau/nv50_calc.c
··· 23 23 */ 24 24 25 25 #include "drmP.h" 26 - #include "drm_fixed.h" 27 26 #include "nouveau_drv.h" 28 27 #include "nouveau_hw.h" 29 28 ··· 46 47 } 47 48 48 49 int 49 - nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, 50 - int *N, int *fN, int *M, int *P) 50 + nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, 51 + int *pN, int *pfN, int *pM, int *P) 51 52 { 52 - fixed20_12 fb_div, a, b; 53 - u32 refclk = pll->refclk / 10; 54 - u32 max_vco_freq = pll->vco1.maxfreq / 10; 55 - u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; 56 - clk /= 10; 53 + u32 best_err = ~0, err; 54 + int M, lM, hM, N, fN; 57 55 58 - *P = max_vco_freq / clk; 56 + *P = pll->vco1.maxfreq / clk; 59 57 if (*P > pll->max_p) 60 58 *P = pll->max_p; 61 59 if (*P < pll->min_p) 62 60 *P = pll->min_p; 63 61 64 - /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ 65 - a.full = dfixed_const(refclk + max_vco_inputfreq); 66 - b.full = dfixed_const(max_vco_inputfreq); 67 - a.full = dfixed_div(a, b); 68 - a.full = dfixed_floor(a); 69 - *M = dfixed_trunc(a); 62 + lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq; 63 + lM = max(lM, (int)pll->vco1.min_m); 64 + hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq; 65 + hM = min(hM, (int)pll->vco1.max_m); 70 66 71 - /* fb_div = (vco * *M) / refclk; */ 72 - fb_div.full = dfixed_const(clk * *P); 73 - fb_div.full = dfixed_mul(fb_div, a); 74 - a.full = dfixed_const(refclk); 75 - fb_div.full = dfixed_div(fb_div, a); 67 + for (M = lM; M <= hM; M++) { 68 + u32 tmp = clk * *P * M; 69 + N = tmp / pll->refclk; 70 + fN = tmp % pll->refclk; 71 + if (!pfN && fN >= pll->refclk / 2) 72 + N++; 76 73 77 - /* *N = floor(fb_div); */ 78 - a.full = dfixed_floor(fb_div); 79 - *N = dfixed_trunc(fb_div); 74 + if (N < pll->vco1.min_n) 75 + continue; 76 + if (N > pll->vco1.max_n) 77 + break; 80 78 81 - /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ 82 - b.full = dfixed_const(8192); 83 - 
a.full = dfixed_mul(a, b); 84 - fb_div.full = dfixed_mul(fb_div, b); 85 - fb_div.full = fb_div.full - a.full; 86 - *fN = dfixed_trunc(fb_div) - 4096; 87 - *fN &= 0xffff; 79 + err = abs(clk - (pll->refclk * N / M / *P)); 80 + if (err < best_err) { 81 + best_err = err; 82 + *pN = N; 83 + *pM = M; 84 + } 88 85 89 - return clk; 86 + if (pfN) { 87 + *pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff; 88 + return clk; 89 + } 90 + } 91 + 92 + if (unlikely(best_err == ~0)) { 93 + NV_ERROR(dev, "unable to find matching pll values\n"); 94 + return -EINVAL; 95 + } 96 + 97 + return pll->refclk * *pN / *pM / *P; 90 98 }
+6 -7
drivers/gpu/drm/nouveau/nv50_crtc.c
··· 286 286 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); 287 287 } else 288 288 if (dev_priv->chipset < NV_C0) { 289 - ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 289 + ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P); 290 290 if (ret <= 0) 291 291 return 0; 292 292 ··· 298 298 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); 299 299 nv_wr32(dev, pll.reg + 8, N2); 300 300 } else { 301 - ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 301 + ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P); 302 302 if (ret <= 0) 303 303 return 0; 304 304 ··· 349 349 struct drm_gem_object *gem; 350 350 int ret = 0, i; 351 351 352 - if (width != 64 || height != 64) 353 - return -EINVAL; 354 - 355 352 if (!buffer_handle) { 356 353 nv_crtc->cursor.hide(nv_crtc, true); 357 354 return 0; 358 355 } 356 + 357 + if (width != 64 || height != 64) 358 + return -EINVAL; 359 359 360 360 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 361 361 if (!gem) ··· 532 532 if (atomic) { 533 533 drm_fb = passed_fb; 534 534 fb = nouveau_framebuffer(passed_fb); 535 - } 536 - else { 535 + } else { 537 536 /* If not atomic, we can go ahead and pin, and unpin the 538 537 * old fb we were passed. 539 538 */
+15 -3
drivers/gpu/drm/nouveau/nv50_display.c
··· 517 517 if (bios->fp.if_is_24bit) 518 518 script |= 0x0200; 519 519 } else { 520 + /* determine number of lvds links */ 521 + if (nv_connector && nv_connector->edid && 522 + nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) { 523 + /* http://www.spwg.org */ 524 + if (((u8 *)nv_connector->edid)[121] == 2) 525 + script |= 0x0100; 526 + } else 520 527 if (pxclk >= bios->fp.duallink_transition_clk) { 521 528 script |= 0x0100; 529 + } 530 + 531 + /* determine panel depth */ 532 + if (script & 0x0100) { 522 533 if (bios->fp.strapless_is_24bit & 2) 523 534 script |= 0x0200; 524 - } else 525 - if (bios->fp.strapless_is_24bit & 1) 526 - script |= 0x0200; 535 + } else { 536 + if (bios->fp.strapless_is_24bit & 1) 537 + script |= 0x0200; 538 + } 527 539 528 540 if (nv_connector && nv_connector->edid && 529 541 (nv_connector->edid->revision >= 4) &&
+244 -198
drivers/gpu/drm/nouveau/nv50_graph.c
··· 31 31 #include "nouveau_grctx.h" 32 32 #include "nouveau_dma.h" 33 33 #include "nouveau_vm.h" 34 + #include "nouveau_ramht.h" 34 35 #include "nv50_evo.h" 35 36 36 - static int nv50_graph_register(struct drm_device *); 37 - static void nv50_graph_isr(struct drm_device *); 37 + struct nv50_graph_engine { 38 + struct nouveau_exec_engine base; 39 + u32 ctxprog[512]; 40 + u32 ctxprog_size; 41 + u32 grctx_size; 42 + }; 43 + 44 + static void 45 + nv50_graph_fifo_access(struct drm_device *dev, bool enabled) 46 + { 47 + const uint32_t mask = 0x00010001; 48 + 49 + if (enabled) 50 + nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask); 51 + else 52 + nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask); 53 + } 54 + 55 + static struct nouveau_channel * 56 + nv50_graph_channel(struct drm_device *dev) 57 + { 58 + struct drm_nouveau_private *dev_priv = dev->dev_private; 59 + uint32_t inst; 60 + int i; 61 + 62 + /* Be sure we're not in the middle of a context switch or bad things 63 + * will happen, such as unloading the wrong pgraph context. 
64 + */ 65 + if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) 66 + NV_ERROR(dev, "Ctxprog is still running\n"); 67 + 68 + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 69 + if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 70 + return NULL; 71 + inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; 72 + 73 + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 74 + struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 75 + 76 + if (chan && chan->ramin && chan->ramin->vinst == inst) 77 + return chan; 78 + } 79 + 80 + return NULL; 81 + } 82 + 83 + static int 84 + nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) 85 + { 86 + uint32_t fifo = nv_rd32(dev, 0x400500); 87 + 88 + nv_wr32(dev, 0x400500, fifo & ~1); 89 + nv_wr32(dev, 0x400784, inst); 90 + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40); 91 + nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11); 92 + nv_wr32(dev, 0x400040, 0xffffffff); 93 + (void)nv_rd32(dev, 0x400040); 94 + nv_wr32(dev, 0x400040, 0x00000000); 95 + nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1); 96 + 97 + if (nouveau_wait_for_idle(dev)) 98 + nv_wr32(dev, 0x40032c, inst | (1<<31)); 99 + nv_wr32(dev, 0x400500, fifo); 100 + 101 + return 0; 102 + } 103 + 104 + static int 105 + nv50_graph_unload_context(struct drm_device *dev) 106 + { 107 + uint32_t inst; 108 + 109 + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 110 + if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 111 + return 0; 112 + inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 113 + 114 + nouveau_wait_for_idle(dev); 115 + nv_wr32(dev, 0x400784, inst); 116 + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 117 + nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 118 + nouveau_wait_for_idle(dev); 119 + 120 + nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 121 + return 0; 122 + } 38 123 39 124 static void 40 125 nv50_graph_init_reset(struct drm_device *dev) ··· 137 52 { 138 53 NV_DEBUG(dev, "\n"); 139 54 140 - nouveau_irq_register(dev, 12, nv50_graph_isr); 141 55 
nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); 142 56 nv_wr32(dev, 0x400138, 0xffffffff); 143 57 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); ··· 219 135 static int 220 136 nv50_graph_init_ctxctl(struct drm_device *dev) 221 137 { 222 - struct drm_nouveau_private *dev_priv = dev->dev_private; 223 - struct nouveau_grctx ctx = {}; 224 - uint32_t *cp; 138 + struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR); 225 139 int i; 226 140 227 141 NV_DEBUG(dev, "\n"); 228 142 229 - cp = kmalloc(512 * 4, GFP_KERNEL); 230 - if (!cp) { 231 - NV_ERROR(dev, "failed to allocate ctxprog\n"); 232 - dev_priv->engine.graph.accel_blocked = true; 233 - return 0; 234 - } 235 - 236 - ctx.dev = dev; 237 - ctx.mode = NOUVEAU_GRCTX_PROG; 238 - ctx.data = cp; 239 - ctx.ctxprog_max = 512; 240 - if (!nv50_grctx_init(&ctx)) { 241 - dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; 242 - 243 - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 244 - for (i = 0; i < ctx.ctxprog_len; i++) 245 - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); 246 - } else { 247 - dev_priv->engine.graph.accel_blocked = true; 248 - } 249 - kfree(cp); 143 + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 144 + for (i = 0; i < pgraph->ctxprog_size; i++) 145 + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]); 250 146 251 147 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 252 148 nv_wr32(dev, 0x400320, 4); ··· 235 171 return 0; 236 172 } 237 173 238 - int 239 - nv50_graph_init(struct drm_device *dev) 174 + static int 175 + nv50_graph_init(struct drm_device *dev, int engine) 240 176 { 241 177 int ret; 242 178 ··· 250 186 if (ret) 251 187 return ret; 252 188 253 - ret = nv50_graph_register(dev); 254 - if (ret) 255 - return ret; 256 189 nv50_graph_init_intr(dev); 257 190 return 0; 258 191 } 259 192 260 - void 261 - nv50_graph_takedown(struct drm_device *dev) 193 + static int 194 + nv50_graph_fini(struct drm_device *dev, int engine) 262 195 { 263 196 NV_DEBUG(dev, "\n"); 
197 + nv50_graph_unload_context(dev); 264 198 nv_wr32(dev, 0x40013c, 0x00000000); 265 - nouveau_irq_unregister(dev, 12); 199 + return 0; 266 200 } 267 201 268 - void 269 - nv50_graph_fifo_access(struct drm_device *dev, bool enabled) 270 - { 271 - const uint32_t mask = 0x00010001; 272 - 273 - if (enabled) 274 - nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask); 275 - else 276 - nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask); 277 - } 278 - 279 - struct nouveau_channel * 280 - nv50_graph_channel(struct drm_device *dev) 281 - { 282 - struct drm_nouveau_private *dev_priv = dev->dev_private; 283 - uint32_t inst; 284 - int i; 285 - 286 - /* Be sure we're not in the middle of a context switch or bad things 287 - * will happen, such as unloading the wrong pgraph context. 288 - */ 289 - if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) 290 - NV_ERROR(dev, "Ctxprog is still running\n"); 291 - 292 - inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 293 - if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 294 - return NULL; 295 - inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; 296 - 297 - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 298 - struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 299 - 300 - if (chan && chan->ramin && chan->ramin->vinst == inst) 301 - return chan; 302 - } 303 - 304 - return NULL; 305 - } 306 - 307 - int 308 - nv50_graph_create_context(struct nouveau_channel *chan) 202 + static int 203 + nv50_graph_context_new(struct nouveau_channel *chan, int engine) 309 204 { 310 205 struct drm_device *dev = chan->dev; 311 206 struct drm_nouveau_private *dev_priv = dev->dev_private; 312 207 struct nouveau_gpuobj *ramin = chan->ramin; 313 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 208 + struct nouveau_gpuobj *grctx = NULL; 209 + struct nv50_graph_engine *pgraph = nv_engine(dev, engine); 314 210 struct nouveau_grctx ctx = {}; 315 211 int hdr, ret; 316 212 317 213 NV_DEBUG(dev, "ch%d\n", chan->id); 318 214 319 - ret = 
nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, 215 + ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0, 320 216 NVOBJ_FLAG_ZERO_ALLOC | 321 - NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 217 + NVOBJ_FLAG_ZERO_FREE, &grctx); 322 218 if (ret) 323 219 return ret; 324 220 325 221 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 326 222 nv_wo32(ramin, hdr + 0x00, 0x00190002); 327 - nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + 328 - pgraph->grctx_size - 1); 329 - nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst); 223 + nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1); 224 + nv_wo32(ramin, hdr + 0x08, grctx->vinst); 330 225 nv_wo32(ramin, hdr + 0x0c, 0); 331 226 nv_wo32(ramin, hdr + 0x10, 0); 332 227 nv_wo32(ramin, hdr + 0x14, 0x00010000); 333 228 334 229 ctx.dev = chan->dev; 335 230 ctx.mode = NOUVEAU_GRCTX_VALS; 336 - ctx.data = chan->ramin_grctx; 231 + ctx.data = grctx; 337 232 nv50_grctx_init(&ctx); 338 233 339 - nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); 234 + nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12); 340 235 341 236 dev_priv->engine.instmem.flush(dev); 342 - atomic_inc(&chan->vm->pgraph_refs); 237 + 238 + atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]); 239 + chan->engctx[NVOBJ_ENGINE_GR] = grctx; 343 240 return 0; 344 241 } 345 242 346 - void 347 - nv50_graph_destroy_context(struct nouveau_channel *chan) 243 + static void 244 + nv50_graph_context_del(struct nouveau_channel *chan, int engine) 348 245 { 246 + struct nouveau_gpuobj *grctx = chan->engctx[engine]; 349 247 struct drm_device *dev = chan->dev; 350 248 struct drm_nouveau_private *dev_priv = dev->dev_private; 351 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 352 249 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 353 250 int i, hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; 354 251 unsigned long flags; ··· 321 296 322 297 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 323 298 pfifo->reassign(dev, false); 324 - pgraph->fifo_access(dev, false); 299 + nv50_graph_fifo_access(dev, false); 325 300 326 - if (pgraph->channel(dev) == chan) 327 - pgraph->unload_context(dev); 301 + if (nv50_graph_channel(dev) == chan) 302 + nv50_graph_unload_context(dev); 328 303 329 304 for (i = hdr; i < hdr + 24; i += 4) 330 305 nv_wo32(chan->ramin, i, 0); 331 306 dev_priv->engine.instmem.flush(dev); 332 307 333 - pgraph->fifo_access(dev, true); 308 + nv50_graph_fifo_access(dev, true); 334 309 pfifo->reassign(dev, true); 335 310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 336 311 337 - nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 312 + nouveau_gpuobj_ref(NULL, &grctx); 338 313 339 - atomic_dec(&chan->vm->pgraph_refs); 314 + atomic_dec(&chan->vm->engref[engine]); 315 + chan->engctx[engine] = NULL; 340 316 } 341 317 342 318 static int 343 - nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) 319 + nv50_graph_object_new(struct nouveau_channel *chan, int engine, 320 + u32 handle, u16 class) 344 321 { 345 - uint32_t fifo = nv_rd32(dev, 0x400500); 322 + struct drm_device *dev = chan->dev; 323 + struct drm_nouveau_private *dev_priv = dev->dev_private; 324 + struct nouveau_gpuobj *obj = NULL; 325 + int ret; 346 326 347 - nv_wr32(dev, 0x400500, fifo & ~1); 348 - nv_wr32(dev, 0x400784, inst); 349 - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40); 350 - nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11); 351 - nv_wr32(dev, 0x400040, 0xffffffff); 352 - (void)nv_rd32(dev, 0x400040); 353 - nv_wr32(dev, 0x400040, 0x00000000); 354 - nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1); 327 + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 328 + if (ret) 329 + return ret; 330 + obj->engine = 1; 331 + obj->class = class; 355 332 356 - if (nouveau_wait_for_idle(dev)) 357 - nv_wr32(dev, 
0x40032c, inst | (1<<31)); 358 - nv_wr32(dev, 0x400500, fifo); 333 + nv_wo32(obj, 0x00, class); 334 + nv_wo32(obj, 0x04, 0x00000000); 335 + nv_wo32(obj, 0x08, 0x00000000); 336 + nv_wo32(obj, 0x0c, 0x00000000); 337 + dev_priv->engine.instmem.flush(dev); 359 338 360 - return 0; 361 - } 362 - 363 - int 364 - nv50_graph_load_context(struct nouveau_channel *chan) 365 - { 366 - uint32_t inst = chan->ramin->vinst >> 12; 367 - 368 - NV_DEBUG(chan->dev, "ch%d\n", chan->id); 369 - return nv50_graph_do_load_context(chan->dev, inst); 370 - } 371 - 372 - int 373 - nv50_graph_unload_context(struct drm_device *dev) 374 - { 375 - uint32_t inst; 376 - 377 - inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 378 - if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 379 - return 0; 380 - inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 381 - 382 - nouveau_wait_for_idle(dev); 383 - nv_wr32(dev, 0x400784, inst); 384 - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 385 - nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 386 - nouveau_wait_for_idle(dev); 387 - 388 - nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 389 - return 0; 339 + ret = nouveau_ramht_insert(chan, handle, obj); 340 + nouveau_gpuobj_ref(NULL, &obj); 341 + return ret; 390 342 } 391 343 392 344 static void ··· 444 442 return 0; 445 443 } 446 444 447 - static int 448 - nv50_graph_register(struct drm_device *dev) 449 - { 450 - struct drm_nouveau_private *dev_priv = dev->dev_private; 451 445 452 - if (dev_priv->engine.graph.registered) 453 - return 0; 454 - 455 - NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 456 - NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); 457 - NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); 458 - NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); 459 - NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); 460 - NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); 461 - 462 - NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 463 - NVOBJ_CLASS(dev, 
0x5039, GR); /* m2mf */ 464 - NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ 465 - 466 - /* tesla */ 467 - if (dev_priv->chipset == 0x50) 468 - NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ 469 - else 470 - if (dev_priv->chipset < 0xa0) 471 - NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ 472 - else { 473 - switch (dev_priv->chipset) { 474 - case 0xa0: 475 - case 0xaa: 476 - case 0xac: 477 - NVOBJ_CLASS(dev, 0x8397, GR); 478 - break; 479 - case 0xa3: 480 - case 0xa5: 481 - case 0xa8: 482 - NVOBJ_CLASS(dev, 0x8597, GR); 483 - break; 484 - case 0xaf: 485 - NVOBJ_CLASS(dev, 0x8697, GR); 486 - break; 487 - } 488 - } 489 - 490 - /* compute */ 491 - NVOBJ_CLASS(dev, 0x50c0, GR); 492 - if (dev_priv->chipset > 0xa0 && 493 - dev_priv->chipset != 0xaa && 494 - dev_priv->chipset != 0xac) 495 - NVOBJ_CLASS(dev, 0x85c0, GR); 496 - 497 - dev_priv->engine.graph.registered = true; 498 - return 0; 499 - } 500 - 501 - void 502 - nv50_graph_tlb_flush(struct drm_device *dev) 446 + static void 447 + nv50_graph_tlb_flush(struct drm_device *dev, int engine) 503 448 { 504 449 nv50_vm_flush_engine(dev, 0); 505 450 } 506 451 507 - void 508 - nv84_graph_tlb_flush(struct drm_device *dev) 452 + static void 453 + nv84_graph_tlb_flush(struct drm_device *dev, int engine) 509 454 { 510 455 struct drm_nouveau_private *dev_priv = dev->dev_private; 511 456 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; ··· 497 548 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 498 549 } 499 550 500 - static struct nouveau_enum nv50_mp_exec_error_names[] = 501 - { 551 + static struct nouveau_enum nv50_mp_exec_error_names[] = { 502 552 { 3, "STACK_UNDERFLOW", NULL }, 503 553 { 4, "QUADON_ACTIVE", NULL }, 504 554 { 8, "TIMEOUT", NULL }, ··· 611 663 nv_rd32(dev, addr + 0x20); 612 664 pc = nv_rd32(dev, addr + 0x24); 613 665 oplow = nv_rd32(dev, addr + 0x70); 614 - ophigh= nv_rd32(dev, addr + 0x74); 666 + ophigh = nv_rd32(dev, addr + 0x74); 615 667 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " 616 
668 "TP %d MP %d: ", tpid, i); 617 669 nouveau_enum_print(nv50_mp_exec_error_names, status); ··· 939 991 return 1; 940 992 } 941 993 942 - static int 994 + int 943 995 nv50_graph_isr_chid(struct drm_device *dev, u64 inst) 944 996 { 945 997 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 1020 1072 1021 1073 if (nv_rd32(dev, 0x400824) & (1 << 31)) 1022 1074 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 1075 + } 1076 + 1077 + static void 1078 + nv50_graph_destroy(struct drm_device *dev, int engine) 1079 + { 1080 + struct nv50_graph_engine *pgraph = nv_engine(dev, engine); 1081 + 1082 + NVOBJ_ENGINE_DEL(dev, GR); 1083 + 1084 + nouveau_irq_unregister(dev, 12); 1085 + kfree(pgraph); 1086 + } 1087 + 1088 + int 1089 + nv50_graph_create(struct drm_device *dev) 1090 + { 1091 + struct drm_nouveau_private *dev_priv = dev->dev_private; 1092 + struct nv50_graph_engine *pgraph; 1093 + struct nouveau_grctx ctx = {}; 1094 + int ret; 1095 + 1096 + pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL); 1097 + if (!pgraph) 1098 + return -ENOMEM; 1099 + 1100 + ctx.dev = dev; 1101 + ctx.mode = NOUVEAU_GRCTX_PROG; 1102 + ctx.data = pgraph->ctxprog; 1103 + ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog); 1104 + 1105 + ret = nv50_grctx_init(&ctx); 1106 + if (ret) { 1107 + NV_ERROR(dev, "PGRAPH: ctxprog build failed\n"); 1108 + kfree(pgraph); 1109 + return 0; 1110 + } 1111 + 1112 + pgraph->grctx_size = ctx.ctxvals_pos * 4; 1113 + pgraph->ctxprog_size = ctx.ctxprog_len; 1114 + 1115 + pgraph->base.destroy = nv50_graph_destroy; 1116 + pgraph->base.init = nv50_graph_init; 1117 + pgraph->base.fini = nv50_graph_fini; 1118 + pgraph->base.context_new = nv50_graph_context_new; 1119 + pgraph->base.context_del = nv50_graph_context_del; 1120 + pgraph->base.object_new = nv50_graph_object_new; 1121 + if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac) 1122 + pgraph->base.tlb_flush = nv50_graph_tlb_flush; 1123 + else 1124 + pgraph->base.tlb_flush = nv84_graph_tlb_flush; 1125 + 
1126 + nouveau_irq_register(dev, 12, nv50_graph_isr); 1127 + 1128 + /* NVSW really doesn't live here... */ 1129 + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ 1130 + NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); 1131 + NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); 1132 + NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); 1133 + NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); 1134 + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); 1135 + 1136 + NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 1137 + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 1138 + NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ 1139 + NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ 1140 + 1141 + /* tesla */ 1142 + if (dev_priv->chipset == 0x50) 1143 + NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ 1144 + else 1145 + if (dev_priv->chipset < 0xa0) 1146 + NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ 1147 + else { 1148 + switch (dev_priv->chipset) { 1149 + case 0xa0: 1150 + case 0xaa: 1151 + case 0xac: 1152 + NVOBJ_CLASS(dev, 0x8397, GR); 1153 + break; 1154 + case 0xa3: 1155 + case 0xa5: 1156 + case 0xa8: 1157 + NVOBJ_CLASS(dev, 0x8597, GR); 1158 + break; 1159 + case 0xaf: 1160 + NVOBJ_CLASS(dev, 0x8697, GR); 1161 + break; 1162 + } 1163 + } 1164 + 1165 + /* compute */ 1166 + NVOBJ_CLASS(dev, 0x50c0, GR); 1167 + if (dev_priv->chipset > 0xa0 && 1168 + dev_priv->chipset != 0xaa && 1169 + dev_priv->chipset != 0xac) 1170 + NVOBJ_CLASS(dev, 0x85c0, GR); 1171 + 1172 + return 0; 1023 1173 }
+4 -6
drivers/gpu/drm/nouveau/nv50_grctx.c
··· 747 747 gr_def(ctx, offset + 0x64, 0x0000001f); 748 748 gr_def(ctx, offset + 0x68, 0x0000000f); 749 749 gr_def(ctx, offset + 0x6c, 0x0000000f); 750 - } else if(dev_priv->chipset < 0xa0) { 750 + } else if (dev_priv->chipset < 0xa0) { 751 751 cp_ctx(ctx, offset + 0x50, 1); 752 752 cp_ctx(ctx, offset + 0x70, 1); 753 753 } else { ··· 924 924 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ 925 925 } else { 926 926 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ 927 - } 927 + } 928 928 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ 929 929 if (dev_priv->chipset != 0x50) 930 930 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ ··· 1803 1803 xf_emit(ctx, 1, 0); /* 1ff */ 1804 1804 xf_emit(ctx, 8, 0); /* 0? */ 1805 1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ 1806 - } 1807 - else 1808 - { 1806 + } else { 1809 1807 xf_emit(ctx, 0xc, 0); /* RO */ 1810 1808 /* SEEK */ 1811 1809 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ ··· 2834 2836 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ 2835 2837 if (IS_NVA3F(dev_priv->chipset)) 2836 2838 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2837 - if(dev_priv->chipset == 0x50) 2839 + if (dev_priv->chipset == 0x50) 2838 2840 xf_emit(ctx, 1, 0); /* ff */ 2839 2841 else 2840 2842 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
+256
drivers/gpu/drm/nouveau/nv50_mpeg.c
··· 1 + /* 2 + * Copyright 2011 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: Ben Skeggs 23 + */ 24 + 25 + #include "drmP.h" 26 + #include "nouveau_drv.h" 27 + #include "nouveau_ramht.h" 28 + 29 + struct nv50_mpeg_engine { 30 + struct nouveau_exec_engine base; 31 + }; 32 + 33 + static inline u32 34 + CTX_PTR(struct drm_device *dev, u32 offset) 35 + { 36 + struct drm_nouveau_private *dev_priv = dev->dev_private; 37 + 38 + if (dev_priv->chipset == 0x50) 39 + offset += 0x0260; 40 + else 41 + offset += 0x0060; 42 + 43 + return offset; 44 + } 45 + 46 + static int 47 + nv50_mpeg_context_new(struct nouveau_channel *chan, int engine) 48 + { 49 + struct drm_device *dev = chan->dev; 50 + struct drm_nouveau_private *dev_priv = dev->dev_private; 51 + struct nouveau_gpuobj *ramin = chan->ramin; 52 + struct nouveau_gpuobj *ctx = NULL; 53 + int ret; 54 + 55 + NV_DEBUG(dev, "ch%d\n", chan->id); 56 + 57 + ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC | 58 + NVOBJ_FLAG_ZERO_FREE, &ctx); 59 + if (ret) 60 + return ret; 61 + 62 + nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002); 63 + nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1); 64 + nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst); 65 + nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0); 66 + nv_wo32(ramin, CTX_PTR(dev, 0x10), 0); 67 + nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000); 68 + 69 + nv_wo32(ctx, 0x70, 0x00801ec1); 70 + nv_wo32(ctx, 0x7c, 0x0000037c); 71 + dev_priv->engine.instmem.flush(dev); 72 + 73 + chan->engctx[engine] = ctx; 74 + return 0; 75 + } 76 + 77 + static void 78 + nv50_mpeg_context_del(struct nouveau_channel *chan, int engine) 79 + { 80 + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 81 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 82 + struct drm_device *dev = chan->dev; 83 + unsigned long flags; 84 + u32 inst, i; 85 + 86 + if (!chan->ramin) 87 + return; 88 + 89 + inst = chan->ramin->vinst >> 12; 90 + inst |= 0x80000000; 91 + 92 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 93 + nv_mask(dev, 0x00b32c, 
0x00000001, 0x00000000); 94 + if (nv_rd32(dev, 0x00b318) == inst) 95 + nv_mask(dev, 0x00b318, 0x80000000, 0x00000000); 96 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); 97 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 98 + 99 + for (i = 0x00; i <= 0x14; i += 4) 100 + nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000); 101 + nouveau_gpuobj_ref(NULL, &ctx); 102 + chan->engctx[engine] = NULL; 103 + } 104 + 105 + static int 106 + nv50_mpeg_object_new(struct nouveau_channel *chan, int engine, 107 + u32 handle, u16 class) 108 + { 109 + struct drm_device *dev = chan->dev; 110 + struct drm_nouveau_private *dev_priv = dev->dev_private; 111 + struct nouveau_gpuobj *obj = NULL; 112 + int ret; 113 + 114 + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 115 + if (ret) 116 + return ret; 117 + obj->engine = 2; 118 + obj->class = class; 119 + 120 + nv_wo32(obj, 0x00, class); 121 + nv_wo32(obj, 0x04, 0x00000000); 122 + nv_wo32(obj, 0x08, 0x00000000); 123 + nv_wo32(obj, 0x0c, 0x00000000); 124 + dev_priv->engine.instmem.flush(dev); 125 + 126 + ret = nouveau_ramht_insert(chan, handle, obj); 127 + nouveau_gpuobj_ref(NULL, &obj); 128 + return ret; 129 + } 130 + 131 + static void 132 + nv50_mpeg_tlb_flush(struct drm_device *dev, int engine) 133 + { 134 + nv50_vm_flush_engine(dev, 0x08); 135 + } 136 + 137 + static int 138 + nv50_mpeg_init(struct drm_device *dev, int engine) 139 + { 140 + nv_wr32(dev, 0x00b32c, 0x00000000); 141 + nv_wr32(dev, 0x00b314, 0x00000100); 142 + nv_wr32(dev, 0x00b0e0, 0x0000001a); 143 + 144 + nv_wr32(dev, 0x00b220, 0x00000044); 145 + nv_wr32(dev, 0x00b300, 0x00801ec1); 146 + nv_wr32(dev, 0x00b390, 0x00000000); 147 + nv_wr32(dev, 0x00b394, 0x00000000); 148 + nv_wr32(dev, 0x00b398, 0x00000000); 149 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); 150 + 151 + nv_wr32(dev, 0x00b100, 0xffffffff); 152 + nv_wr32(dev, 0x00b140, 0xffffffff); 153 + 154 + if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) { 155 + NV_ERROR(dev, 
"PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200)); 156 + return -EBUSY; 157 + } 158 + 159 + return 0; 160 + } 161 + 162 + static int 163 + nv50_mpeg_fini(struct drm_device *dev, int engine) 164 + { 165 + /*XXX: context save for s/r */ 166 + nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 167 + nv_wr32(dev, 0x00b140, 0x00000000); 168 + return 0; 169 + } 170 + 171 + static void 172 + nv50_mpeg_isr(struct drm_device *dev) 173 + { 174 + u32 stat = nv_rd32(dev, 0x00b100); 175 + u32 type = nv_rd32(dev, 0x00b230); 176 + u32 mthd = nv_rd32(dev, 0x00b234); 177 + u32 data = nv_rd32(dev, 0x00b238); 178 + u32 show = stat; 179 + 180 + if (stat & 0x01000000) { 181 + /* happens on initial binding of the object */ 182 + if (type == 0x00000020 && mthd == 0x0000) { 183 + nv_wr32(dev, 0x00b308, 0x00000100); 184 + show &= ~0x01000000; 185 + } 186 + } 187 + 188 + if (show && nouveau_ratelimit()) { 189 + NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n", 190 + stat, type, mthd, data); 191 + } 192 + 193 + nv_wr32(dev, 0x00b100, stat); 194 + nv_wr32(dev, 0x00b230, 0x00000001); 195 + nv50_fb_vm_trap(dev, 1); 196 + } 197 + 198 + static void 199 + nv50_vpe_isr(struct drm_device *dev) 200 + { 201 + if (nv_rd32(dev, 0x00b100)) 202 + nv50_mpeg_isr(dev); 203 + 204 + if (nv_rd32(dev, 0x00b800)) { 205 + u32 stat = nv_rd32(dev, 0x00b800); 206 + NV_INFO(dev, "PMSRCH: 0x%08x\n", stat); 207 + nv_wr32(dev, 0xb800, stat); 208 + } 209 + } 210 + 211 + static void 212 + nv50_mpeg_destroy(struct drm_device *dev, int engine) 213 + { 214 + struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine); 215 + 216 + nouveau_irq_unregister(dev, 0); 217 + 218 + NVOBJ_ENGINE_DEL(dev, MPEG); 219 + kfree(pmpeg); 220 + } 221 + 222 + int 223 + nv50_mpeg_create(struct drm_device *dev) 224 + { 225 + struct drm_nouveau_private *dev_priv = dev->dev_private; 226 + struct nv50_mpeg_engine *pmpeg; 227 + 228 + pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); 229 + if (!pmpeg) 230 + return -ENOMEM; 231 + 232 + pmpeg->base.destroy = 
nv50_mpeg_destroy; 233 + pmpeg->base.init = nv50_mpeg_init; 234 + pmpeg->base.fini = nv50_mpeg_fini; 235 + pmpeg->base.context_new = nv50_mpeg_context_new; 236 + pmpeg->base.context_del = nv50_mpeg_context_del; 237 + pmpeg->base.object_new = nv50_mpeg_object_new; 238 + pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush; 239 + 240 + if (dev_priv->chipset == 0x50) { 241 + nouveau_irq_register(dev, 0, nv50_vpe_isr); 242 + NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); 243 + NVOBJ_CLASS(dev, 0x3174, MPEG); 244 + #if 0 245 + NVOBJ_ENGINE_ADD(dev, ME, &pme->base); 246 + NVOBJ_CLASS(dev, 0x4075, ME); 247 + #endif 248 + } else { 249 + nouveau_irq_register(dev, 0, nv50_mpeg_isr); 250 + NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); 251 + NVOBJ_CLASS(dev, 0x8274, MPEG); 252 + } 253 + 254 + return 0; 255 + 256 + }
+15
drivers/gpu/drm/nouveau/nv50_pm.c
··· 47 47 48 48 reg0 = nv_rd32(dev, pll.reg + 0); 49 49 reg1 = nv_rd32(dev, pll.reg + 4); 50 + 51 + if ((reg0 & 0x80000000) == 0) { 52 + if (id == PLL_SHADER) { 53 + NV_DEBUG(dev, "Shader PLL is disabled. " 54 + "Shader clock is twice the core\n"); 55 + ret = nv50_pm_clock_get(dev, PLL_CORE); 56 + if (ret > 0) 57 + return ret << 1; 58 + } else if (id == PLL_MEMORY) { 59 + NV_DEBUG(dev, "Memory PLL is disabled. " 60 + "Memory clock is equal to the ref_clk\n"); 61 + return pll.refclk; 62 + } 63 + } 64 + 50 65 P = (reg0 & 0x00070000) >> 16; 51 66 N = (reg1 & 0x0000ff00) >> 8; 52 67 M = (reg1 & 0x000000ff);
+5 -7
drivers/gpu/drm/nouveau/nv50_vm.c
··· 151 151 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 152 152 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 153 153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 154 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 155 - struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; 154 + int i; 156 155 157 156 pinstmem->flush(vm->dev); 158 157 ··· 162 163 } 163 164 164 165 pfifo->tlb_flush(vm->dev); 165 - 166 - if (atomic_read(&vm->pgraph_refs)) 167 - pgraph->tlb_flush(vm->dev); 168 - if (atomic_read(&vm->pcrypt_refs)) 169 - pcrypt->tlb_flush(vm->dev); 166 + for (i = 0; i < NVOBJ_ENGINE_NR; i++) { 167 + if (atomic_read(&vm->engref[i])) 168 + dev_priv->eng[i]->tlb_flush(vm->dev, i); 169 + } 170 170 } 171 171 172 172 void
+101 -48
drivers/gpu/drm/nouveau/nv84_crypt.c
··· 26 26 #include "nouveau_drv.h" 27 27 #include "nouveau_util.h" 28 28 #include "nouveau_vm.h" 29 + #include "nouveau_ramht.h" 29 30 30 - static void nv84_crypt_isr(struct drm_device *); 31 + struct nv84_crypt_engine { 32 + struct nouveau_exec_engine base; 33 + }; 31 34 32 - int 33 - nv84_crypt_create_context(struct nouveau_channel *chan) 35 + static int 36 + nv84_crypt_context_new(struct nouveau_channel *chan, int engine) 34 37 { 35 38 struct drm_device *dev = chan->dev; 36 39 struct drm_nouveau_private *dev_priv = dev->dev_private; 37 40 struct nouveau_gpuobj *ramin = chan->ramin; 41 + struct nouveau_gpuobj *ctx; 38 42 int ret; 39 43 40 44 NV_DEBUG(dev, "ch%d\n", chan->id); 41 45 42 - ret = nouveau_gpuobj_new(dev, chan, 256, 0, 43 - NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 44 - &chan->crypt_ctx); 46 + ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC | 47 + NVOBJ_FLAG_ZERO_FREE, &ctx); 45 48 if (ret) 46 49 return ret; 47 50 48 51 nv_wo32(ramin, 0xa0, 0x00190000); 49 - nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); 50 - nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); 52 + nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1); 53 + nv_wo32(ramin, 0xa8, ctx->vinst); 51 54 nv_wo32(ramin, 0xac, 0); 52 55 nv_wo32(ramin, 0xb0, 0); 53 56 nv_wo32(ramin, 0xb4, 0); 54 - 55 57 dev_priv->engine.instmem.flush(dev); 56 - atomic_inc(&chan->vm->pcrypt_refs); 58 + 59 + atomic_inc(&chan->vm->engref[engine]); 60 + chan->engctx[engine] = ctx; 57 61 return 0; 58 62 } 59 63 60 - void 61 - nv84_crypt_destroy_context(struct nouveau_channel *chan) 64 + static void 65 + nv84_crypt_context_del(struct nouveau_channel *chan, int engine) 62 66 { 67 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 63 68 struct drm_device *dev = chan->dev; 64 69 u32 inst; 65 - 66 - if (!chan->crypt_ctx) 67 - return; 68 70 69 71 inst = (chan->ramin->vinst >> 12); 70 72 inst |= 0x80000000; ··· 82 80 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); 83 81 nv_wr32(dev, 0x10200c, 0x00000010); 
84 82 85 - nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); 86 - atomic_dec(&chan->vm->pcrypt_refs); 83 + nouveau_gpuobj_ref(NULL, &ctx); 84 + 85 + atomic_dec(&chan->vm->engref[engine]); 86 + chan->engctx[engine] = NULL; 87 87 } 88 88 89 - void 90 - nv84_crypt_tlb_flush(struct drm_device *dev) 89 + static int 90 + nv84_crypt_object_new(struct nouveau_channel *chan, int engine, 91 + u32 handle, u16 class) 92 + { 93 + struct drm_device *dev = chan->dev; 94 + struct drm_nouveau_private *dev_priv = dev->dev_private; 95 + struct nouveau_gpuobj *obj = NULL; 96 + int ret; 97 + 98 + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 99 + if (ret) 100 + return ret; 101 + obj->engine = 5; 102 + obj->class = class; 103 + 104 + nv_wo32(obj, 0x00, class); 105 + dev_priv->engine.instmem.flush(dev); 106 + 107 + ret = nouveau_ramht_insert(chan, handle, obj); 108 + nouveau_gpuobj_ref(NULL, &obj); 109 + return ret; 110 + } 111 + 112 + static void 113 + nv84_crypt_tlb_flush(struct drm_device *dev, int engine) 91 114 { 92 115 nv50_vm_flush_engine(dev, 0x0a); 93 - } 94 - 95 - int 96 - nv84_crypt_init(struct drm_device *dev) 97 - { 98 - struct drm_nouveau_private *dev_priv = dev->dev_private; 99 - struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; 100 - 101 - if (!pcrypt->registered) { 102 - NVOBJ_CLASS(dev, 0x74c1, CRYPT); 103 - pcrypt->registered = true; 104 - } 105 - 106 - nv_mask(dev, 0x000200, 0x00004000, 0x00000000); 107 - nv_mask(dev, 0x000200, 0x00004000, 0x00004000); 108 - 109 - nouveau_irq_register(dev, 14, nv84_crypt_isr); 110 - nv_wr32(dev, 0x102130, 0xffffffff); 111 - nv_wr32(dev, 0x102140, 0xffffffbf); 112 - 113 - nv_wr32(dev, 0x10200c, 0x00000010); 114 - return 0; 115 - } 116 - 117 - void 118 - nv84_crypt_fini(struct drm_device *dev) 119 - { 120 - nv_wr32(dev, 0x102140, 0x00000000); 121 - nouveau_irq_unregister(dev, 14); 122 116 } 123 117 124 118 static void ··· 135 137 nv_wr32(dev, 0x10200c, 0x10); 136 138 137 139 nv50_fb_vm_trap(dev, show); 
140 + } 141 + 142 + static int 143 + nv84_crypt_fini(struct drm_device *dev, int engine) 144 + { 145 + nv_wr32(dev, 0x102140, 0x00000000); 146 + return 0; 147 + } 148 + 149 + static int 150 + nv84_crypt_init(struct drm_device *dev, int engine) 151 + { 152 + nv_mask(dev, 0x000200, 0x00004000, 0x00000000); 153 + nv_mask(dev, 0x000200, 0x00004000, 0x00004000); 154 + 155 + nv_wr32(dev, 0x102130, 0xffffffff); 156 + nv_wr32(dev, 0x102140, 0xffffffbf); 157 + 158 + nv_wr32(dev, 0x10200c, 0x00000010); 159 + return 0; 160 + } 161 + 162 + static void 163 + nv84_crypt_destroy(struct drm_device *dev, int engine) 164 + { 165 + struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine); 166 + 167 + NVOBJ_ENGINE_DEL(dev, CRYPT); 168 + 169 + nouveau_irq_unregister(dev, 14); 170 + kfree(pcrypt); 171 + } 172 + 173 + int 174 + nv84_crypt_create(struct drm_device *dev) 175 + { 176 + struct nv84_crypt_engine *pcrypt; 177 + 178 + pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL); 179 + if (!pcrypt) 180 + return -ENOMEM; 181 + 182 + pcrypt->base.destroy = nv84_crypt_destroy; 183 + pcrypt->base.init = nv84_crypt_init; 184 + pcrypt->base.fini = nv84_crypt_fini; 185 + pcrypt->base.context_new = nv84_crypt_context_new; 186 + pcrypt->base.context_del = nv84_crypt_context_del; 187 + pcrypt->base.object_new = nv84_crypt_object_new; 188 + pcrypt->base.tlb_flush = nv84_crypt_tlb_flush; 189 + 190 + nouveau_irq_register(dev, 14, nv84_crypt_isr); 191 + 192 + NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base); 193 + NVOBJ_CLASS (dev, 0x74c1, CRYPT); 194 + return 0; 138 195 }
+226
drivers/gpu/drm/nouveau/nva3_copy.c
··· 1 + /* 2 + * Copyright 2011 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: Ben Skeggs 23 + */ 24 + 25 + #include <linux/firmware.h> 26 + #include "drmP.h" 27 + #include "nouveau_drv.h" 28 + #include "nouveau_util.h" 29 + #include "nouveau_vm.h" 30 + #include "nouveau_ramht.h" 31 + #include "nva3_copy.fuc.h" 32 + 33 + struct nva3_copy_engine { 34 + struct nouveau_exec_engine base; 35 + }; 36 + 37 + static int 38 + nva3_copy_context_new(struct nouveau_channel *chan, int engine) 39 + { 40 + struct drm_device *dev = chan->dev; 41 + struct drm_nouveau_private *dev_priv = dev->dev_private; 42 + struct nouveau_gpuobj *ramin = chan->ramin; 43 + struct nouveau_gpuobj *ctx = NULL; 44 + int ret; 45 + 46 + NV_DEBUG(dev, "ch%d\n", chan->id); 47 + 48 + ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC | 49 + NVOBJ_FLAG_ZERO_FREE, &ctx); 50 + if (ret) 51 + return ret; 52 + 53 + nv_wo32(ramin, 0xc0, 0x00190000); 54 + nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1); 55 + nv_wo32(ramin, 0xc8, ctx->vinst); 56 + nv_wo32(ramin, 0xcc, 0x00000000); 57 + nv_wo32(ramin, 0xd0, 0x00000000); 58 + nv_wo32(ramin, 0xd4, 0x00000000); 59 + dev_priv->engine.instmem.flush(dev); 60 + 61 + atomic_inc(&chan->vm->engref[engine]); 62 + chan->engctx[engine] = ctx; 63 + return 0; 64 + } 65 + 66 + static int 67 + nva3_copy_object_new(struct nouveau_channel *chan, int engine, 68 + u32 handle, u16 class) 69 + { 70 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 71 + 72 + /* fuc engine doesn't need an object, our ramht code does.. 
*/ 73 + ctx->engine = 3; 74 + ctx->class = class; 75 + return nouveau_ramht_insert(chan, handle, ctx); 76 + } 77 + 78 + static void 79 + nva3_copy_context_del(struct nouveau_channel *chan, int engine) 80 + { 81 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 82 + struct drm_device *dev = chan->dev; 83 + u32 inst; 84 + 85 + inst = (chan->ramin->vinst >> 12); 86 + inst |= 0x40000000; 87 + 88 + /* disable fifo access */ 89 + nv_wr32(dev, 0x104048, 0x00000000); 90 + /* mark channel as unloaded if it's currently active */ 91 + if (nv_rd32(dev, 0x104050) == inst) 92 + nv_mask(dev, 0x104050, 0x40000000, 0x00000000); 93 + /* mark next channel as invalid if it's about to be loaded */ 94 + if (nv_rd32(dev, 0x104054) == inst) 95 + nv_mask(dev, 0x104054, 0x40000000, 0x00000000); 96 + /* restore fifo access */ 97 + nv_wr32(dev, 0x104048, 0x00000003); 98 + 99 + for (inst = 0xc0; inst <= 0xd4; inst += 4) 100 + nv_wo32(chan->ramin, inst, 0x00000000); 101 + 102 + nouveau_gpuobj_ref(NULL, &ctx); 103 + 104 + atomic_dec(&chan->vm->engref[engine]); 105 + chan->engctx[engine] = ctx; 106 + } 107 + 108 + static void 109 + nva3_copy_tlb_flush(struct drm_device *dev, int engine) 110 + { 111 + nv50_vm_flush_engine(dev, 0x0d); 112 + } 113 + 114 + static int 115 + nva3_copy_init(struct drm_device *dev, int engine) 116 + { 117 + int i; 118 + 119 + nv_mask(dev, 0x000200, 0x00002000, 0x00000000); 120 + nv_mask(dev, 0x000200, 0x00002000, 0x00002000); 121 + nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */ 122 + 123 + /* upload ucode */ 124 + nv_wr32(dev, 0x1041c0, 0x01000000); 125 + for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++) 126 + nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]); 127 + 128 + nv_wr32(dev, 0x104180, 0x01000000); 129 + for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) { 130 + if ((i & 0x3f) == 0) 131 + nv_wr32(dev, 0x104188, i >> 6); 132 + nv_wr32(dev, 0x104184, nva3_pcopy_code[i]); 133 + } 134 + 135 + /* start it running */ 136 + nv_wr32(dev, 0x10410c, 
0x00000000); 137 + nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */ 138 + nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */ 139 + return 0; 140 + } 141 + 142 + static int 143 + nva3_copy_fini(struct drm_device *dev, int engine) 144 + { 145 + nv_mask(dev, 0x104048, 0x00000003, 0x00000000); 146 + 147 + /* trigger fuc context unload */ 148 + nv_wait(dev, 0x104008, 0x0000000c, 0x00000000); 149 + nv_mask(dev, 0x104054, 0x40000000, 0x00000000); 150 + nv_wr32(dev, 0x104000, 0x00000008); 151 + nv_wait(dev, 0x104008, 0x00000008, 0x00000000); 152 + 153 + nv_wr32(dev, 0x104014, 0xffffffff); 154 + return 0; 155 + } 156 + 157 + static struct nouveau_enum nva3_copy_isr_error_name[] = { 158 + { 0x0001, "ILLEGAL_MTHD" }, 159 + { 0x0002, "INVALID_ENUM" }, 160 + { 0x0003, "INVALID_BITFIELD" }, 161 + {} 162 + }; 163 + 164 + static void 165 + nva3_copy_isr(struct drm_device *dev) 166 + { 167 + u32 dispatch = nv_rd32(dev, 0x10401c); 168 + u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16); 169 + u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff; 170 + u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff; 171 + u32 addr = nv_rd32(dev, 0x104040) >> 16; 172 + u32 mthd = (addr & 0x07ff) << 2; 173 + u32 subc = (addr & 0x3800) >> 11; 174 + u32 data = nv_rd32(dev, 0x104044); 175 + int chid = nv50_graph_isr_chid(dev, inst); 176 + 177 + if (stat & 0x00000040) { 178 + NV_INFO(dev, "PCOPY: DISPATCH_ERROR ["); 179 + nouveau_enum_print(nva3_copy_isr_error_name, ssta); 180 + printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n", 181 + chid, inst, subc, mthd, data); 182 + nv_wr32(dev, 0x104004, 0x00000040); 183 + stat &= ~0x00000040; 184 + } 185 + 186 + if (stat) { 187 + NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat); 188 + nv_wr32(dev, 0x104004, stat); 189 + } 190 + nv50_fb_vm_trap(dev, 1); 191 + } 192 + 193 + static void 194 + nva3_copy_destroy(struct drm_device *dev, int engine) 195 + { 196 + struct nva3_copy_engine *pcopy = nv_engine(dev, engine); 197 + 198 + 
nouveau_irq_unregister(dev, 22); 199 + 200 + NVOBJ_ENGINE_DEL(dev, COPY0); 201 + kfree(pcopy); 202 + } 203 + 204 + int 205 + nva3_copy_create(struct drm_device *dev) 206 + { 207 + struct nva3_copy_engine *pcopy; 208 + 209 + pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL); 210 + if (!pcopy) 211 + return -ENOMEM; 212 + 213 + pcopy->base.destroy = nva3_copy_destroy; 214 + pcopy->base.init = nva3_copy_init; 215 + pcopy->base.fini = nva3_copy_fini; 216 + pcopy->base.context_new = nva3_copy_context_new; 217 + pcopy->base.context_del = nva3_copy_context_del; 218 + pcopy->base.object_new = nva3_copy_object_new; 219 + pcopy->base.tlb_flush = nva3_copy_tlb_flush; 220 + 221 + nouveau_irq_register(dev, 22, nva3_copy_isr); 222 + 223 + NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base); 224 + NVOBJ_CLASS(dev, 0x85b5, COPY0); 225 + return 0; 226 + }
+870
drivers/gpu/drm/nouveau/nva3_copy.fuc
··· 1 + /* fuc microcode for copy engine on nva3- chipsets 2 + * 3 + * Copyright 2011 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 
22 + * 23 + * Authors: Ben Skeggs 24 + */ 25 + 26 + /* To build for nva3:nvc0 27 + * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h 28 + * 29 + * To build for nvc0- 30 + * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h 31 + */ 32 + 33 + ifdef(`NVA3', 34 + .section nva3_pcopy_data, 35 + .section nvc0_pcopy_data 36 + ) 37 + 38 + ctx_object: .b32 0 39 + ifdef(`NVA3', 40 + ctx_dma: 41 + ctx_dma_query: .b32 0 42 + ctx_dma_src: .b32 0 43 + ctx_dma_dst: .b32 0 44 + ,) 45 + .equ ctx_dma_count 3 46 + ctx_query_address_high: .b32 0 47 + ctx_query_address_low: .b32 0 48 + ctx_query_counter: .b32 0 49 + ctx_src_address_high: .b32 0 50 + ctx_src_address_low: .b32 0 51 + ctx_src_pitch: .b32 0 52 + ctx_src_tile_mode: .b32 0 53 + ctx_src_xsize: .b32 0 54 + ctx_src_ysize: .b32 0 55 + ctx_src_zsize: .b32 0 56 + ctx_src_zoff: .b32 0 57 + ctx_src_xoff: .b32 0 58 + ctx_src_yoff: .b32 0 59 + ctx_src_cpp: .b32 0 60 + ctx_dst_address_high: .b32 0 61 + ctx_dst_address_low: .b32 0 62 + ctx_dst_pitch: .b32 0 63 + ctx_dst_tile_mode: .b32 0 64 + ctx_dst_xsize: .b32 0 65 + ctx_dst_ysize: .b32 0 66 + ctx_dst_zsize: .b32 0 67 + ctx_dst_zoff: .b32 0 68 + ctx_dst_xoff: .b32 0 69 + ctx_dst_yoff: .b32 0 70 + ctx_dst_cpp: .b32 0 71 + ctx_format: .b32 0 72 + ctx_swz_const0: .b32 0 73 + ctx_swz_const1: .b32 0 74 + ctx_xcnt: .b32 0 75 + ctx_ycnt: .b32 0 76 + .align 256 77 + 78 + dispatch_table: 79 + // mthd 0x0000, NAME 80 + .b16 0x000 1 81 + .b32 ctx_object ~0xffffffff 82 + // mthd 0x0100, NOP 83 + .b16 0x040 1 84 + .b32 0x00010000 + cmd_nop ~0xffffffff 85 + // mthd 0x0140, PM_TRIGGER 86 + .b16 0x050 1 87 + .b32 0x00010000 + cmd_pm_trigger ~0xffffffff 88 + ifdef(`NVA3', ` 89 + // mthd 0x0180-0x018c, DMA_ 90 + .b16 0x060 ctx_dma_count 91 + dispatch_dma: 92 + .b32 0x00010000 + cmd_dma ~0xffffffff 93 + .b32 0x00010000 + cmd_dma ~0xffffffff 94 + .b32 0x00010000 + cmd_dma ~0xffffffff 95 + ',) 96 + // mthd 0x0200-0x0218, SRC_TILE 97 + .b16 0x80 7 98 + .b32 
ctx_src_tile_mode ~0x00000fff 99 + .b32 ctx_src_xsize ~0x0007ffff 100 + .b32 ctx_src_ysize ~0x00001fff 101 + .b32 ctx_src_zsize ~0x000007ff 102 + .b32 ctx_src_zoff ~0x00000fff 103 + .b32 ctx_src_xoff ~0x0007ffff 104 + .b32 ctx_src_yoff ~0x00001fff 105 + // mthd 0x0220-0x0238, DST_TILE 106 + .b16 0x88 7 107 + .b32 ctx_dst_tile_mode ~0x00000fff 108 + .b32 ctx_dst_xsize ~0x0007ffff 109 + .b32 ctx_dst_ysize ~0x00001fff 110 + .b32 ctx_dst_zsize ~0x000007ff 111 + .b32 ctx_dst_zoff ~0x00000fff 112 + .b32 ctx_dst_xoff ~0x0007ffff 113 + .b32 ctx_dst_yoff ~0x00001fff 114 + // mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH 115 + .b16 0xc0 2 116 + .b32 0x00010000 + cmd_exec ~0xffffffff 117 + .b32 0x00010000 + cmd_wrcache_flush ~0xffffffff 118 + // mthd 0x030c-0x0340, various stuff 119 + .b16 0xc3 14 120 + .b32 ctx_src_address_high ~0x000000ff 121 + .b32 ctx_src_address_low ~0xfffffff0 122 + .b32 ctx_dst_address_high ~0x000000ff 123 + .b32 ctx_dst_address_low ~0xfffffff0 124 + .b32 ctx_src_pitch ~0x0007ffff 125 + .b32 ctx_dst_pitch ~0x0007ffff 126 + .b32 ctx_xcnt ~0x0000ffff 127 + .b32 ctx_ycnt ~0x00001fff 128 + .b32 ctx_format ~0x0333ffff 129 + .b32 ctx_swz_const0 ~0xffffffff 130 + .b32 ctx_swz_const1 ~0xffffffff 131 + .b32 ctx_query_address_high ~0x000000ff 132 + .b32 ctx_query_address_low ~0xffffffff 133 + .b32 ctx_query_counter ~0xffffffff 134 + .b16 0x800 0 135 + 136 + ifdef(`NVA3', 137 + .section nva3_pcopy_code, 138 + .section nvc0_pcopy_code 139 + ) 140 + 141 + main: 142 + clear b32 $r0 143 + mov $sp $r0 144 + 145 + // setup i0 handler and route fifo and ctxswitch to it 146 + mov $r1 ih 147 + mov $iv0 $r1 148 + mov $r1 0x400 149 + movw $r2 0xfff3 150 + sethi $r2 0 151 + iowr I[$r2 + 0x300] $r2 152 + 153 + // enable interrupts 154 + or $r2 0xc 155 + iowr I[$r1] $r2 156 + bset $flags ie0 157 + 158 + // enable fifo access and context switching 159 + mov $r1 0x1200 160 + mov $r2 3 161 + iowr I[$r1] $r2 162 + 163 + // sleep forever, waking for interrupts 164 + bset $flags $p0 165 + 
spin: 166 + sleep $p0 167 + bra spin 168 + 169 + // i0 handler 170 + ih: 171 + iord $r1 I[$r0 + 0x200] 172 + 173 + and $r2 $r1 0x00000008 174 + bra e ih_no_chsw 175 + call chsw 176 + ih_no_chsw: 177 + and $r2 $r1 0x00000004 178 + bra e ih_no_cmd 179 + call dispatch 180 + 181 + ih_no_cmd: 182 + and $r1 $r1 0x0000000c 183 + iowr I[$r0 + 0x100] $r1 184 + iret 185 + 186 + // $p1 direction (0 = unload, 1 = load) 187 + // $r3 channel 188 + swctx: 189 + mov $r4 0x7700 190 + mov $xtargets $r4 191 + ifdef(`NVA3', ` 192 + // target 7 hardcoded to ctx dma object 193 + mov $xdbase $r0 194 + ', ` // NVC0 195 + // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1 196 + mov $r4 0x2100 197 + iord $r4 I[$r4 + 0] 198 + and $r4 1 199 + shl b32 $r4 4 200 + add b32 $r4 0x30 201 + 202 + // channel is in vram 203 + mov $r15 0x61c 204 + shl b32 $r15 6 205 + mov $r5 0x114 206 + iowrs I[$r15] $r5 207 + 208 + // read 16-byte PCOPYn info, containing context pointer, from channel 209 + shl b32 $r5 $r3 4 210 + add b32 $r5 2 211 + mov $xdbase $r5 212 + mov $r5 $sp 213 + // get a chunk of stack space, aligned to 256 byte boundary 214 + sub b32 $r5 0x100 215 + mov $r6 0xff 216 + not b32 $r6 217 + and $r5 $r6 218 + sethi $r5 0x00020000 219 + xdld $r4 $r5 220 + xdwait 221 + sethi $r5 0 222 + 223 + // set context pointer, from within channel VM 224 + mov $r14 0 225 + iowrs I[$r15] $r14 226 + ld b32 $r4 D[$r5 + 0] 227 + shr b32 $r4 8 228 + ld b32 $r6 D[$r5 + 4] 229 + shl b32 $r6 24 230 + or $r4 $r6 231 + mov $xdbase $r4 232 + ') 233 + // 256-byte context, at start of data segment 234 + mov b32 $r4 $r0 235 + sethi $r4 0x60000 236 + 237 + // swap! 
238 + bra $p1 swctx_load 239 + xdst $r0 $r4 240 + bra swctx_done 241 + swctx_load: 242 + xdld $r0 $r4 243 + swctx_done: 244 + xdwait 245 + ret 246 + 247 + chsw: 248 + // read current channel 249 + mov $r2 0x1400 250 + iord $r3 I[$r2] 251 + 252 + // if it's active, unload it and return 253 + xbit $r15 $r3 0x1e 254 + bra e chsw_no_unload 255 + bclr $flags $p1 256 + call swctx 257 + bclr $r3 0x1e 258 + iowr I[$r2] $r3 259 + mov $r4 1 260 + iowr I[$r2 + 0x200] $r4 261 + ret 262 + 263 + // read next channel 264 + chsw_no_unload: 265 + iord $r3 I[$r2 + 0x100] 266 + 267 + // is there a channel waiting to be loaded? 268 + xbit $r13 $r3 0x1e 269 + bra e chsw_finish_load 270 + bset $flags $p1 271 + call swctx 272 + ifdef(`NVA3', 273 + // load dma objects back into TARGET regs 274 + mov $r5 ctx_dma 275 + mov $r6 ctx_dma_count 276 + chsw_load_ctx_dma: 277 + ld b32 $r7 D[$r5 + $r6 * 4] 278 + add b32 $r8 $r6 0x180 279 + shl b32 $r8 8 280 + iowr I[$r8] $r7 281 + sub b32 $r6 1 282 + bra nc chsw_load_ctx_dma 283 + ,) 284 + 285 + chsw_finish_load: 286 + mov $r3 2 287 + iowr I[$r2 + 0x200] $r3 288 + ret 289 + 290 + dispatch: 291 + // read incoming fifo command 292 + mov $r3 0x1900 293 + iord $r2 I[$r3 + 0x100] 294 + iord $r3 I[$r3 + 0x000] 295 + and $r4 $r2 0x7ff 296 + // $r2 will be used to store exception data 297 + shl b32 $r2 0x10 298 + 299 + // lookup method in the dispatch table, ILLEGAL_MTHD if not found 300 + mov $r5 dispatch_table 301 + clear b32 $r6 302 + clear b32 $r7 303 + dispatch_loop: 304 + ld b16 $r6 D[$r5 + 0] 305 + ld b16 $r7 D[$r5 + 2] 306 + add b32 $r5 4 307 + cmpu b32 $r4 $r6 308 + bra c dispatch_illegal_mthd 309 + add b32 $r7 $r6 310 + cmpu b32 $r4 $r7 311 + bra c dispatch_valid_mthd 312 + sub b32 $r7 $r6 313 + shl b32 $r7 3 314 + add b32 $r5 $r7 315 + bra dispatch_loop 316 + 317 + // ensure no bits set in reserved fields, INVALID_BITFIELD 318 + dispatch_valid_mthd: 319 + sub b32 $r4 $r6 320 + shl b32 $r4 3 321 + add b32 $r4 $r5 322 + ld b32 $r5 D[$r4 + 4] 323 + 
and $r5 $r3 324 + cmpu b32 $r5 0 325 + bra ne dispatch_invalid_bitfield 326 + 327 + // depending on dispatch flags: execute method, or save data as state 328 + ld b16 $r5 D[$r4 + 0] 329 + ld b16 $r6 D[$r4 + 2] 330 + cmpu b32 $r6 0 331 + bra ne dispatch_cmd 332 + st b32 D[$r5] $r3 333 + bra dispatch_done 334 + dispatch_cmd: 335 + bclr $flags $p1 336 + call $r5 337 + bra $p1 dispatch_error 338 + bra dispatch_done 339 + 340 + dispatch_invalid_bitfield: 341 + or $r2 2 342 + dispatch_illegal_mthd: 343 + or $r2 1 344 + 345 + // store exception data in SCRATCH0/SCRATCH1, signal hostirq 346 + dispatch_error: 347 + mov $r4 0x1000 348 + iowr I[$r4 + 0x000] $r2 349 + iowr I[$r4 + 0x100] $r3 350 + mov $r2 0x40 351 + iowr I[$r0] $r2 352 + hostirq_wait: 353 + iord $r2 I[$r0 + 0x200] 354 + and $r2 0x40 355 + cmpu b32 $r2 0 356 + bra ne hostirq_wait 357 + 358 + dispatch_done: 359 + mov $r2 0x1d00 360 + mov $r3 1 361 + iowr I[$r2] $r3 362 + ret 363 + 364 + // No-operation 365 + // 366 + // Inputs: 367 + // $r1: irqh state 368 + // $r2: hostirq state 369 + // $r3: data 370 + // $r4: dispatch table entry 371 + // Outputs: 372 + // $r1: irqh state 373 + // $p1: set on error 374 + // $r2: hostirq state 375 + // $r3: data 376 + cmd_nop: 377 + ret 378 + 379 + // PM_TRIGGER 380 + // 381 + // Inputs: 382 + // $r1: irqh state 383 + // $r2: hostirq state 384 + // $r3: data 385 + // $r4: dispatch table entry 386 + // Outputs: 387 + // $r1: irqh state 388 + // $p1: set on error 389 + // $r2: hostirq state 390 + // $r3: data 391 + cmd_pm_trigger: 392 + mov $r2 0x2200 393 + clear b32 $r3 394 + sethi $r3 0x20000 395 + iowr I[$r2] $r3 396 + ret 397 + 398 + ifdef(`NVA3', 399 + // SET_DMA_* method handler 400 + // 401 + // Inputs: 402 + // $r1: irqh state 403 + // $r2: hostirq state 404 + // $r3: data 405 + // $r4: dispatch table entry 406 + // Outputs: 407 + // $r1: irqh state 408 + // $p1: set on error 409 + // $r2: hostirq state 410 + // $r3: data 411 + cmd_dma: 412 + sub b32 $r4 dispatch_dma 413 
+ shr b32 $r4 1 414 + bset $r3 0x1e 415 + st b32 D[$r4 + ctx_dma] $r3 416 + add b32 $r4 0x600 417 + shl b32 $r4 6 418 + iowr I[$r4] $r3 419 + ret 420 + ,) 421 + 422 + // Calculates the hw swizzle mask and adjusts the surface's xcnt to match 423 + // 424 + cmd_exec_set_format: 425 + // zero out a chunk of the stack to store the swizzle into 426 + add $sp -0x10 427 + st b32 D[$sp + 0x00] $r0 428 + st b32 D[$sp + 0x04] $r0 429 + st b32 D[$sp + 0x08] $r0 430 + st b32 D[$sp + 0x0c] $r0 431 + 432 + // extract cpp, src_ncomp and dst_ncomp from FORMAT 433 + ld b32 $r4 D[$r0 + ctx_format] 434 + extr $r5 $r4 16:17 435 + add b32 $r5 1 436 + extr $r6 $r4 20:21 437 + add b32 $r6 1 438 + extr $r7 $r4 24:25 439 + add b32 $r7 1 440 + 441 + // convert FORMAT swizzle mask to hw swizzle mask 442 + bclr $flags $p2 443 + clear b32 $r8 444 + clear b32 $r9 445 + ncomp_loop: 446 + and $r10 $r4 0xf 447 + shr b32 $r4 4 448 + clear b32 $r11 449 + bpc_loop: 450 + cmpu b8 $r10 4 451 + bra nc cmp_c0 452 + mulu $r12 $r10 $r5 453 + add b32 $r12 $r11 454 + bset $flags $p2 455 + bra bpc_next 456 + cmp_c0: 457 + bra ne cmp_c1 458 + mov $r12 0x10 459 + add b32 $r12 $r11 460 + bra bpc_next 461 + cmp_c1: 462 + cmpu b8 $r10 6 463 + bra nc cmp_zero 464 + mov $r12 0x14 465 + add b32 $r12 $r11 466 + bra bpc_next 467 + cmp_zero: 468 + mov $r12 0x80 469 + bpc_next: 470 + st b8 D[$sp + $r8] $r12 471 + add b32 $r8 1 472 + add b32 $r11 1 473 + cmpu b32 $r11 $r5 474 + bra c bpc_loop 475 + add b32 $r9 1 476 + cmpu b32 $r9 $r7 477 + bra c ncomp_loop 478 + 479 + // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang) 480 + mulu $r6 $r5 481 + st b32 D[$r0 + ctx_src_cpp] $r6 482 + ld b32 $r8 D[$r0 + ctx_xcnt] 483 + mulu $r6 $r8 484 + bra $p2 dst_xcnt 485 + clear b32 $r6 486 + 487 + dst_xcnt: 488 + mulu $r7 $r5 489 + st b32 D[$r0 + ctx_dst_cpp] $r7 490 + mulu $r7 $r8 491 + 492 + mov $r5 0x810 493 + shl b32 $r5 6 494 + iowr I[$r5 + 0x000] $r6 495 + iowr I[$r5 + 0x100] $r7 496 + add b32 $r5 0x800 497 + 
ld b32 $r6 D[$r0 + ctx_dst_cpp] 498 + sub b32 $r6 1 499 + shl b32 $r6 8 500 + ld b32 $r7 D[$r0 + ctx_src_cpp] 501 + sub b32 $r7 1 502 + or $r6 $r7 503 + iowr I[$r5 + 0x000] $r6 504 + add b32 $r5 0x100 505 + ld b32 $r6 D[$sp + 0x00] 506 + iowr I[$r5 + 0x000] $r6 507 + ld b32 $r6 D[$sp + 0x04] 508 + iowr I[$r5 + 0x100] $r6 509 + ld b32 $r6 D[$sp + 0x08] 510 + iowr I[$r5 + 0x200] $r6 511 + ld b32 $r6 D[$sp + 0x0c] 512 + iowr I[$r5 + 0x300] $r6 513 + add b32 $r5 0x400 514 + ld b32 $r6 D[$r0 + ctx_swz_const0] 515 + iowr I[$r5 + 0x000] $r6 516 + ld b32 $r6 D[$r0 + ctx_swz_const1] 517 + iowr I[$r5 + 0x100] $r6 518 + add $sp 0x10 519 + ret 520 + 521 + // Setup to handle a tiled surface 522 + // 523 + // Calculates a number of parameters the hardware requires in order 524 + // to correctly handle tiling. 525 + // 526 + // Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE): 527 + // nTx = round_up(w * cpp, 1 << Tp) >> Tp 528 + // nTy = round_up(h, 1 << Th) >> Th 529 + // Txo = (x * cpp) & ((1 << Tp) - 1) 530 + // Tx = (x * cpp) >> Tp 531 + // Tyo = y & ((1 << Th) - 1) 532 + // Ty = y >> Th 533 + // Tzo = z & ((1 << Td) - 1) 534 + // Tz = z >> Td 535 + // 536 + // off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo 537 + // off += ((Tz * nTy * nTx)) + (Ty * nTx) + Tx) << Td << Th << Tp; 538 + // 539 + // Inputs: 540 + // $r4: hw command (0x104800) 541 + // $r5: ctx offset adjustment for src/dst selection 542 + // $p2: set if dst surface 543 + // 544 + cmd_exec_set_surface_tiled: 545 + // translate TILE_MODE into Tp, Th, Td shift values 546 + ld b32 $r7 D[$r5 + ctx_src_tile_mode] 547 + extr $r9 $r7 8:11 548 + extr $r8 $r7 4:7 549 + ifdef(`NVA3', 550 + add b32 $r8 2 551 + , 552 + add b32 $r8 3 553 + ) 554 + extr $r7 $r7 0:3 555 + cmp b32 $r7 0xe 556 + bra ne xtile64 557 + mov $r7 4 558 + bra xtileok 559 + xtile64: 560 + xbit $r7 $flags $p2 561 + add b32 $r7 17 562 + bset $r4 $r7 563 + mov $r7 6 564 + xtileok: 565 + 566 + // Op = (x * cpp) & ((1 << Tp) - 1) 567 + // Tx 
= (x * cpp) >> Tp 568 + ld b32 $r10 D[$r5 + ctx_src_xoff] 569 + ld b32 $r11 D[$r5 + ctx_src_cpp] 570 + mulu $r10 $r11 571 + mov $r11 1 572 + shl b32 $r11 $r7 573 + sub b32 $r11 1 574 + and $r12 $r10 $r11 575 + shr b32 $r10 $r7 576 + 577 + // Tyo = y & ((1 << Th) - 1) 578 + // Ty = y >> Th 579 + ld b32 $r13 D[$r5 + ctx_src_yoff] 580 + mov $r14 1 581 + shl b32 $r14 $r8 582 + sub b32 $r14 1 583 + and $r11 $r13 $r14 584 + shr b32 $r13 $r8 585 + 586 + // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo) 587 + add b32 $r14 1 588 + shl b32 $r15 $r14 12 589 + sub b32 $r14 $r11 590 + or $r15 $r14 591 + xbit $r6 $flags $p2 592 + add b32 $r6 0x208 593 + shl b32 $r6 8 594 + iowr I[$r6 + 0x000] $r15 595 + 596 + // Op += Tyo << Tp 597 + shl b32 $r11 $r7 598 + add b32 $r12 $r11 599 + 600 + // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp) 601 + ld b32 $r15 D[$r5 + ctx_src_xsize] 602 + ld b32 $r11 D[$r5 + ctx_src_cpp] 603 + mulu $r15 $r11 604 + mov $r11 1 605 + shl b32 $r11 $r7 606 + sub b32 $r11 1 607 + add b32 $r15 $r11 608 + shr b32 $r15 $r7 609 + push $r15 610 + 611 + // nTy = (h + ((1 << Th) - 1)) >> Th 612 + ld b32 $r15 D[$r5 + ctx_src_ysize] 613 + mov $r11 1 614 + shl b32 $r11 $r8 615 + sub b32 $r11 1 616 + add b32 $r15 $r11 617 + shr b32 $r15 $r8 618 + push $r15 619 + 620 + // Tys = Tp + Th 621 + // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td 622 + add b32 $r7 $r8 623 + sub b32 $r8 2 624 + mov $r11 1 625 + shl b32 $r11 $r8 626 + shl b32 $r11 $r9 627 + 628 + // Tzo = z & ((1 << Td) - 1) 629 + // Tz = z >> Td 630 + // Op += Tzo << Tys 631 + // Ts = Tys + Td 632 + ld b32 $r8 D[$r5 + ctx_src_zoff] 633 + mov $r14 1 634 + shl b32 $r14 $r9 635 + sub b32 $r14 1 636 + and $r15 $r8 $r14 637 + shl b32 $r15 $r7 638 + add b32 $r12 $r15 639 + add b32 $r7 $r9 640 + shr b32 $r8 $r9 641 + 642 + // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts 643 + pop $r15 644 + pop $r9 645 + mulu $r13 $r9 646 + add b32 $r10 $r13 647 + mulu $r8 $r9 648 + mulu $r8 $r15 649 + add b32 $r10 $r8 650 + shl b32 $r10 $r7 
651 + 652 + // PITCH = (nTx - 1) << Ts 653 + sub b32 $r9 1 654 + shl b32 $r9 $r7 655 + iowr I[$r6 + 0x200] $r9 656 + 657 + // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff 658 + // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16 659 + ld b32 $r7 D[$r5 + ctx_src_address_low] 660 + ld b32 $r8 D[$r5 + ctx_src_address_high] 661 + add b32 $r10 $r12 662 + add b32 $r7 $r10 663 + adc b32 $r8 0 664 + shl b32 $r8 16 665 + or $r8 $r11 666 + sub b32 $r6 0x600 667 + iowr I[$r6 + 0x000] $r7 668 + add b32 $r6 0x400 669 + iowr I[$r6 + 0x000] $r8 670 + ret 671 + 672 + // Setup to handle a linear surface 673 + // 674 + // Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting 675 + // 676 + cmd_exec_set_surface_linear: 677 + xbit $r6 $flags $p2 678 + add b32 $r6 0x202 679 + shl b32 $r6 8 680 + ld b32 $r7 D[$r5 + ctx_src_address_low] 681 + iowr I[$r6 + 0x000] $r7 682 + add b32 $r6 0x400 683 + ld b32 $r7 D[$r5 + ctx_src_address_high] 684 + shl b32 $r7 16 685 + iowr I[$r6 + 0x000] $r7 686 + add b32 $r6 0x400 687 + ld b32 $r7 D[$r5 + ctx_src_pitch] 688 + iowr I[$r6 + 0x000] $r7 689 + ret 690 + 691 + // wait for regs to be available for use 692 + cmd_exec_wait: 693 + push $r0 694 + push $r1 695 + mov $r0 0x800 696 + shl b32 $r0 6 697 + loop: 698 + iord $r1 I[$r0] 699 + and $r1 1 700 + bra ne loop 701 + pop $r1 702 + pop $r0 703 + ret 704 + 705 + cmd_exec_query: 706 + // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI } 707 + xbit $r4 $r3 13 708 + bra ne query_counter 709 + call cmd_exec_wait 710 + mov $r4 0x80c 711 + shl b32 $r4 6 712 + ld b32 $r5 D[$r0 + ctx_query_address_low] 713 + add b32 $r5 4 714 + iowr I[$r4 + 0x000] $r5 715 + iowr I[$r4 + 0x100] $r0 716 + mov $r5 0xc 717 + iowr I[$r4 + 0x200] $r5 718 + add b32 $r4 0x400 719 + ld b32 $r5 D[$r0 + ctx_query_address_high] 720 + shl b32 $r5 16 721 + iowr I[$r4 + 0x000] $r5 722 + add b32 $r4 0x500 723 + mov $r5 0x00000b00 724 + sethi $r5 0x00010000 725 + iowr I[$r4 + 0x000] $r5 726 + mov $r5 0x00004040 727 + shl b32 $r5 1 
728 + sethi $r5 0x80800000 729 + iowr I[$r4 + 0x100] $r5 730 + mov $r5 0x00001110 731 + sethi $r5 0x13120000 732 + iowr I[$r4 + 0x200] $r5 733 + mov $r5 0x00001514 734 + sethi $r5 0x17160000 735 + iowr I[$r4 + 0x300] $r5 736 + mov $r5 0x00002601 737 + sethi $r5 0x00010000 738 + mov $r4 0x800 739 + shl b32 $r4 6 740 + iowr I[$r4 + 0x000] $r5 741 + 742 + // write COUNTER 743 + query_counter: 744 + call cmd_exec_wait 745 + mov $r4 0x80c 746 + shl b32 $r4 6 747 + ld b32 $r5 D[$r0 + ctx_query_address_low] 748 + iowr I[$r4 + 0x000] $r5 749 + iowr I[$r4 + 0x100] $r0 750 + mov $r5 0x4 751 + iowr I[$r4 + 0x200] $r5 752 + add b32 $r4 0x400 753 + ld b32 $r5 D[$r0 + ctx_query_address_high] 754 + shl b32 $r5 16 755 + iowr I[$r4 + 0x000] $r5 756 + add b32 $r4 0x500 757 + mov $r5 0x00000300 758 + iowr I[$r4 + 0x000] $r5 759 + mov $r5 0x00001110 760 + sethi $r5 0x13120000 761 + iowr I[$r4 + 0x100] $r5 762 + ld b32 $r5 D[$r0 + ctx_query_counter] 763 + add b32 $r4 0x500 764 + iowr I[$r4 + 0x000] $r5 765 + mov $r5 0x00002601 766 + sethi $r5 0x00010000 767 + mov $r4 0x800 768 + shl b32 $r4 6 769 + iowr I[$r4 + 0x000] $r5 770 + ret 771 + 772 + // Execute a copy operation 773 + // 774 + // Inputs: 775 + // $r1: irqh state 776 + // $r2: hostirq state 777 + // $r3: data 778 + // 000002000 QUERY_SHORT 779 + // 000001000 QUERY 780 + // 000000100 DST_LINEAR 781 + // 000000010 SRC_LINEAR 782 + // 000000001 FORMAT 783 + // $r4: dispatch table entry 784 + // Outputs: 785 + // $r1: irqh state 786 + // $p1: set on error 787 + // $r2: hostirq state 788 + // $r3: data 789 + cmd_exec: 790 + call cmd_exec_wait 791 + 792 + // if format requested, call function to calculate it, otherwise 793 + // fill in cpp/xcnt for both surfaces as if (cpp == 1) 794 + xbit $r15 $r3 0 795 + bra e cmd_exec_no_format 796 + call cmd_exec_set_format 797 + mov $r4 0x200 798 + bra cmd_exec_init_src_surface 799 + cmd_exec_no_format: 800 + mov $r6 0x810 801 + shl b32 $r6 6 802 + mov $r7 1 803 + st b32 D[$r0 + ctx_src_cpp] $r7 
804 + st b32 D[$r0 + ctx_dst_cpp] $r7 805 + ld b32 $r7 D[$r0 + ctx_xcnt] 806 + iowr I[$r6 + 0x000] $r7 807 + iowr I[$r6 + 0x100] $r7 808 + clear b32 $r4 809 + 810 + cmd_exec_init_src_surface: 811 + bclr $flags $p2 812 + clear b32 $r5 813 + xbit $r15 $r3 4 814 + bra e src_tiled 815 + call cmd_exec_set_surface_linear 816 + bra cmd_exec_init_dst_surface 817 + src_tiled: 818 + call cmd_exec_set_surface_tiled 819 + bset $r4 7 820 + 821 + cmd_exec_init_dst_surface: 822 + bset $flags $p2 823 + mov $r5 ctx_dst_address_high - ctx_src_address_high 824 + xbit $r15 $r3 8 825 + bra e dst_tiled 826 + call cmd_exec_set_surface_linear 827 + bra cmd_exec_kick 828 + dst_tiled: 829 + call cmd_exec_set_surface_tiled 830 + bset $r4 8 831 + 832 + cmd_exec_kick: 833 + mov $r5 0x800 834 + shl b32 $r5 6 835 + ld b32 $r6 D[$r0 + ctx_ycnt] 836 + iowr I[$r5 + 0x100] $r6 837 + mov $r6 0x0041 838 + // SRC_TARGET = 1, DST_TARGET = 2 839 + sethi $r6 0x44000000 840 + or $r4 $r6 841 + iowr I[$r5] $r4 842 + 843 + // if requested, queue up a QUERY write after the copy has completed 844 + xbit $r15 $r3 12 845 + bra e cmd_exec_done 846 + call cmd_exec_query 847 + 848 + cmd_exec_done: 849 + ret 850 + 851 + // Flush write cache 852 + // 853 + // Inputs: 854 + // $r1: irqh state 855 + // $r2: hostirq state 856 + // $r3: data 857 + // $r4: dispatch table entry 858 + // Outputs: 859 + // $r1: irqh state 860 + // $p1: set on error 861 + // $r2: hostirq state 862 + // $r3: data 863 + cmd_wrcache_flush: 864 + mov $r2 0x2200 865 + clear b32 $r3 866 + sethi $r3 0x10000 867 + iowr I[$r2] $r3 868 + ret 869 + 870 + .align 0x100
+534
drivers/gpu/drm/nouveau/nva3_copy.fuc.h
··· 1 + uint32_t nva3_pcopy_data[] = { 2 + 0x00000000, 3 + 0x00000000, 4 + 0x00000000, 5 + 0x00000000, 6 + 0x00000000, 7 + 0x00000000, 8 + 0x00000000, 9 + 0x00000000, 10 + 0x00000000, 11 + 0x00000000, 12 + 0x00000000, 13 + 0x00000000, 14 + 0x00000000, 15 + 0x00000000, 16 + 0x00000000, 17 + 0x00000000, 18 + 0x00000000, 19 + 0x00000000, 20 + 0x00000000, 21 + 0x00000000, 22 + 0x00000000, 23 + 0x00000000, 24 + 0x00000000, 25 + 0x00000000, 26 + 0x00000000, 27 + 0x00000000, 28 + 0x00000000, 29 + 0x00000000, 30 + 0x00000000, 31 + 0x00000000, 32 + 0x00000000, 33 + 0x00000000, 34 + 0x00000000, 35 + 0x00000000, 36 + 0x00000000, 37 + 0x00000000, 38 + 0x00000000, 39 + 0x00000000, 40 + 0x00000000, 41 + 0x00000000, 42 + 0x00000000, 43 + 0x00000000, 44 + 0x00000000, 45 + 0x00000000, 46 + 0x00000000, 47 + 0x00000000, 48 + 0x00000000, 49 + 0x00000000, 50 + 0x00000000, 51 + 0x00000000, 52 + 0x00000000, 53 + 0x00000000, 54 + 0x00000000, 55 + 0x00000000, 56 + 0x00000000, 57 + 0x00000000, 58 + 0x00000000, 59 + 0x00000000, 60 + 0x00000000, 61 + 0x00000000, 62 + 0x00000000, 63 + 0x00000000, 64 + 0x00000000, 65 + 0x00000000, 66 + 0x00010000, 67 + 0x00000000, 68 + 0x00000000, 69 + 0x00010040, 70 + 0x00010160, 71 + 0x00000000, 72 + 0x00010050, 73 + 0x00010162, 74 + 0x00000000, 75 + 0x00030060, 76 + 0x00010170, 77 + 0x00000000, 78 + 0x00010170, 79 + 0x00000000, 80 + 0x00010170, 81 + 0x00000000, 82 + 0x00070080, 83 + 0x00000028, 84 + 0xfffff000, 85 + 0x0000002c, 86 + 0xfff80000, 87 + 0x00000030, 88 + 0xffffe000, 89 + 0x00000034, 90 + 0xfffff800, 91 + 0x00000038, 92 + 0xfffff000, 93 + 0x0000003c, 94 + 0xfff80000, 95 + 0x00000040, 96 + 0xffffe000, 97 + 0x00070088, 98 + 0x00000054, 99 + 0xfffff000, 100 + 0x00000058, 101 + 0xfff80000, 102 + 0x0000005c, 103 + 0xffffe000, 104 + 0x00000060, 105 + 0xfffff800, 106 + 0x00000064, 107 + 0xfffff000, 108 + 0x00000068, 109 + 0xfff80000, 110 + 0x0000006c, 111 + 0xffffe000, 112 + 0x000200c0, 113 + 0x00010492, 114 + 0x00000000, 115 + 0x0001051b, 116 + 
0x00000000, 117 + 0x000e00c3, 118 + 0x0000001c, 119 + 0xffffff00, 120 + 0x00000020, 121 + 0x0000000f, 122 + 0x00000048, 123 + 0xffffff00, 124 + 0x0000004c, 125 + 0x0000000f, 126 + 0x00000024, 127 + 0xfff80000, 128 + 0x00000050, 129 + 0xfff80000, 130 + 0x00000080, 131 + 0xffff0000, 132 + 0x00000084, 133 + 0xffffe000, 134 + 0x00000074, 135 + 0xfccc0000, 136 + 0x00000078, 137 + 0x00000000, 138 + 0x0000007c, 139 + 0x00000000, 140 + 0x00000010, 141 + 0xffffff00, 142 + 0x00000014, 143 + 0x00000000, 144 + 0x00000018, 145 + 0x00000000, 146 + 0x00000800, 147 + }; 148 + 149 + uint32_t nva3_pcopy_code[] = { 150 + 0x04fe04bd, 151 + 0x3517f000, 152 + 0xf10010fe, 153 + 0xf1040017, 154 + 0xf0fff327, 155 + 0x22d00023, 156 + 0x0c25f0c0, 157 + 0xf40012d0, 158 + 0x17f11031, 159 + 0x27f01200, 160 + 0x0012d003, 161 + 0xf40031f4, 162 + 0x0ef40028, 163 + 0x8001cffd, 164 + 0xf40812c4, 165 + 0x21f4060b, 166 + 0x0412c472, 167 + 0xf4060bf4, 168 + 0x11c4c321, 169 + 0x4001d00c, 170 + 0x47f101f8, 171 + 0x4bfe7700, 172 + 0x0007fe00, 173 + 0xf00204b9, 174 + 0x01f40643, 175 + 0x0604fa09, 176 + 0xfa060ef4, 177 + 0x03f80504, 178 + 0x27f100f8, 179 + 0x23cf1400, 180 + 0x1e3fc800, 181 + 0xf4170bf4, 182 + 0x21f40132, 183 + 0x1e3af052, 184 + 0xf00023d0, 185 + 0x24d00147, 186 + 0xcf00f880, 187 + 0x3dc84023, 188 + 0x220bf41e, 189 + 0xf40131f4, 190 + 0x57f05221, 191 + 0x0367f004, 192 + 0xa07856bc, 193 + 0xb6018068, 194 + 0x87d00884, 195 + 0x0162b600, 196 + 0xf0f018f4, 197 + 0x23d00237, 198 + 0xf100f880, 199 + 0xcf190037, 200 + 0x33cf4032, 201 + 0xff24e400, 202 + 0x1024b607, 203 + 0x010057f1, 204 + 0x74bd64bd, 205 + 0x58005658, 206 + 0x50b60157, 207 + 0x0446b804, 208 + 0xbb4d08f4, 209 + 0x47b80076, 210 + 0x0f08f404, 211 + 0xb60276bb, 212 + 0x57bb0374, 213 + 0xdf0ef400, 214 + 0xb60246bb, 215 + 0x45bb0344, 216 + 0x01459800, 217 + 0xb00453fd, 218 + 0x1bf40054, 219 + 0x00455820, 220 + 0xb0014658, 221 + 0x1bf40064, 222 + 0x00538009, 223 + 0xf4300ef4, 224 + 0x55f90132, 225 + 0xf40c01f4, 226 + 0x25f0250e, 227 + 
0x0125f002, 228 + 0x100047f1, 229 + 0xd00042d0, 230 + 0x27f04043, 231 + 0x0002d040, 232 + 0xf08002cf, 233 + 0x24b04024, 234 + 0xf71bf400, 235 + 0x1d0027f1, 236 + 0xd00137f0, 237 + 0x00f80023, 238 + 0x27f100f8, 239 + 0x34bd2200, 240 + 0xd00233f0, 241 + 0x00f80023, 242 + 0x012842b7, 243 + 0xf00145b6, 244 + 0x43801e39, 245 + 0x0040b701, 246 + 0x0644b606, 247 + 0xf80043d0, 248 + 0xf030f400, 249 + 0xb00001b0, 250 + 0x01b00101, 251 + 0x0301b002, 252 + 0xc71d0498, 253 + 0x50b63045, 254 + 0x3446c701, 255 + 0xc70160b6, 256 + 0x70b63847, 257 + 0x0232f401, 258 + 0x94bd84bd, 259 + 0xb60f4ac4, 260 + 0xb4bd0445, 261 + 0xf404a430, 262 + 0xa5ff0f18, 263 + 0x00cbbbc0, 264 + 0xf40231f4, 265 + 0x1bf4220e, 266 + 0x10c7f00c, 267 + 0xf400cbbb, 268 + 0xa430160e, 269 + 0x0c18f406, 270 + 0xbb14c7f0, 271 + 0x0ef400cb, 272 + 0x80c7f107, 273 + 0x01c83800, 274 + 0xb60180b6, 275 + 0xb5b801b0, 276 + 0xc308f404, 277 + 0xb80190b6, 278 + 0x08f40497, 279 + 0x0065fdb2, 280 + 0x98110680, 281 + 0x68fd2008, 282 + 0x0502f400, 283 + 0x75fd64bd, 284 + 0x1c078000, 285 + 0xf10078fd, 286 + 0xb6081057, 287 + 0x56d00654, 288 + 0x4057d000, 289 + 0x080050b7, 290 + 0xb61c0698, 291 + 0x64b60162, 292 + 0x11079808, 293 + 0xfd0172b6, 294 + 0x56d00567, 295 + 0x0050b700, 296 + 0x0060b401, 297 + 0xb40056d0, 298 + 0x56d00160, 299 + 0x0260b440, 300 + 0xb48056d0, 301 + 0x56d00360, 302 + 0x0050b7c0, 303 + 0x1e069804, 304 + 0x980056d0, 305 + 0x56d01f06, 306 + 0x1030f440, 307 + 0x579800f8, 308 + 0x6879c70a, 309 + 0xb66478c7, 310 + 0x77c70280, 311 + 0x0e76b060, 312 + 0xf0091bf4, 313 + 0x0ef40477, 314 + 0x027cf00f, 315 + 0xfd1170b6, 316 + 0x77f00947, 317 + 0x0f5a9806, 318 + 0xfd115b98, 319 + 0xb7f000ab, 320 + 0x04b7bb01, 321 + 0xff01b2b6, 322 + 0xa7bbc4ab, 323 + 0x105d9805, 324 + 0xbb01e7f0, 325 + 0xe2b604e8, 326 + 0xb4deff01, 327 + 0xb605d8bb, 328 + 0xef9401e0, 329 + 0x02ebbb0c, 330 + 0xf005fefd, 331 + 0x60b7026c, 332 + 0x64b60208, 333 + 0x006fd008, 334 + 0xbb04b7bb, 335 + 0x5f9800cb, 336 + 0x115b980b, 337 + 0xf000fbfd, 338 + 
0xb7bb01b7, 339 + 0x01b2b604, 340 + 0xbb00fbbb, 341 + 0xf0f905f7, 342 + 0xf00c5f98, 343 + 0xb8bb01b7, 344 + 0x01b2b604, 345 + 0xbb00fbbb, 346 + 0xf0f905f8, 347 + 0xb60078bb, 348 + 0xb7f00282, 349 + 0x04b8bb01, 350 + 0x9804b9bb, 351 + 0xe7f00e58, 352 + 0x04e9bb01, 353 + 0xff01e2b6, 354 + 0xf7bbf48e, 355 + 0x00cfbb04, 356 + 0xbb0079bb, 357 + 0xf0fc0589, 358 + 0xd9fd90fc, 359 + 0x00adbb00, 360 + 0xfd0089fd, 361 + 0xa8bb008f, 362 + 0x04a7bb00, 363 + 0xbb0192b6, 364 + 0x69d00497, 365 + 0x08579880, 366 + 0xbb075898, 367 + 0x7abb00ac, 368 + 0x0081b600, 369 + 0xfd1084b6, 370 + 0x62b7058b, 371 + 0x67d00600, 372 + 0x0060b700, 373 + 0x0068d004, 374 + 0x6cf000f8, 375 + 0x0260b702, 376 + 0x0864b602, 377 + 0xd0085798, 378 + 0x60b70067, 379 + 0x57980400, 380 + 0x1074b607, 381 + 0xb70067d0, 382 + 0x98040060, 383 + 0x67d00957, 384 + 0xf900f800, 385 + 0xf110f900, 386 + 0xb6080007, 387 + 0x01cf0604, 388 + 0x0114f000, 389 + 0xfcfa1bf4, 390 + 0xf800fc10, 391 + 0x0d34c800, 392 + 0xf5701bf4, 393 + 0xf103ab21, 394 + 0xb6080c47, 395 + 0x05980644, 396 + 0x0450b605, 397 + 0xd00045d0, 398 + 0x57f04040, 399 + 0x8045d00c, 400 + 0x040040b7, 401 + 0xb6040598, 402 + 0x45d01054, 403 + 0x0040b700, 404 + 0x0057f105, 405 + 0x0153f00b, 406 + 0xf10045d0, 407 + 0xb6404057, 408 + 0x53f10154, 409 + 0x45d08080, 410 + 0x1057f140, 411 + 0x1253f111, 412 + 0x8045d013, 413 + 0x151457f1, 414 + 0x171653f1, 415 + 0xf1c045d0, 416 + 0xf0260157, 417 + 0x47f10153, 418 + 0x44b60800, 419 + 0x0045d006, 420 + 0x03ab21f5, 421 + 0x080c47f1, 422 + 0x980644b6, 423 + 0x45d00505, 424 + 0x4040d000, 425 + 0xd00457f0, 426 + 0x40b78045, 427 + 0x05980400, 428 + 0x1054b604, 429 + 0xb70045d0, 430 + 0xf1050040, 431 + 0xd0030057, 432 + 0x57f10045, 433 + 0x53f11110, 434 + 0x45d01312, 435 + 0x06059840, 436 + 0x050040b7, 437 + 0xf10045d0, 438 + 0xf0260157, 439 + 0x47f10153, 440 + 0x44b60800, 441 + 0x0045d006, 442 + 0x21f500f8, 443 + 0x3fc803ab, 444 + 0x0e0bf400, 445 + 0x018921f5, 446 + 0x020047f1, 447 + 0xf11e0ef4, 448 + 0xb6081067, 449 + 
0x77f00664, 450 + 0x11078001, 451 + 0x981c0780, 452 + 0x67d02007, 453 + 0x4067d000, 454 + 0x32f444bd, 455 + 0xc854bd02, 456 + 0x0bf4043f, 457 + 0x8221f50a, 458 + 0x0a0ef403, 459 + 0x027621f5, 460 + 0xf40749f0, 461 + 0x57f00231, 462 + 0x083fc82c, 463 + 0xf50a0bf4, 464 + 0xf4038221, 465 + 0x21f50a0e, 466 + 0x49f00276, 467 + 0x0057f108, 468 + 0x0654b608, 469 + 0xd0210698, 470 + 0x67f04056, 471 + 0x0063f141, 472 + 0x0546fd44, 473 + 0xc80054d0, 474 + 0x0bf40c3f, 475 + 0xc521f507, 476 + 0xf100f803, 477 + 0xbd220027, 478 + 0x0133f034, 479 + 0xf80023d0, 480 + 0x00000000, 481 + 0x00000000, 482 + 0x00000000, 483 + 0x00000000, 484 + 0x00000000, 485 + 0x00000000, 486 + 0x00000000, 487 + 0x00000000, 488 + 0x00000000, 489 + 0x00000000, 490 + 0x00000000, 491 + 0x00000000, 492 + 0x00000000, 493 + 0x00000000, 494 + 0x00000000, 495 + 0x00000000, 496 + 0x00000000, 497 + 0x00000000, 498 + 0x00000000, 499 + 0x00000000, 500 + 0x00000000, 501 + 0x00000000, 502 + 0x00000000, 503 + 0x00000000, 504 + 0x00000000, 505 + 0x00000000, 506 + 0x00000000, 507 + 0x00000000, 508 + 0x00000000, 509 + 0x00000000, 510 + 0x00000000, 511 + 0x00000000, 512 + 0x00000000, 513 + 0x00000000, 514 + 0x00000000, 515 + 0x00000000, 516 + 0x00000000, 517 + 0x00000000, 518 + 0x00000000, 519 + 0x00000000, 520 + 0x00000000, 521 + 0x00000000, 522 + 0x00000000, 523 + 0x00000000, 524 + 0x00000000, 525 + 0x00000000, 526 + 0x00000000, 527 + 0x00000000, 528 + 0x00000000, 529 + 0x00000000, 530 + 0x00000000, 531 + 0x00000000, 532 + 0x00000000, 533 + 0x00000000, 534 + };
+140 -31
drivers/gpu/drm/nouveau/nva3_pm.c
··· 27 27 #include "nouveau_bios.h" 28 28 #include "nouveau_pm.h" 29 29 30 - /*XXX: boards using limits 0x40 need fixing, the register layout 31 - * is correct here, but, there's some other funny magic 32 - * that modifies things, so it's not likely we'll set/read 33 - * the correct timings yet.. working on it... 30 + /* This is actually a lot more complex than it appears here, but hopefully 31 + * this should be able to deal with what the VBIOS leaves for us.. 32 + * 33 + * If not, well, I'll jump off that bridge when I come to it. 34 34 */ 35 35 36 36 struct nva3_pm_state { 37 - struct pll_lims pll; 38 - int N, M, P; 37 + enum pll_types type; 38 + u32 src0; 39 + u32 src1; 40 + u32 ctrl; 41 + u32 coef; 42 + u32 old_pnm; 43 + u32 new_pnm; 44 + u32 new_div; 39 45 }; 46 + 47 + static int 48 + nva3_pm_pll_offset(u32 id) 49 + { 50 + static const u32 pll_map[] = { 51 + 0x00, PLL_CORE, 52 + 0x01, PLL_SHADER, 53 + 0x02, PLL_MEMORY, 54 + 0x00, 0x00 55 + }; 56 + const u32 *map = pll_map; 57 + 58 + while (map[1]) { 59 + if (id == map[1]) 60 + return map[0]; 61 + map += 2; 62 + } 63 + 64 + return -ENOENT; 65 + } 40 66 41 67 int 42 68 nva3_pm_clock_get(struct drm_device *dev, u32 id) 43 69 { 70 + u32 src0, src1, ctrl, coef; 44 71 struct pll_lims pll; 45 - int P, N, M, ret; 46 - u32 reg; 72 + int ret, off; 73 + int P, N, M; 47 74 48 75 ret = get_pll_limits(dev, id, &pll); 49 76 if (ret) 50 77 return ret; 51 78 52 - reg = nv_rd32(dev, pll.reg + 4); 53 - P = (reg & 0x003f0000) >> 16; 54 - N = (reg & 0x0000ff00) >> 8; 55 - M = (reg & 0x000000ff); 79 + off = nva3_pm_pll_offset(id); 80 + if (off < 0) 81 + return off; 82 + 83 + src0 = nv_rd32(dev, 0x4120 + (off * 4)); 84 + src1 = nv_rd32(dev, 0x4160 + (off * 4)); 85 + ctrl = nv_rd32(dev, pll.reg + 0); 86 + coef = nv_rd32(dev, pll.reg + 4); 87 + NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 88 + id, src0, src1, ctrl, coef); 89 + 90 + if (ctrl & 0x00000008) { 91 + u32 div = ((src1 & 0x003c0000) >> 18) + 1; 92 + return 
(pll.refclk * 2) / div; 93 + } 94 + 95 + P = (coef & 0x003f0000) >> 16; 96 + N = (coef & 0x0000ff00) >> 8; 97 + M = (coef & 0x000000ff); 56 98 return pll.refclk * N / M / P; 57 99 } 58 100 ··· 102 60 nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 103 61 u32 id, int khz) 104 62 { 105 - struct nva3_pm_state *state; 106 - int dummy, ret; 63 + struct nva3_pm_state *pll; 64 + struct pll_lims limits; 65 + int N, M, P, diff; 66 + int ret, off; 107 67 108 - state = kzalloc(sizeof(*state), GFP_KERNEL); 109 - if (!state) 110 - return ERR_PTR(-ENOMEM); 111 - 112 - ret = get_pll_limits(dev, id, &state->pll); 113 - if (ret < 0) { 114 - kfree(state); 68 + ret = get_pll_limits(dev, id, &limits); 69 + if (ret < 0) 115 70 return (ret == -ENOENT) ? NULL : ERR_PTR(ret); 71 + 72 + off = nva3_pm_pll_offset(id); 73 + if (id < 0) 74 + return ERR_PTR(-EINVAL); 75 + 76 + 77 + pll = kzalloc(sizeof(*pll), GFP_KERNEL); 78 + if (!pll) 79 + return ERR_PTR(-ENOMEM); 80 + pll->type = id; 81 + pll->src0 = 0x004120 + (off * 4); 82 + pll->src1 = 0x004160 + (off * 4); 83 + pll->ctrl = limits.reg + 0; 84 + pll->coef = limits.reg + 4; 85 + 86 + /* If target clock is within [-2, 3) MHz of a divisor, we'll 87 + * use that instead of calculating MNP values 88 + */ 89 + pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16); 90 + if (pll->new_div) { 91 + diff = khz - ((limits.refclk * 2) / pll->new_div); 92 + if (diff < -2000 || diff >= 3000) 93 + pll->new_div = 0; 116 94 } 117 95 118 - ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy, 119 - &state->M, &state->P); 120 - if (ret < 0) { 121 - kfree(state); 122 - return ERR_PTR(ret); 96 + if (!pll->new_div) { 97 + ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); 98 + if (ret < 0) 99 + return ERR_PTR(ret); 100 + 101 + pll->new_pnm = (P << 16) | (N << 8) | M; 102 + pll->new_div = 2 - 1; 103 + } else { 104 + pll->new_pnm = 0; 105 + pll->new_div--; 123 106 } 124 107 125 - return state; 108 + if ((nv_rd32(dev, 
pll->src1) & 0x00000101) != 0x00000101) 109 + pll->old_pnm = nv_rd32(dev, pll->coef); 110 + return pll; 126 111 } 127 112 128 113 void 129 114 nva3_pm_clock_set(struct drm_device *dev, void *pre_state) 130 115 { 131 - struct nva3_pm_state *state = pre_state; 132 - u32 reg = state->pll.reg; 116 + struct nva3_pm_state *pll = pre_state; 117 + u32 ctrl = 0; 133 118 134 - nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M); 135 - kfree(state); 119 + /* For the memory clock, NVIDIA will build a "script" describing 120 + * the reclocking process and ask PDAEMON to execute it. 121 + */ 122 + if (pll->type == PLL_MEMORY) { 123 + nv_wr32(dev, 0x100210, 0); 124 + nv_wr32(dev, 0x1002dc, 1); 125 + nv_wr32(dev, 0x004018, 0x00001000); 126 + ctrl = 0x18000100; 127 + } 128 + 129 + if (pll->old_pnm || !pll->new_pnm) { 130 + nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 | 131 + (pll->new_div << 18)); 132 + nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); 133 + nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000); 134 + } 135 + 136 + if (pll->new_pnm) { 137 + nv_mask(dev, pll->src0, 0x00000101, 0x00000101); 138 + nv_wr32(dev, pll->coef, pll->new_pnm); 139 + nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); 140 + nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000); 141 + nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010); 142 + nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl); 143 + nv_mask(dev, pll->src1, 0x00000100, 0x00000000); 144 + nv_mask(dev, pll->src1, 0x00000001, 0x00000000); 145 + if (pll->type == PLL_MEMORY) 146 + nv_wr32(dev, 0x4018, 0x10005000); 147 + } else { 148 + nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000); 149 + nv_mask(dev, pll->src0, 0x00000100, 0x00000000); 150 + nv_mask(dev, pll->src0, 0x00000001, 0x00000000); 151 + if (pll->type == PLL_MEMORY) 152 + nv_wr32(dev, 0x4018, 0x1000d000); 153 + } 154 + 155 + if (pll->type == PLL_MEMORY) { 156 + nv_wr32(dev, 0x1002dc, 0); 157 + nv_wr32(dev, 0x100210, 0x80000000); 158 + } 159 + 160 + kfree(pll); 136 161 } 137 162
+243
drivers/gpu/drm/nouveau/nvc0_copy.c
··· 1 + /* 2 + * Copyright 2011 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: Ben Skeggs 23 + */ 24 + 25 + #include <linux/firmware.h> 26 + #include "drmP.h" 27 + #include "nouveau_drv.h" 28 + #include "nouveau_util.h" 29 + #include "nouveau_vm.h" 30 + #include "nouveau_ramht.h" 31 + #include "nvc0_copy.fuc.h" 32 + 33 + struct nvc0_copy_engine { 34 + struct nouveau_exec_engine base; 35 + u32 irq; 36 + u32 pmc; 37 + u32 fuc; 38 + u32 ctx; 39 + }; 40 + 41 + static int 42 + nvc0_copy_context_new(struct nouveau_channel *chan, int engine) 43 + { 44 + struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine); 45 + struct drm_device *dev = chan->dev; 46 + struct drm_nouveau_private *dev_priv = dev->dev_private; 47 + struct nouveau_gpuobj *ramin = chan->ramin; 48 + struct nouveau_gpuobj *ctx = NULL; 49 + int ret; 50 + 51 + ret = nouveau_gpuobj_new(dev, NULL, 256, 256, 52 + NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | 53 + NVOBJ_FLAG_ZERO_ALLOC, &ctx); 54 + if (ret) 55 + return ret; 56 + 57 + nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst)); 58 + nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst)); 59 + dev_priv->engine.instmem.flush(dev); 60 + 61 + chan->engctx[engine] = ctx; 62 + return 0; 63 + } 64 + 65 + static int 66 + nvc0_copy_object_new(struct nouveau_channel *chan, int engine, 67 + u32 handle, u16 class) 68 + { 69 + return 0; 70 + } 71 + 72 + static void 73 + nvc0_copy_context_del(struct nouveau_channel *chan, int engine) 74 + { 75 + struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine); 76 + struct nouveau_gpuobj *ctx = chan->engctx[engine]; 77 + struct drm_device *dev = chan->dev; 78 + u32 inst; 79 + 80 + inst = (chan->ramin->vinst >> 12); 81 + inst |= 0x40000000; 82 + 83 + /* disable fifo access */ 84 + nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000); 85 + /* mark channel as unloaded if it's currently active */ 86 + if (nv_rd32(dev, pcopy->fuc + 0x050) == inst) 87 + nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000); 88 + /* mark next channel as invalid if it's about to be loaded */ 89 + if 
(nv_rd32(dev, pcopy->fuc + 0x054) == inst) 90 + nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000); 91 + /* restore fifo access */ 92 + nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003); 93 + 94 + nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000); 95 + nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000); 96 + nouveau_gpuobj_ref(NULL, &ctx); 97 + 98 + chan->engctx[engine] = ctx; 99 + } 100 + 101 + static int 102 + nvc0_copy_init(struct drm_device *dev, int engine) 103 + { 104 + struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 105 + int i; 106 + 107 + nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000); 108 + nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc); 109 + nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff); 110 + 111 + nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000); 112 + for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++) 113 + nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]); 114 + 115 + nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000); 116 + for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) { 117 + if ((i & 0x3f) == 0) 118 + nv_wr32(dev, pcopy->fuc + 0x188, i >> 6); 119 + nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]); 120 + } 121 + 122 + nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0); 123 + nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000); 124 + nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */ 125 + nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */ 126 + return 0; 127 + } 128 + 129 + static int 130 + nvc0_copy_fini(struct drm_device *dev, int engine) 131 + { 132 + struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 133 + 134 + nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000); 135 + 136 + /* trigger fuc context unload */ 137 + nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000); 138 + nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000); 139 + nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008); 140 + nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000); 141 + 142 + nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff); 
143 + return 0; 144 + } 145 + 146 + static struct nouveau_enum nvc0_copy_isr_error_name[] = { 147 + { 0x0001, "ILLEGAL_MTHD" }, 148 + { 0x0002, "INVALID_ENUM" }, 149 + { 0x0003, "INVALID_BITFIELD" }, 150 + {} 151 + }; 152 + 153 + static void 154 + nvc0_copy_isr(struct drm_device *dev, int engine) 155 + { 156 + struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 157 + u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c); 158 + u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16); 159 + u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12; 160 + u32 chid = nvc0_graph_isr_chid(dev, inst); 161 + u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff; 162 + u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16; 163 + u32 mthd = (addr & 0x07ff) << 2; 164 + u32 subc = (addr & 0x3800) >> 11; 165 + u32 data = nv_rd32(dev, pcopy->fuc + 0x044); 166 + 167 + if (stat & 0x00000040) { 168 + NV_INFO(dev, "PCOPY: DISPATCH_ERROR ["); 169 + nouveau_enum_print(nvc0_copy_isr_error_name, ssta); 170 + printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", 171 + chid, inst, subc, mthd, data); 172 + nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040); 173 + stat &= ~0x00000040; 174 + } 175 + 176 + if (stat) { 177 + NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat); 178 + nv_wr32(dev, pcopy->fuc + 0x004, stat); 179 + } 180 + } 181 + 182 + static void 183 + nvc0_copy_isr_0(struct drm_device *dev) 184 + { 185 + nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0); 186 + } 187 + 188 + static void 189 + nvc0_copy_isr_1(struct drm_device *dev) 190 + { 191 + nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1); 192 + } 193 + 194 + static void 195 + nvc0_copy_destroy(struct drm_device *dev, int engine) 196 + { 197 + struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 198 + 199 + nouveau_irq_unregister(dev, pcopy->irq); 200 + 201 + if (engine == NVOBJ_ENGINE_COPY0) 202 + NVOBJ_ENGINE_DEL(dev, COPY0); 203 + else 204 + NVOBJ_ENGINE_DEL(dev, COPY1); 205 + kfree(pcopy); 206 + } 207 + 208 + int 
209 + nvc0_copy_create(struct drm_device *dev, int engine) 210 + { 211 + struct nvc0_copy_engine *pcopy; 212 + 213 + pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL); 214 + if (!pcopy) 215 + return -ENOMEM; 216 + 217 + pcopy->base.destroy = nvc0_copy_destroy; 218 + pcopy->base.init = nvc0_copy_init; 219 + pcopy->base.fini = nvc0_copy_fini; 220 + pcopy->base.context_new = nvc0_copy_context_new; 221 + pcopy->base.context_del = nvc0_copy_context_del; 222 + pcopy->base.object_new = nvc0_copy_object_new; 223 + 224 + if (engine == 0) { 225 + pcopy->irq = 5; 226 + pcopy->pmc = 0x00000040; 227 + pcopy->fuc = 0x104000; 228 + pcopy->ctx = 0x0230; 229 + nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0); 230 + NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base); 231 + NVOBJ_CLASS(dev, 0x90b5, COPY0); 232 + } else { 233 + pcopy->irq = 6; 234 + pcopy->pmc = 0x00000080; 235 + pcopy->fuc = 0x105000; 236 + pcopy->ctx = 0x0240; 237 + nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1); 238 + NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base); 239 + NVOBJ_CLASS(dev, 0x90b8, COPY1); 240 + } 241 + 242 + return 0; 243 + }
+527
drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
··· 1 + uint32_t nvc0_pcopy_data[] = { 2 + 0x00000000, 3 + 0x00000000, 4 + 0x00000000, 5 + 0x00000000, 6 + 0x00000000, 7 + 0x00000000, 8 + 0x00000000, 9 + 0x00000000, 10 + 0x00000000, 11 + 0x00000000, 12 + 0x00000000, 13 + 0x00000000, 14 + 0x00000000, 15 + 0x00000000, 16 + 0x00000000, 17 + 0x00000000, 18 + 0x00000000, 19 + 0x00000000, 20 + 0x00000000, 21 + 0x00000000, 22 + 0x00000000, 23 + 0x00000000, 24 + 0x00000000, 25 + 0x00000000, 26 + 0x00000000, 27 + 0x00000000, 28 + 0x00000000, 29 + 0x00000000, 30 + 0x00000000, 31 + 0x00000000, 32 + 0x00000000, 33 + 0x00000000, 34 + 0x00000000, 35 + 0x00000000, 36 + 0x00000000, 37 + 0x00000000, 38 + 0x00000000, 39 + 0x00000000, 40 + 0x00000000, 41 + 0x00000000, 42 + 0x00000000, 43 + 0x00000000, 44 + 0x00000000, 45 + 0x00000000, 46 + 0x00000000, 47 + 0x00000000, 48 + 0x00000000, 49 + 0x00000000, 50 + 0x00000000, 51 + 0x00000000, 52 + 0x00000000, 53 + 0x00000000, 54 + 0x00000000, 55 + 0x00000000, 56 + 0x00000000, 57 + 0x00000000, 58 + 0x00000000, 59 + 0x00000000, 60 + 0x00000000, 61 + 0x00000000, 62 + 0x00000000, 63 + 0x00000000, 64 + 0x00000000, 65 + 0x00000000, 66 + 0x00010000, 67 + 0x00000000, 68 + 0x00000000, 69 + 0x00010040, 70 + 0x0001019f, 71 + 0x00000000, 72 + 0x00010050, 73 + 0x000101a1, 74 + 0x00000000, 75 + 0x00070080, 76 + 0x0000001c, 77 + 0xfffff000, 78 + 0x00000020, 79 + 0xfff80000, 80 + 0x00000024, 81 + 0xffffe000, 82 + 0x00000028, 83 + 0xfffff800, 84 + 0x0000002c, 85 + 0xfffff000, 86 + 0x00000030, 87 + 0xfff80000, 88 + 0x00000034, 89 + 0xffffe000, 90 + 0x00070088, 91 + 0x00000048, 92 + 0xfffff000, 93 + 0x0000004c, 94 + 0xfff80000, 95 + 0x00000050, 96 + 0xffffe000, 97 + 0x00000054, 98 + 0xfffff800, 99 + 0x00000058, 100 + 0xfffff000, 101 + 0x0000005c, 102 + 0xfff80000, 103 + 0x00000060, 104 + 0xffffe000, 105 + 0x000200c0, 106 + 0x000104b8, 107 + 0x00000000, 108 + 0x00010541, 109 + 0x00000000, 110 + 0x000e00c3, 111 + 0x00000010, 112 + 0xffffff00, 113 + 0x00000014, 114 + 0x0000000f, 115 + 0x0000003c, 116 + 
0xffffff00, 117 + 0x00000040, 118 + 0x0000000f, 119 + 0x00000018, 120 + 0xfff80000, 121 + 0x00000044, 122 + 0xfff80000, 123 + 0x00000074, 124 + 0xffff0000, 125 + 0x00000078, 126 + 0xffffe000, 127 + 0x00000068, 128 + 0xfccc0000, 129 + 0x0000006c, 130 + 0x00000000, 131 + 0x00000070, 132 + 0x00000000, 133 + 0x00000004, 134 + 0xffffff00, 135 + 0x00000008, 136 + 0x00000000, 137 + 0x0000000c, 138 + 0x00000000, 139 + 0x00000800, 140 + }; 141 + 142 + uint32_t nvc0_pcopy_code[] = { 143 + 0x04fe04bd, 144 + 0x3517f000, 145 + 0xf10010fe, 146 + 0xf1040017, 147 + 0xf0fff327, 148 + 0x22d00023, 149 + 0x0c25f0c0, 150 + 0xf40012d0, 151 + 0x17f11031, 152 + 0x27f01200, 153 + 0x0012d003, 154 + 0xf40031f4, 155 + 0x0ef40028, 156 + 0x8001cffd, 157 + 0xf40812c4, 158 + 0x21f4060b, 159 + 0x0412c4ca, 160 + 0xf5070bf4, 161 + 0xc4010221, 162 + 0x01d00c11, 163 + 0xf101f840, 164 + 0xfe770047, 165 + 0x47f1004b, 166 + 0x44cf2100, 167 + 0x0144f000, 168 + 0xb60444b6, 169 + 0xf7f13040, 170 + 0xf4b6061c, 171 + 0x1457f106, 172 + 0x00f5d101, 173 + 0xb6043594, 174 + 0x57fe0250, 175 + 0x0145fe00, 176 + 0x010052b7, 177 + 0x00ff67f1, 178 + 0x56fd60bd, 179 + 0x0253f004, 180 + 0xf80545fa, 181 + 0x0053f003, 182 + 0xd100e7f0, 183 + 0x549800fe, 184 + 0x0845b600, 185 + 0xb6015698, 186 + 0x46fd1864, 187 + 0x0047fe05, 188 + 0xf00204b9, 189 + 0x01f40643, 190 + 0x0604fa09, 191 + 0xfa060ef4, 192 + 0x03f80504, 193 + 0x27f100f8, 194 + 0x23cf1400, 195 + 0x1e3fc800, 196 + 0xf4170bf4, 197 + 0x21f40132, 198 + 0x1e3af053, 199 + 0xf00023d0, 200 + 0x24d00147, 201 + 0xcf00f880, 202 + 0x3dc84023, 203 + 0x090bf41e, 204 + 0xf40131f4, 205 + 0x37f05321, 206 + 0x8023d002, 207 + 0x37f100f8, 208 + 0x32cf1900, 209 + 0x0033cf40, 210 + 0x07ff24e4, 211 + 0xf11024b6, 212 + 0xbd010057, 213 + 0x5874bd64, 214 + 0x57580056, 215 + 0x0450b601, 216 + 0xf40446b8, 217 + 0x76bb4d08, 218 + 0x0447b800, 219 + 0xbb0f08f4, 220 + 0x74b60276, 221 + 0x0057bb03, 222 + 0xbbdf0ef4, 223 + 0x44b60246, 224 + 0x0045bb03, 225 + 0xfd014598, 226 + 0x54b00453, 227 + 
0x201bf400, 228 + 0x58004558, 229 + 0x64b00146, 230 + 0x091bf400, 231 + 0xf4005380, 232 + 0x32f4300e, 233 + 0xf455f901, 234 + 0x0ef40c01, 235 + 0x0225f025, 236 + 0xf10125f0, 237 + 0xd0100047, 238 + 0x43d00042, 239 + 0x4027f040, 240 + 0xcf0002d0, 241 + 0x24f08002, 242 + 0x0024b040, 243 + 0xf1f71bf4, 244 + 0xf01d0027, 245 + 0x23d00137, 246 + 0xf800f800, 247 + 0x0027f100, 248 + 0xf034bd22, 249 + 0x23d00233, 250 + 0xf400f800, 251 + 0x01b0f030, 252 + 0x0101b000, 253 + 0xb00201b0, 254 + 0x04980301, 255 + 0x3045c71a, 256 + 0xc70150b6, 257 + 0x60b63446, 258 + 0x3847c701, 259 + 0xf40170b6, 260 + 0x84bd0232, 261 + 0x4ac494bd, 262 + 0x0445b60f, 263 + 0xa430b4bd, 264 + 0x0f18f404, 265 + 0xbbc0a5ff, 266 + 0x31f400cb, 267 + 0x220ef402, 268 + 0xf00c1bf4, 269 + 0xcbbb10c7, 270 + 0x160ef400, 271 + 0xf406a430, 272 + 0xc7f00c18, 273 + 0x00cbbb14, 274 + 0xf1070ef4, 275 + 0x380080c7, 276 + 0x80b601c8, 277 + 0x01b0b601, 278 + 0xf404b5b8, 279 + 0x90b6c308, 280 + 0x0497b801, 281 + 0xfdb208f4, 282 + 0x06800065, 283 + 0x1d08980e, 284 + 0xf40068fd, 285 + 0x64bd0502, 286 + 0x800075fd, 287 + 0x78fd1907, 288 + 0x1057f100, 289 + 0x0654b608, 290 + 0xd00056d0, 291 + 0x50b74057, 292 + 0x06980800, 293 + 0x0162b619, 294 + 0x980864b6, 295 + 0x72b60e07, 296 + 0x0567fd01, 297 + 0xb70056d0, 298 + 0xb4010050, 299 + 0x56d00060, 300 + 0x0160b400, 301 + 0xb44056d0, 302 + 0x56d00260, 303 + 0x0360b480, 304 + 0xb7c056d0, 305 + 0x98040050, 306 + 0x56d01b06, 307 + 0x1c069800, 308 + 0xf44056d0, 309 + 0x00f81030, 310 + 0xc7075798, 311 + 0x78c76879, 312 + 0x0380b664, 313 + 0xb06077c7, 314 + 0x1bf40e76, 315 + 0x0477f009, 316 + 0xf00f0ef4, 317 + 0x70b6027c, 318 + 0x0947fd11, 319 + 0x980677f0, 320 + 0x5b980c5a, 321 + 0x00abfd0e, 322 + 0xbb01b7f0, 323 + 0xb2b604b7, 324 + 0xc4abff01, 325 + 0x9805a7bb, 326 + 0xe7f00d5d, 327 + 0x04e8bb01, 328 + 0xff01e2b6, 329 + 0xd8bbb4de, 330 + 0x01e0b605, 331 + 0xbb0cef94, 332 + 0xfefd02eb, 333 + 0x026cf005, 334 + 0x020860b7, 335 + 0xd00864b6, 336 + 0xb7bb006f, 337 + 0x00cbbb04, 338 + 
0x98085f98, 339 + 0xfbfd0e5b, 340 + 0x01b7f000, 341 + 0xb604b7bb, 342 + 0xfbbb01b2, 343 + 0x05f7bb00, 344 + 0x5f98f0f9, 345 + 0x01b7f009, 346 + 0xb604b8bb, 347 + 0xfbbb01b2, 348 + 0x05f8bb00, 349 + 0x78bbf0f9, 350 + 0x0282b600, 351 + 0xbb01b7f0, 352 + 0xb9bb04b8, 353 + 0x0b589804, 354 + 0xbb01e7f0, 355 + 0xe2b604e9, 356 + 0xf48eff01, 357 + 0xbb04f7bb, 358 + 0x79bb00cf, 359 + 0x0589bb00, 360 + 0x90fcf0fc, 361 + 0xbb00d9fd, 362 + 0x89fd00ad, 363 + 0x008ffd00, 364 + 0xbb00a8bb, 365 + 0x92b604a7, 366 + 0x0497bb01, 367 + 0x988069d0, 368 + 0x58980557, 369 + 0x00acbb04, 370 + 0xb6007abb, 371 + 0x84b60081, 372 + 0x058bfd10, 373 + 0x060062b7, 374 + 0xb70067d0, 375 + 0xd0040060, 376 + 0x00f80068, 377 + 0xb7026cf0, 378 + 0xb6020260, 379 + 0x57980864, 380 + 0x0067d005, 381 + 0x040060b7, 382 + 0xb6045798, 383 + 0x67d01074, 384 + 0x0060b700, 385 + 0x06579804, 386 + 0xf80067d0, 387 + 0xf900f900, 388 + 0x0007f110, 389 + 0x0604b608, 390 + 0xf00001cf, 391 + 0x1bf40114, 392 + 0xfc10fcfa, 393 + 0xc800f800, 394 + 0x1bf40d34, 395 + 0xd121f570, 396 + 0x0c47f103, 397 + 0x0644b608, 398 + 0xb6020598, 399 + 0x45d00450, 400 + 0x4040d000, 401 + 0xd00c57f0, 402 + 0x40b78045, 403 + 0x05980400, 404 + 0x1054b601, 405 + 0xb70045d0, 406 + 0xf1050040, 407 + 0xf00b0057, 408 + 0x45d00153, 409 + 0x4057f100, 410 + 0x0154b640, 411 + 0x808053f1, 412 + 0xf14045d0, 413 + 0xf1111057, 414 + 0xd0131253, 415 + 0x57f18045, 416 + 0x53f11514, 417 + 0x45d01716, 418 + 0x0157f1c0, 419 + 0x0153f026, 420 + 0x080047f1, 421 + 0xd00644b6, 422 + 0x21f50045, 423 + 0x47f103d1, 424 + 0x44b6080c, 425 + 0x02059806, 426 + 0xd00045d0, 427 + 0x57f04040, 428 + 0x8045d004, 429 + 0x040040b7, 430 + 0xb6010598, 431 + 0x45d01054, 432 + 0x0040b700, 433 + 0x0057f105, 434 + 0x0045d003, 435 + 0x111057f1, 436 + 0x131253f1, 437 + 0x984045d0, 438 + 0x40b70305, 439 + 0x45d00500, 440 + 0x0157f100, 441 + 0x0153f026, 442 + 0x080047f1, 443 + 0xd00644b6, 444 + 0x00f80045, 445 + 0x03d121f5, 446 + 0xf4003fc8, 447 + 0x21f50e0b, 448 + 0x47f101af, 449 + 
0x0ef40200, 450 + 0x1067f11e, 451 + 0x0664b608, 452 + 0x800177f0, 453 + 0x07800e07, 454 + 0x1d079819, 455 + 0xd00067d0, 456 + 0x44bd4067, 457 + 0xbd0232f4, 458 + 0x043fc854, 459 + 0xf50a0bf4, 460 + 0xf403a821, 461 + 0x21f50a0e, 462 + 0x49f0029c, 463 + 0x0231f407, 464 + 0xc82c57f0, 465 + 0x0bf4083f, 466 + 0xa821f50a, 467 + 0x0a0ef403, 468 + 0x029c21f5, 469 + 0xf10849f0, 470 + 0xb6080057, 471 + 0x06980654, 472 + 0x4056d01e, 473 + 0xf14167f0, 474 + 0xfd440063, 475 + 0x54d00546, 476 + 0x0c3fc800, 477 + 0xf5070bf4, 478 + 0xf803eb21, 479 + 0x0027f100, 480 + 0xf034bd22, 481 + 0x23d00133, 482 + 0x0000f800, 483 + 0x00000000, 484 + 0x00000000, 485 + 0x00000000, 486 + 0x00000000, 487 + 0x00000000, 488 + 0x00000000, 489 + 0x00000000, 490 + 0x00000000, 491 + 0x00000000, 492 + 0x00000000, 493 + 0x00000000, 494 + 0x00000000, 495 + 0x00000000, 496 + 0x00000000, 497 + 0x00000000, 498 + 0x00000000, 499 + 0x00000000, 500 + 0x00000000, 501 + 0x00000000, 502 + 0x00000000, 503 + 0x00000000, 504 + 0x00000000, 505 + 0x00000000, 506 + 0x00000000, 507 + 0x00000000, 508 + 0x00000000, 509 + 0x00000000, 510 + 0x00000000, 511 + 0x00000000, 512 + 0x00000000, 513 + 0x00000000, 514 + 0x00000000, 515 + 0x00000000, 516 + 0x00000000, 517 + 0x00000000, 518 + 0x00000000, 519 + 0x00000000, 520 + 0x00000000, 521 + 0x00000000, 522 + 0x00000000, 523 + 0x00000000, 524 + 0x00000000, 525 + 0x00000000, 526 + 0x00000000, 527 + };
+92 -50
drivers/gpu/drm/nouveau/nvc0_fifo.c
··· 37 37 }; 38 38 39 39 struct nvc0_fifo_chan { 40 - struct nouveau_bo *user; 40 + struct nouveau_gpuobj *user; 41 41 struct nouveau_gpuobj *ramfc; 42 42 }; 43 43 ··· 106 106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 107 107 struct nvc0_fifo_priv *priv = pfifo->priv; 108 108 struct nvc0_fifo_chan *fifoch; 109 - u64 ib_virt, user_vinst; 109 + u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 110 110 int ret; 111 111 112 112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); ··· 115 115 fifoch = chan->fifo_priv; 116 116 117 117 /* allocate vram for control regs, map into polling area */ 118 - ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM, 119 - 0, 0, &fifoch->user); 118 + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 119 + NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user); 120 120 if (ret) 121 121 goto error; 122 122 123 - ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM); 124 - if (ret) { 125 - nouveau_bo_ref(NULL, &fifoch->user); 126 - goto error; 127 - } 128 - 129 - user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT; 130 - 131 - ret = nouveau_bo_map(fifoch->user); 132 - if (ret) { 133 - nouveau_bo_unpin(fifoch->user); 134 - nouveau_bo_ref(NULL, &fifoch->user); 135 - goto error; 136 - } 137 - 138 123 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, 139 - fifoch->user->bo.mem.mm_node); 124 + *(struct nouveau_mem **)fifoch->user->node); 140 125 141 126 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + 142 127 priv->user_vma.offset + (chan->id * 0x1000), ··· 131 146 goto error; 132 147 } 133 148 134 - ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 135 - 136 - /* zero channel regs */ 137 - nouveau_bo_wr32(fifoch->user, 0x0040/4, 0); 138 - nouveau_bo_wr32(fifoch->user, 0x0044/4, 0); 139 - nouveau_bo_wr32(fifoch->user, 0x0048/4, 0); 140 - nouveau_bo_wr32(fifoch->user, 0x004c/4, 0); 141 - nouveau_bo_wr32(fifoch->user, 0x0050/4, 0); 142 - nouveau_bo_wr32(fifoch->user, 0x0058/4, 0); 143 - 
nouveau_bo_wr32(fifoch->user, 0x005c/4, 0); 144 - nouveau_bo_wr32(fifoch->user, 0x0060/4, 0); 145 - nouveau_bo_wr32(fifoch->user, 0x0088/4, 0); 146 - nouveau_bo_wr32(fifoch->user, 0x008c/4, 0); 147 - 148 149 /* ramfc */ 149 150 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, 150 151 chan->ramin->vinst, 0x100, ··· 138 167 if (ret) 139 168 goto error; 140 169 141 - nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst)); 142 - nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst)); 170 + nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst)); 171 + nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst)); 143 172 nv_wo32(fifoch->ramfc, 0x10, 0x0000face); 144 173 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); 145 174 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); ··· 194 223 return; 195 224 196 225 nouveau_gpuobj_ref(NULL, &fifoch->ramfc); 197 - if (fifoch->user) { 198 - nouveau_bo_unmap(fifoch->user); 199 - nouveau_bo_unpin(fifoch->user); 200 - nouveau_bo_ref(NULL, &fifoch->user); 201 - } 226 + nouveau_gpuobj_ref(NULL, &fifoch->user); 202 227 kfree(fifoch); 203 228 } 204 229 ··· 207 240 int 208 241 nvc0_fifo_unload_context(struct drm_device *dev) 209 242 { 243 + int i; 244 + 245 + for (i = 0; i < 128; i++) { 246 + if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) 247 + continue; 248 + 249 + nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); 250 + nv_wr32(dev, 0x002634, i); 251 + if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { 252 + NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", 253 + i, nv_rd32(dev, 0x002634)); 254 + return -EBUSY; 255 + } 256 + } 257 + 210 258 return 0; 211 259 } 212 260 ··· 291 309 { 292 310 struct drm_nouveau_private *dev_priv = dev->dev_private; 293 311 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 312 + struct nouveau_channel *chan; 294 313 struct nvc0_fifo_priv *priv; 295 314 int ret, i; 296 315 ··· 334 351 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ 335 352 nv_wr32(dev, 
0x002100, 0xffffffff); 336 353 nv_wr32(dev, 0x002140, 0xbfffffff); 354 + 355 + /* restore PFIFO context table */ 356 + for (i = 0; i < 128; i++) { 357 + chan = dev_priv->channels.ptr[i]; 358 + if (!chan || !chan->fifo_priv) 359 + continue; 360 + 361 + nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 | 362 + (chan->ramin->vinst >> 12)); 363 + nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001); 364 + } 365 + nvc0_fifo_playlist_update(dev); 366 + 337 367 return 0; 338 368 } 339 369 340 370 struct nouveau_enum nvc0_fifo_fault_unit[] = { 341 - { 0, "PGRAPH" }, 342 - { 3, "PEEPHOLE" }, 343 - { 4, "BAR1" }, 344 - { 5, "BAR3" }, 345 - { 7, "PFIFO" }, 371 + { 0x00, "PGRAPH" }, 372 + { 0x03, "PEEPHOLE" }, 373 + { 0x04, "BAR1" }, 374 + { 0x05, "BAR3" }, 375 + { 0x07, "PFIFO" }, 376 + { 0x10, "PBSP" }, 377 + { 0x11, "PPPP" }, 378 + { 0x13, "PCOUNTER" }, 379 + { 0x14, "PVP" }, 380 + { 0x15, "PCOPY0" }, 381 + { 0x16, "PCOPY1" }, 382 + { 0x17, "PDAEMON" }, 346 383 {} 347 384 }; 348 385 349 386 struct nouveau_enum nvc0_fifo_fault_reason[] = { 350 - { 0, "PT_NOT_PRESENT" }, 351 - { 1, "PT_TOO_SHORT" }, 352 - { 2, "PAGE_NOT_PRESENT" }, 353 - { 3, "VM_LIMIT_EXCEEDED" }, 387 + { 0x00, "PT_NOT_PRESENT" }, 388 + { 0x01, "PT_TOO_SHORT" }, 389 + { 0x02, "PAGE_NOT_PRESENT" }, 390 + { 0x03, "VM_LIMIT_EXCEEDED" }, 391 + { 0x04, "NO_CHANNEL" }, 392 + { 0x05, "PAGE_SYSTEM_ONLY" }, 393 + { 0x06, "PAGE_READ_ONLY" }, 394 + { 0x0a, "COMPRESSED_SYSRAM" }, 395 + { 0x0c, "INVALID_STORAGE_TYPE" }, 396 + {} 397 + }; 398 + 399 + struct nouveau_enum nvc0_fifo_fault_hubclient[] = { 400 + { 0x01, "PCOPY0" }, 401 + { 0x02, "PCOPY1" }, 402 + { 0x04, "DISPATCH" }, 403 + { 0x05, "CTXCTL" }, 404 + { 0x06, "PFIFO" }, 405 + { 0x07, "BAR_READ" }, 406 + { 0x08, "BAR_WRITE" }, 407 + { 0x0b, "PVP" }, 408 + { 0x0c, "PPPP" }, 409 + { 0x0d, "PBSP" }, 410 + { 0x11, "PCOUNTER" }, 411 + { 0x12, "PDAEMON" }, 412 + { 0x14, "CCACHE" }, 413 + { 0x15, "CCACHE_POST" }, 414 + {} 415 + }; 416 + 417 + struct nouveau_enum 
nvc0_fifo_fault_gpcclient[] = { 418 + { 0x01, "TEX" }, 419 + { 0x0c, "ESETUP" }, 420 + { 0x0e, "CTXCTL" }, 421 + { 0x0f, "PROP" }, 354 422 {} 355 423 }; 356 424 ··· 419 385 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10)); 420 386 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10)); 421 387 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10)); 388 + u32 client = (stat & 0x00001f00) >> 8; 422 389 423 390 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [", 424 391 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo); 425 392 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); 426 393 printk("] from "); 427 394 nouveau_enum_print(nvc0_fifo_fault_unit, unit); 395 + if (stat & 0x00000040) { 396 + printk("/"); 397 + nouveau_enum_print(nvc0_fifo_fault_hubclient, client); 398 + } else { 399 + printk("/GPC%d/", (stat & 0x1f000000) >> 24); 400 + nouveau_enum_print(nvc0_fifo_fault_gpcclient, client); 401 + } 428 402 printk(" on channel 0x%010llx\n", (u64)inst << 12); 429 403 } 430 404
+293 -305
drivers/gpu/drm/nouveau/nvc0_graph.c
··· 30 30 #include "nouveau_mm.h" 31 31 #include "nvc0_graph.h" 32 32 33 - static void nvc0_graph_isr(struct drm_device *); 34 - static void nvc0_runk140_isr(struct drm_device *); 35 - static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); 36 - 37 - void 38 - nvc0_graph_fifo_access(struct drm_device *dev, bool enabled) 33 + static int 34 + nvc0_graph_load_context(struct nouveau_channel *chan) 39 35 { 36 + struct drm_device *dev = chan->dev; 37 + 38 + nv_wr32(dev, 0x409840, 0x00000030); 39 + nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 40 + nv_wr32(dev, 0x409504, 0x00000003); 41 + if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) 42 + NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); 43 + 44 + return 0; 40 45 } 41 46 42 - struct nouveau_channel * 43 - nvc0_graph_channel(struct drm_device *dev) 47 + static int 48 + nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan) 44 49 { 45 - return NULL; 50 + nv_wr32(dev, 0x409840, 0x00000003); 51 + nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12); 52 + nv_wr32(dev, 0x409504, 0x00000009); 53 + if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) { 54 + NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n"); 55 + return -EBUSY; 56 + } 57 + 58 + return 0; 46 59 } 47 60 48 61 static int 49 62 nvc0_graph_construct_context(struct nouveau_channel *chan) 50 63 { 51 64 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 52 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 53 - struct nvc0_graph_chan *grch = chan->pgraph_ctx; 65 + struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 66 + struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 54 67 struct drm_device *dev = chan->dev; 55 68 int ret, i; 56 69 u32 *ctx; ··· 102 89 static int 103 90 nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) 104 91 { 105 - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 106 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 107 - 
struct nvc0_graph_chan *grch = chan->pgraph_ctx; 92 + struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 93 + struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 108 94 struct drm_device *dev = chan->dev; 109 95 int i = 0, gpc, tp, ret; 110 96 u32 magic; ··· 170 158 return 0; 171 159 } 172 160 173 - int 174 - nvc0_graph_create_context(struct nouveau_channel *chan) 161 + static int 162 + nvc0_graph_context_new(struct nouveau_channel *chan, int engine) 175 163 { 176 - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 177 - struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 178 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 179 - struct nvc0_graph_priv *priv = pgraph->priv; 180 - struct nvc0_graph_chan *grch; 181 164 struct drm_device *dev = chan->dev; 165 + struct drm_nouveau_private *dev_priv = dev->dev_private; 166 + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 167 + struct nvc0_graph_priv *priv = nv_engine(dev, engine); 168 + struct nvc0_graph_chan *grch; 182 169 struct nouveau_gpuobj *grctx; 183 170 int ret, i; 184 171 185 - chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL); 186 - if (!chan->pgraph_ctx) 172 + grch = kzalloc(sizeof(*grch), GFP_KERNEL); 173 + if (!grch) 187 174 return -ENOMEM; 188 - grch = chan->pgraph_ctx; 175 + chan->engctx[NVOBJ_ENGINE_GR] = grch; 189 176 190 177 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 191 178 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, 192 179 &grch->grctx); 193 180 if (ret) 194 181 goto error; 195 - chan->ramin_grctx = grch->grctx; 196 182 grctx = grch->grctx; 197 183 198 184 ret = nvc0_graph_create_context_mmio_list(chan); ··· 210 200 for (i = 0; i < priv->grctx_size; i += 4) 211 201 nv_wo32(grctx, i, priv->grctx_vals[i / 4]); 212 202 213 - nv_wo32(grctx, 0xf4, 0); 214 - nv_wo32(grctx, 0xf8, 0); 215 - nv_wo32(grctx, 0x10, grch->mmio_nr); 216 - nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 217 - 
nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 218 - nv_wo32(grctx, 0x1c, 1); 219 - nv_wo32(grctx, 0x20, 0); 220 - nv_wo32(grctx, 0x28, 0); 221 - nv_wo32(grctx, 0x2c, 0); 203 + nv_wo32(grctx, 0xf4, 0); 204 + nv_wo32(grctx, 0xf8, 0); 205 + nv_wo32(grctx, 0x10, grch->mmio_nr); 206 + nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 207 + nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 208 + nv_wo32(grctx, 0x1c, 1); 209 + nv_wo32(grctx, 0x20, 0); 210 + nv_wo32(grctx, 0x28, 0); 211 + nv_wo32(grctx, 0x2c, 0); 222 212 pinstmem->flush(dev); 223 213 return 0; 224 214 225 215 error: 226 - pgraph->destroy_context(chan); 216 + priv->base.context_del(chan, engine); 227 217 return ret; 228 218 } 229 219 230 - void 231 - nvc0_graph_destroy_context(struct nouveau_channel *chan) 220 + static void 221 + nvc0_graph_context_del(struct nouveau_channel *chan, int engine) 232 222 { 233 - struct nvc0_graph_chan *grch; 234 - 235 - grch = chan->pgraph_ctx; 236 - chan->pgraph_ctx = NULL; 237 - if (!grch) 238 - return; 223 + struct nvc0_graph_chan *grch = chan->engctx[engine]; 239 224 240 225 nouveau_gpuobj_ref(NULL, &grch->mmio); 241 226 nouveau_gpuobj_ref(NULL, &grch->unk418810); 242 227 nouveau_gpuobj_ref(NULL, &grch->unk40800c); 243 228 nouveau_gpuobj_ref(NULL, &grch->unk408004); 244 229 nouveau_gpuobj_ref(NULL, &grch->grctx); 245 - chan->ramin_grctx = NULL; 230 + chan->engctx[engine] = NULL; 246 231 } 247 232 248 - int 249 - nvc0_graph_load_context(struct nouveau_channel *chan) 233 + static int 234 + nvc0_graph_object_new(struct nouveau_channel *chan, int engine, 235 + u32 handle, u16 class) 250 236 { 251 - struct drm_device *dev = chan->dev; 252 - 253 - nv_wr32(dev, 0x409840, 0x00000030); 254 - nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 255 - nv_wr32(dev, 0x409504, 0x00000003); 256 - if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) 257 - NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); 258 - 259 237 return 0; 260 238 } 261 239 262 240 static 
int 263 - nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan) 241 + nvc0_graph_fini(struct drm_device *dev, int engine) 264 242 { 265 - nv_wr32(dev, 0x409840, 0x00000003); 266 - nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12); 267 - nv_wr32(dev, 0x409504, 0x00000009); 268 - if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) { 269 - NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n"); 270 - return -EBUSY; 271 - } 272 - 273 243 return 0; 274 - } 275 - 276 - int 277 - nvc0_graph_unload_context(struct drm_device *dev) 278 - { 279 - u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; 280 - return nvc0_graph_unload_context_to(dev, inst); 281 - } 282 - 283 - static void 284 - nvc0_graph_destroy(struct drm_device *dev) 285 - { 286 - struct drm_nouveau_private *dev_priv = dev->dev_private; 287 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 288 - struct nvc0_graph_priv *priv; 289 - 290 - priv = pgraph->priv; 291 - if (!priv) 292 - return; 293 - 294 - nouveau_irq_unregister(dev, 12); 295 - nouveau_irq_unregister(dev, 25); 296 - 297 - nouveau_gpuobj_ref(NULL, &priv->unk4188b8); 298 - nouveau_gpuobj_ref(NULL, &priv->unk4188b4); 299 - 300 - if (priv->grctx_vals) 301 - kfree(priv->grctx_vals); 302 - kfree(priv); 303 - } 304 - 305 - void 306 - nvc0_graph_takedown(struct drm_device *dev) 307 - { 308 - nvc0_graph_destroy(dev); 309 244 } 310 245 311 246 static int ··· 261 306 return 0; 262 307 } 263 308 264 - static int 265 - nvc0_graph_create(struct drm_device *dev) 266 - { 267 - struct drm_nouveau_private *dev_priv = dev->dev_private; 268 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 269 - struct nvc0_graph_priv *priv; 270 - int ret, gpc, i; 271 - 272 - priv = kzalloc(sizeof(*priv), GFP_KERNEL); 273 - if (!priv) 274 - return -ENOMEM; 275 - pgraph->priv = priv; 276 - 277 - ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); 278 - if (ret) 279 - goto error; 280 - 281 - ret = nouveau_gpuobj_new(dev, NULL, 
0x1000, 256, 0, &priv->unk4188b8); 282 - if (ret) 283 - goto error; 284 - 285 - for (i = 0; i < 0x1000; i += 4) { 286 - nv_wo32(priv->unk4188b4, i, 0x00000010); 287 - nv_wo32(priv->unk4188b8, i, 0x00000010); 288 - } 289 - 290 - priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f; 291 - priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16; 292 - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 293 - priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608)); 294 - priv->tp_total += priv->tp_nr[gpc]; 295 - } 296 - 297 - /*XXX: these need figuring out... */ 298 - switch (dev_priv->chipset) { 299 - case 0xc0: 300 - if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ 301 - priv->magic_not_rop_nr = 0x07; 302 - /* filled values up to tp_total, the rest 0 */ 303 - priv->magicgpc980[0] = 0x22111000; 304 - priv->magicgpc980[1] = 0x00000233; 305 - priv->magicgpc980[2] = 0x00000000; 306 - priv->magicgpc980[3] = 0x00000000; 307 - priv->magicgpc918 = 0x000ba2e9; 308 - } else 309 - if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ 310 - priv->magic_not_rop_nr = 0x05; 311 - priv->magicgpc980[0] = 0x11110000; 312 - priv->magicgpc980[1] = 0x00233222; 313 - priv->magicgpc980[2] = 0x00000000; 314 - priv->magicgpc980[3] = 0x00000000; 315 - priv->magicgpc918 = 0x00092493; 316 - } else 317 - if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ 318 - priv->magic_not_rop_nr = 0x06; 319 - priv->magicgpc980[0] = 0x11110000; 320 - priv->magicgpc980[1] = 0x03332222; 321 - priv->magicgpc980[2] = 0x00000000; 322 - priv->magicgpc980[3] = 0x00000000; 323 - priv->magicgpc918 = 0x00088889; 324 - } 325 - break; 326 - case 0xc3: /* 450, 4/0/0/0, 2 */ 327 - priv->magic_not_rop_nr = 0x03; 328 - priv->magicgpc980[0] = 0x00003210; 329 - priv->magicgpc980[1] = 0x00000000; 330 - priv->magicgpc980[2] = 0x00000000; 331 - priv->magicgpc980[3] = 0x00000000; 332 - priv->magicgpc918 = 0x00200000; 333 - break; 334 - case 0xc4: /* 460, 3/4/0/0, 4 */ 335 - priv->magic_not_rop_nr = 0x01; 336 - priv->magicgpc980[0] = 
0x02321100; 337 - priv->magicgpc980[1] = 0x00000000; 338 - priv->magicgpc980[2] = 0x00000000; 339 - priv->magicgpc980[3] = 0x00000000; 340 - priv->magicgpc918 = 0x00124925; 341 - break; 342 - } 343 - 344 - if (!priv->magic_not_rop_nr) { 345 - NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", 346 - priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2], 347 - priv->tp_nr[3], priv->rop_nr); 348 - /* use 0xc3's values... */ 349 - priv->magic_not_rop_nr = 0x03; 350 - priv->magicgpc980[0] = 0x00003210; 351 - priv->magicgpc980[1] = 0x00000000; 352 - priv->magicgpc980[2] = 0x00000000; 353 - priv->magicgpc980[3] = 0x00000000; 354 - priv->magicgpc918 = 0x00200000; 355 - } 356 - 357 - nouveau_irq_register(dev, 12, nvc0_graph_isr); 358 - nouveau_irq_register(dev, 25, nvc0_runk140_isr); 359 - NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 360 - NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 361 - NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); 362 - NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 363 - NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ 364 - return 0; 365 - 366 - error: 367 - nvc0_graph_destroy(dev); 368 - return ret; 369 - } 370 - 371 309 static void 372 310 nvc0_graph_init_obj418880(struct drm_device *dev) 373 311 { 374 - struct drm_nouveau_private *dev_priv = dev->dev_private; 375 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 376 - struct nvc0_graph_priv *priv = pgraph->priv; 312 + struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 377 313 int i; 378 314 379 315 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); ··· 295 449 static void 296 450 nvc0_graph_init_gpc_0(struct drm_device *dev) 297 451 { 298 - struct drm_nouveau_private *dev_priv = dev->dev_private; 299 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 300 - int gpc; 301 - 302 - // TP ROP UNKVAL(magic_not_rop_nr) 303 - // 450: 4/0/0/0 2 3 304 - // 460: 3/4/0/0 4 1 305 - // 465: 3/4/4/0 4 7 306 - // 470: 3/3/4/4 5 5 307 - // 480: 3/4/4/4 6 6 452 + struct nvc0_graph_priv 
*priv = nv_engine(dev, NVOBJ_ENGINE_GR); 453 + u32 data[TP_MAX / 8]; 454 + u8 tpnr[GPC_MAX]; 455 + int i, gpc, tpc; 308 456 309 - // magicgpc918 310 - // 450: 00200000 00000000001000000000000000000000 311 - // 460: 00124925 00000000000100100100100100100101 312 - // 465: 000ba2e9 00000000000010111010001011101001 313 - // 470: 00092493 00000000000010010010010010010011 314 - // 480: 00088889 00000000000010001000100010001001 457 + /* 458 + * TP ROP UNKVAL(magic_not_rop_nr) 459 + * 450: 4/0/0/0 2 3 460 + * 460: 3/4/0/0 4 1 461 + * 465: 3/4/4/0 4 7 462 + * 470: 3/3/4/4 5 5 463 + * 480: 3/4/4/4 6 6 464 + * 465 + * magicgpc918 466 + * 450: 00200000 00000000001000000000000000000000 467 + * 460: 00124925 00000000000100100100100100100101 468 + * 465: 000ba2e9 00000000000010111010001011101001 469 + * 470: 00092493 00000000000010010010010010010011 470 + * 480: 00088889 00000000000010001000100010001001 471 + */ 315 472 316 - /* filled values up to tp_total, remainder 0 */ 317 - // 450: 00003210 00000000 00000000 00000000 318 - // 460: 02321100 00000000 00000000 00000000 319 - // 465: 22111000 00000233 00000000 00000000 320 - // 470: 11110000 00233222 00000000 00000000 321 - // 480: 11110000 03332222 00000000 00000000 322 - 323 - nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]); 324 - nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]); 325 - nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]); 326 - nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]); 473 + memset(data, 0x00, sizeof(data)); 474 + memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 475 + for (i = 0, gpc = -1; i < priv->tp_total; i++) { 476 + do { 477 + gpc = (gpc + 1) % priv->gpc_nr; 478 + } while (!tpnr[gpc]); 479 + tpc = priv->tp_nr[gpc] - tpnr[gpc]--; 480 + 481 + data[i / 8] |= tpc << ((i % 8) * 4); 482 + } 483 + 484 + nv_wr32(dev, GPC_BCAST(0x0980), data[0]); 485 + nv_wr32(dev, GPC_BCAST(0x0984), data[1]); 486 + nv_wr32(dev, GPC_BCAST(0x0988), data[2]); 487 + nv_wr32(dev, GPC_BCAST(0x098c), data[3]); 
327 488 328 489 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 329 490 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | ··· 362 509 static void 363 510 nvc0_graph_init_gpc_1(struct drm_device *dev) 364 511 { 365 - struct drm_nouveau_private *dev_priv = dev->dev_private; 366 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 512 + struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 367 513 int gpc, tp; 368 514 369 515 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { ··· 387 535 static void 388 536 nvc0_graph_init_rop(struct drm_device *dev) 389 537 { 390 - struct drm_nouveau_private *dev_priv = dev->dev_private; 391 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 538 + struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 392 539 int rop; 393 540 394 541 for (rop = 0; rop < priv->rop_nr; rop++) { ··· 398 547 } 399 548 } 400 549 401 - static int 402 - nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base, 403 - const char *code_fw, const char *data_fw) 550 + static void 551 + nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base, 552 + struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data) 404 553 { 405 - const struct firmware *fw; 406 - char name[32]; 407 - int ret, i; 408 - 409 - snprintf(name, sizeof(name), "nouveau/%s", data_fw); 410 - ret = request_firmware(&fw, name, &dev->pdev->dev); 411 - if (ret) { 412 - NV_ERROR(dev, "failed to load %s\n", data_fw); 413 - return ret; 414 - } 554 + int i; 415 555 416 556 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); 417 - for (i = 0; i < fw->size / 4; i++) 418 - nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]); 419 - release_firmware(fw); 420 - 421 - snprintf(name, sizeof(name), "nouveau/%s", code_fw); 422 - ret = request_firmware(&fw, name, &dev->pdev->dev); 423 - if (ret) { 424 - NV_ERROR(dev, "failed to load %s\n", code_fw); 425 - return ret; 426 - } 557 + for (i = 0; i < data->size / 4; i++) 558 + nv_wr32(dev, fuc_base + 0x01c4, data->data[i]); 427 559 428 
560 nv_wr32(dev, fuc_base + 0x0180, 0x01000000); 429 - for (i = 0; i < fw->size / 4; i++) { 561 + for (i = 0; i < code->size / 4; i++) { 430 562 if ((i & 0x3f) == 0) 431 563 nv_wr32(dev, fuc_base + 0x0188, i >> 6); 432 - nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]); 564 + nv_wr32(dev, fuc_base + 0x0184, code->data[i]); 433 565 } 434 - release_firmware(fw); 435 - 436 - return 0; 437 566 } 438 567 439 568 static int 440 569 nvc0_graph_init_ctxctl(struct drm_device *dev) 441 570 { 442 - struct drm_nouveau_private *dev_priv = dev->dev_private; 443 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 571 + struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 444 572 u32 r000260; 445 - int ret; 446 573 447 574 /* load fuc microcode */ 448 575 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 449 - ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d"); 450 - if (ret == 0) 451 - ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad"); 576 + nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d); 577 + nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad); 452 578 nv_wr32(dev, 0x000260, r000260); 453 - 454 - if (ret) 455 - return ret; 456 579 457 580 /* start both of them running */ 458 581 nv_wr32(dev, 0x409840, 0xffffffff); ··· 469 644 return 0; 470 645 } 471 646 472 - int 473 - nvc0_graph_init(struct drm_device *dev) 647 + static int 648 + nvc0_graph_init(struct drm_device *dev, int engine) 474 649 { 475 - struct drm_nouveau_private *dev_priv = dev->dev_private; 476 - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 477 650 int ret; 478 - 479 - dev_priv->engine.graph.accel_blocked = true; 480 - 481 - switch (dev_priv->chipset) { 482 - case 0xc0: 483 - case 0xc3: 484 - case 0xc4: 485 - break; 486 - default: 487 - NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); 488 - if (nouveau_noaccel != 0) 489 - return 0; 490 - break; 491 - } 492 651 493 652 nv_mask(dev, 0x000200, 
0x18001000, 0x00000000); 494 653 nv_mask(dev, 0x000200, 0x18001000, 0x18001000); 495 654 496 - if (!pgraph->priv) { 497 - ret = nvc0_graph_create(dev); 498 - if (ret) 499 - return ret; 500 - } 501 - 502 655 nvc0_graph_init_obj418880(dev); 503 656 nvc0_graph_init_regs(dev); 504 - //nvc0_graph_init_unitplemented_magics(dev); 657 + /*nvc0_graph_init_unitplemented_magics(dev);*/ 505 658 nvc0_graph_init_gpc_0(dev); 506 - //nvc0_graph_init_unitplemented_c242(dev); 659 + /*nvc0_graph_init_unitplemented_c242(dev);*/ 507 660 508 661 nv_wr32(dev, 0x400500, 0x00010001); 509 662 nv_wr32(dev, 0x400100, 0xffffffff); ··· 500 697 nv_wr32(dev, 0x400054, 0x34ce3464); 501 698 502 699 ret = nvc0_graph_init_ctxctl(dev); 503 - if (ret == 0) 504 - dev_priv->engine.graph.accel_blocked = false; 700 + if (ret) 701 + return ret; 702 + 505 703 return 0; 506 704 } 507 705 508 - static int 706 + int 509 707 nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) 510 708 { 511 709 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 610 806 units &= ~(1 << unit); 611 807 } 612 808 } 809 + 810 + static int 811 + nvc0_graph_create_fw(struct drm_device *dev, const char *fwname, 812 + struct nvc0_graph_fuc *fuc) 813 + { 814 + struct drm_nouveau_private *dev_priv = dev->dev_private; 815 + const struct firmware *fw; 816 + char f[32]; 817 + int ret; 818 + 819 + snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname); 820 + ret = request_firmware(&fw, f, &dev->pdev->dev); 821 + if (ret) { 822 + snprintf(f, sizeof(f), "nouveau/%s", fwname); 823 + ret = request_firmware(&fw, f, &dev->pdev->dev); 824 + if (ret) { 825 + NV_ERROR(dev, "failed to load %s\n", fwname); 826 + return ret; 827 + } 828 + } 829 + 830 + fuc->size = fw->size; 831 + fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); 832 + release_firmware(fw); 833 + return (fuc->data != NULL) ? 
0 : -ENOMEM; 834 + } 835 + 836 + static void 837 + nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc) 838 + { 839 + if (fuc->data) { 840 + kfree(fuc->data); 841 + fuc->data = NULL; 842 + } 843 + } 844 + 845 + static void 846 + nvc0_graph_destroy(struct drm_device *dev, int engine) 847 + { 848 + struct nvc0_graph_priv *priv = nv_engine(dev, engine); 849 + 850 + nvc0_graph_destroy_fw(&priv->fuc409c); 851 + nvc0_graph_destroy_fw(&priv->fuc409d); 852 + nvc0_graph_destroy_fw(&priv->fuc41ac); 853 + nvc0_graph_destroy_fw(&priv->fuc41ad); 854 + 855 + nouveau_irq_unregister(dev, 12); 856 + nouveau_irq_unregister(dev, 25); 857 + 858 + nouveau_gpuobj_ref(NULL, &priv->unk4188b8); 859 + nouveau_gpuobj_ref(NULL, &priv->unk4188b4); 860 + 861 + if (priv->grctx_vals) 862 + kfree(priv->grctx_vals); 863 + 864 + NVOBJ_ENGINE_DEL(dev, GR); 865 + kfree(priv); 866 + } 867 + 868 + int 869 + nvc0_graph_create(struct drm_device *dev) 870 + { 871 + struct drm_nouveau_private *dev_priv = dev->dev_private; 872 + struct nvc0_graph_priv *priv; 873 + int ret, gpc, i; 874 + 875 + switch (dev_priv->chipset) { 876 + case 0xc0: 877 + case 0xc3: 878 + case 0xc4: 879 + break; 880 + default: 881 + NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); 882 + return 0; 883 + } 884 + 885 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 886 + if (!priv) 887 + return -ENOMEM; 888 + 889 + priv->base.destroy = nvc0_graph_destroy; 890 + priv->base.init = nvc0_graph_init; 891 + priv->base.fini = nvc0_graph_fini; 892 + priv->base.context_new = nvc0_graph_context_new; 893 + priv->base.context_del = nvc0_graph_context_del; 894 + priv->base.object_new = nvc0_graph_object_new; 895 + 896 + NVOBJ_ENGINE_ADD(dev, GR, &priv->base); 897 + nouveau_irq_register(dev, 12, nvc0_graph_isr); 898 + nouveau_irq_register(dev, 25, nvc0_runk140_isr); 899 + 900 + if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || 901 + nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || 902 + nvc0_graph_create_fw(dev, "fuc41ac", 
&priv->fuc41ac) || 903 + nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { 904 + ret = 0; 905 + goto error; 906 + } 907 + 908 + 909 + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); 910 + if (ret) 911 + goto error; 912 + 913 + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8); 914 + if (ret) 915 + goto error; 916 + 917 + for (i = 0; i < 0x1000; i += 4) { 918 + nv_wo32(priv->unk4188b4, i, 0x00000010); 919 + nv_wo32(priv->unk4188b8, i, 0x00000010); 920 + } 921 + 922 + priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f; 923 + priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16; 924 + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 925 + priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608)); 926 + priv->tp_total += priv->tp_nr[gpc]; 927 + } 928 + 929 + /*XXX: these need figuring out... */ 930 + switch (dev_priv->chipset) { 931 + case 0xc0: 932 + if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ 933 + priv->magic_not_rop_nr = 0x07; 934 + /* filled values up to tp_total, the rest 0 */ 935 + priv->magicgpc918 = 0x000ba2e9; 936 + } else 937 + if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ 938 + priv->magic_not_rop_nr = 0x05; 939 + priv->magicgpc918 = 0x00092493; 940 + } else 941 + if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ 942 + priv->magic_not_rop_nr = 0x06; 943 + priv->magicgpc918 = 0x00088889; 944 + } 945 + break; 946 + case 0xc3: /* 450, 4/0/0/0, 2 */ 947 + priv->magic_not_rop_nr = 0x03; 948 + priv->magicgpc918 = 0x00200000; 949 + break; 950 + case 0xc4: /* 460, 3/4/0/0, 4 */ 951 + priv->magic_not_rop_nr = 0x01; 952 + priv->magicgpc918 = 0x00124925; 953 + break; 954 + } 955 + 956 + if (!priv->magic_not_rop_nr) { 957 + NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", 958 + priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2], 959 + priv->tp_nr[3], priv->rop_nr); 960 + /* use 0xc3's values... 
*/ 961 + priv->magic_not_rop_nr = 0x03; 962 + priv->magicgpc918 = 0x00200000; 963 + } 964 + 965 + NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 966 + NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 967 + NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); 968 + NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 969 + NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ 970 + return 0; 971 + 972 + error: 973 + nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR); 974 + return ret; 975 + } 976 + 977 + MODULE_FIRMWARE("nouveau/nvc0_fuc409c"); 978 + MODULE_FIRMWARE("nouveau/nvc0_fuc409d"); 979 + MODULE_FIRMWARE("nouveau/nvc0_fuc41ac"); 980 + MODULE_FIRMWARE("nouveau/nvc0_fuc41ad"); 981 + MODULE_FIRMWARE("nouveau/nvc3_fuc409c"); 982 + MODULE_FIRMWARE("nouveau/nvc3_fuc409d"); 983 + MODULE_FIRMWARE("nouveau/nvc3_fuc41ac"); 984 + MODULE_FIRMWARE("nouveau/nvc3_fuc41ad"); 985 + MODULE_FIRMWARE("nouveau/nvc4_fuc409c"); 986 + MODULE_FIRMWARE("nouveau/nvc4_fuc409d"); 987 + MODULE_FIRMWARE("nouveau/nvc4_fuc41ac"); 988 + MODULE_FIRMWARE("nouveau/nvc4_fuc41ad"); 989 + MODULE_FIRMWARE("nouveau/fuc409c"); 990 + MODULE_FIRMWARE("nouveau/fuc409d"); 991 + MODULE_FIRMWARE("nouveau/fuc41ac"); 992 + MODULE_FIRMWARE("nouveau/fuc41ad");
+20 -9
drivers/gpu/drm/nouveau/nvc0_graph.h
··· 28 28 #define GPC_MAX 4 29 29 #define TP_MAX 32 30 30 31 - #define ROP_BCAST(r) (0x408800 + (r)) 32 - #define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r)) 33 - #define GPC_BCAST(r) (0x418000 + (r)) 34 - #define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r)) 35 - #define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) 31 + #define ROP_BCAST(r) (0x408800 + (r)) 32 + #define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r)) 33 + #define GPC_BCAST(r) (0x418000 + (r)) 34 + #define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r)) 35 + #define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) 36 + 37 + struct nvc0_graph_fuc { 38 + u32 *data; 39 + u32 size; 40 + }; 36 41 37 42 struct nvc0_graph_priv { 43 + struct nouveau_exec_engine base; 44 + 45 + struct nvc0_graph_fuc fuc409c; 46 + struct nvc0_graph_fuc fuc409d; 47 + struct nvc0_graph_fuc fuc41ac; 48 + struct nvc0_graph_fuc fuc41ad; 49 + 38 50 u8 gpc_nr; 39 51 u8 rop_nr; 40 52 u8 tp_nr[GPC_MAX]; ··· 58 46 struct nouveau_gpuobj *unk4188b8; 59 47 60 48 u8 magic_not_rop_nr; 61 - u32 magicgpc980[4]; 62 49 u32 magicgpc918; 63 50 }; 64 51 65 52 struct nvc0_graph_chan { 66 53 struct nouveau_gpuobj *grctx; 67 - struct nouveau_gpuobj *unk408004; // 0x418810 too 68 - struct nouveau_gpuobj *unk40800c; // 0x419004 too 69 - struct nouveau_gpuobj *unk418810; // 0x419848 too 54 + struct nouveau_gpuobj *unk408004; /* 0x418810 too */ 55 + struct nouveau_gpuobj *unk40800c; /* 0x419004 too */ 56 + struct nouveau_gpuobj *unk418810; /* 0x419848 too */ 70 57 struct nouveau_gpuobj *mmio; 71 58 int mmio_nr; 72 59 };
+10 -10
drivers/gpu/drm/nouveau/nvc0_grctx.c
··· 1623 1623 { 1624 1624 struct drm_nouveau_private *dev_priv = dev->dev_private; 1625 1625 1626 - // ROPC_BROADCAST 1626 + /* ROPC_BROADCAST */ 1627 1627 nv_wr32(dev, 0x408800, 0x02802a3c); 1628 1628 nv_wr32(dev, 0x408804, 0x00000040); 1629 1629 nv_wr32(dev, 0x408808, 0x0003e00d); ··· 1647 1647 { 1648 1648 int i; 1649 1649 1650 - // GPC_BROADCAST 1650 + /* GPC_BROADCAST */ 1651 1651 nv_wr32(dev, 0x418380, 0x00000016); 1652 1652 nv_wr32(dev, 0x418400, 0x38004e00); 1653 1653 nv_wr32(dev, 0x418404, 0x71e0ffff); ··· 1728 1728 { 1729 1729 struct drm_nouveau_private *dev_priv = dev->dev_private; 1730 1730 1731 - // GPC_BROADCAST.TP_BROADCAST 1731 + /* GPC_BROADCAST.TP_BROADCAST */ 1732 1732 nv_wr32(dev, 0x419848, 0x00000000); 1733 1733 nv_wr32(dev, 0x419864, 0x0000012a); 1734 1734 nv_wr32(dev, 0x419888, 0x00000000); ··· 1741 1741 nv_wr32(dev, 0x419a1c, 0x00000000); 1742 1742 nv_wr32(dev, 0x419a20, 0x00000800); 1743 1743 if (dev_priv->chipset != 0xc0) 1744 - nv_wr32(dev, 0x00419ac4, 0x0007f440); // 0xc3 1744 + nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ 1745 1745 nv_wr32(dev, 0x419b00, 0x0a418820); 1746 1746 nv_wr32(dev, 0x419b04, 0x062080e6); 1747 1747 nv_wr32(dev, 0x419b08, 0x020398a4); ··· 1797 1797 nvc0_grctx_generate(struct nouveau_channel *chan) 1798 1798 { 1799 1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 1800 - struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 1801 - struct nvc0_graph_chan *grch = chan->pgraph_ctx; 1800 + struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 1801 + struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 1802 1802 struct drm_device *dev = chan->dev; 1803 1803 int i, gpc, tp, id; 1804 1804 u32 r000260, tmp; ··· 1912 1912 for (i = 1; i < 7; i++) 1913 1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 1914 1914 1915 - // GPC_BROADCAST 1915 + /* GPC_BROADCAST */ 1916 1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | 1917 1917 priv->magic_not_rop_nr); 1918 1918 for 
(i = 0; i < 6; i++) 1919 1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]); 1920 1920 1921 - // GPC_BROADCAST.TP_BROADCAST 1921 + /* GPC_BROADCAST.TP_BROADCAST */ 1922 1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | 1923 1923 priv->magic_not_rop_nr | 1924 1924 data2[0]); ··· 1926 1926 for (i = 0; i < 6; i++) 1927 1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]); 1928 1928 1929 - // UNK78xx 1929 + /* UNK78xx */ 1930 1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) | 1931 1931 priv->magic_not_rop_nr); 1932 1932 for (i = 0; i < 6; i++) ··· 1944 1944 gpc = -1; 1945 1945 for (i = 0, gpc = -1; i < 32; i++) { 1946 1946 int ltp = i * (priv->tp_total - 1) / 32; 1947 - 1947 + 1948 1948 do { 1949 1949 gpc = (gpc + 1) % priv->gpc_nr; 1950 1950 } while (!tpnr[gpc]);
+2 -2
drivers/gpu/drm/radeon/atom.c
··· 652 652 653 653 static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 654 654 { 655 - uint8_t count = U8((*ptr)++); 655 + unsigned count = U8((*ptr)++); 656 656 SDEBUG(" count: %d\n", count); 657 657 if (arg == ATOM_UNIT_MICROSEC) 658 658 udelay(count); 659 659 else 660 - schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 660 + msleep(count); 661 661 } 662 662 663 663 static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+20 -2
drivers/gpu/drm/radeon/atombios.h
··· 726 726 #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d 727 727 #define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e 728 728 #define ATOM_ENCODER_CMD_SETUP 0x0f 729 + #define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10 729 730 730 731 // ucStatus 731 732 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 ··· 766 765 USHORT usPixelClock; // in 10KHz; for bios convenient 767 766 ATOM_DIG_ENCODER_CONFIG_V3 acConfig; 768 767 UCHAR ucAction; 769 - UCHAR ucEncoderMode; 768 + union { 769 + UCHAR ucEncoderMode; 770 770 // =0: DP encoder 771 771 // =1: LVDS encoder 772 772 // =2: DVI encoder 773 773 // =3: HDMI encoder 774 774 // =4: SDVO encoder 775 775 // =5: DP audio 776 + UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE 777 + // =0: external DP 778 + // =1: internal DP2 779 + // =0x11: internal DP1 for NutMeg/Travis DP translator 780 + }; 776 781 UCHAR ucLaneNum; // how many lanes to enable 777 782 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 778 783 UCHAR ucReserved; ··· 823 816 UCHAR ucConfig; 824 817 }; 825 818 UCHAR ucAction; 826 - UCHAR ucEncoderMode; 819 + union { 820 + UCHAR ucEncoderMode; 827 821 // =0: DP encoder 828 822 // =1: LVDS encoder 829 823 // =2: DVI encoder 830 824 // =3: HDMI encoder 831 825 // =4: SDVO encoder 832 826 // =5: DP audio 827 + UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE 828 + // =0: external DP 829 + // =1: internal DP2 830 + // =0x11: internal DP1 for NutMeg/Travis DP translator 831 + }; 833 832 UCHAR ucLaneNum; // how many lanes to enable 834 833 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 835 834 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. 
New comparing to previous version ··· 848 835 #define PANEL_10BIT_PER_COLOR 0x03 849 836 #define PANEL_12BIT_PER_COLOR 0x04 850 837 #define PANEL_16BIT_PER_COLOR 0x05 838 + 839 + //define ucPanelMode 840 + #define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00 841 + #define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01 842 + #define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11 851 843 852 844 /****************************************************************************/ 853 845 // Structures used by UNIPHYTransmitterControlTable
+88 -44
drivers/gpu/drm/radeon/atombios_crtc.c
··· 420 420 421 421 if (ASIC_IS_DCE5(rdev)) { 422 422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); 423 - args.v3.ucSpreadSpectrumType = ss->type; 423 + args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 424 424 switch (pll_id) { 425 425 case ATOM_PPLL1: 426 426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; ··· 440 440 case ATOM_PPLL_INVALID: 441 441 return; 442 442 } 443 - args.v2.ucEnable = enable; 443 + args.v3.ucEnable = enable; 444 + if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) 445 + args.v3.ucEnable = ATOM_DISABLE; 444 446 } else if (ASIC_IS_DCE4(rdev)) { 445 447 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 446 - args.v2.ucSpreadSpectrumType = ss->type; 448 + args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 447 449 switch (pll_id) { 448 450 case ATOM_PPLL1: 449 451 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; ··· 466 464 return; 467 465 } 468 466 args.v2.ucEnable = enable; 467 + if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) 468 + args.v2.ucEnable = ATOM_DISABLE; 469 469 } else if (ASIC_IS_DCE3(rdev)) { 470 470 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 471 - args.v1.ucSpreadSpectrumType = ss->type; 471 + args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 472 472 args.v1.ucSpreadSpectrumStep = ss->step; 473 473 args.v1.ucSpreadSpectrumDelay = ss->delay; 474 474 args.v1.ucSpreadSpectrumRange = ss->range; 475 475 args.v1.ucPpll = pll_id; 476 476 args.v1.ucEnable = enable; 477 477 } else if (ASIC_IS_AVIVO(rdev)) { 478 - if (enable == ATOM_DISABLE) { 478 + if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || 479 + (ss->type & ATOM_EXTERNAL_SS_MASK)) { 479 480 atombios_disable_ss(crtc); 480 481 return; 481 482 } 482 483 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 483 - args.lvds_ss_2.ucSpreadSpectrumType = ss->type; 484 + 
args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 484 485 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; 485 486 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; 486 487 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; 487 488 args.lvds_ss_2.ucEnable = enable; 488 489 } else { 489 - if (enable == ATOM_DISABLE) { 490 + if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || 491 + (ss->type & ATOM_EXTERNAL_SS_MASK)) { 490 492 atombios_disable_ss(crtc); 491 493 return; 492 494 } 493 495 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 494 - args.lvds_ss.ucSpreadSpectrumType = ss->type; 496 + args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 495 497 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; 496 498 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; 497 499 args.lvds_ss.ucEnable = enable; ··· 518 512 struct radeon_device *rdev = dev->dev_private; 519 513 struct drm_encoder *encoder = NULL; 520 514 struct radeon_encoder *radeon_encoder = NULL; 515 + struct drm_connector *connector = NULL; 521 516 u32 adjusted_clock = mode->clock; 522 517 int encoder_mode = 0; 523 518 u32 dp_clock = mode->clock; ··· 553 546 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 554 547 if (encoder->crtc == crtc) { 555 548 radeon_encoder = to_radeon_encoder(encoder); 549 + connector = radeon_get_connector_for_encoder(encoder); 550 + if (connector) 551 + bpc = connector->display_info.bpc; 556 552 encoder_mode = atombios_get_encoder_mode(encoder); 557 - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 558 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 553 + if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 554 + radeon_encoder_is_dp_bridge(encoder)) { 559 555 if (connector) { 560 556 struct radeon_connector *radeon_connector = 
to_radeon_connector(connector); 561 557 struct radeon_connector_atom_dig *dig_connector = ··· 622 612 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 623 613 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 624 614 args.v1.ucEncodeMode = encoder_mode; 625 - if (ss_enabled) 615 + if (ss_enabled && ss->percentage) 626 616 args.v1.ucConfig |= 627 617 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 628 618 ··· 635 625 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 636 626 args.v3.sInput.ucEncodeMode = encoder_mode; 637 627 args.v3.sInput.ucDispPllConfig = 0; 638 - if (ss_enabled) 628 + if (ss_enabled && ss->percentage) 639 629 args.v3.sInput.ucDispPllConfig |= 640 630 DISPPLL_CONFIG_SS_ENABLE; 641 - if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 631 + if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || 632 + radeon_encoder_is_dp_bridge(encoder)) { 642 633 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 643 634 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 644 635 args.v3.sInput.ucDispPllConfig |= ··· 765 754 u32 ref_div, 766 755 u32 fb_div, 767 756 u32 frac_fb_div, 768 - u32 post_div) 757 + u32 post_div, 758 + int bpc, 759 + bool ss_enabled, 760 + struct radeon_atom_ss *ss) 769 761 { 770 762 struct drm_device *dev = crtc->dev; 771 763 struct radeon_device *rdev = dev->dev_private; ··· 815 801 args.v3.ucPostDiv = post_div; 816 802 args.v3.ucPpll = pll_id; 817 803 args.v3.ucMiscInfo = (pll_id << 2); 804 + if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 805 + args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; 818 806 args.v3.ucTransmitterId = encoder_id; 819 807 args.v3.ucEncoderMode = encoder_mode; 820 808 break; ··· 828 812 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 829 813 args.v5.ucPostDiv = post_div; 830 814 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ 815 + if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 816 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; 817 + switch (bpc) { 818 + case 8: 819 + default: 820 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; 821 + break; 822 + case 10: 823 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; 824 + break; 825 + } 831 826 args.v5.ucTransmitterID = encoder_id; 832 827 args.v5.ucEncoderMode = encoder_mode; 833 828 args.v5.ucPpll = pll_id; ··· 851 824 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 852 825 args.v6.ucPostDiv = post_div; 853 826 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ 827 + if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 828 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; 829 + switch (bpc) { 830 + case 8: 831 + default: 832 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; 833 + break; 834 + case 10: 835 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; 836 + break; 837 + case 12: 838 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; 839 + break; 840 + case 16: 841 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; 842 + break; 843 + } 854 844 args.v6.ucTransmitterID = encoder_id; 855 845 args.v6.ucEncoderMode = encoder_mode; 856 846 args.v6.ucPpll = pll_id; ··· 899 855 int encoder_mode = 0; 900 856 struct radeon_atom_ss ss; 901 857 bool ss_enabled = false; 858 + int bpc = 8; 902 859 903 860 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 904 861 if (encoder->crtc == crtc) { ··· 936 891 struct radeon_connector_atom_dig *dig_connector = 937 892 radeon_connector->con_priv; 938 893 int dp_clock; 894 + bpc = connector->display_info.bpc; 939 895 940 896 switch (encoder_mode) { 941 897 case ATOM_ENCODER_MODE_DP: 942 898 /* DP/eDP */ 943 899 dp_clock = dig_connector->dp_clock / 10; 944 - if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 945 - if (ASIC_IS_DCE4(rdev)) 946 - ss_enabled = 947 - radeon_atombios_get_asic_ss_info(rdev, &ss, 948 - 
dig->lcd_ss_id, 949 - dp_clock); 950 - else 900 + if (ASIC_IS_DCE4(rdev)) 901 + ss_enabled = 902 + radeon_atombios_get_asic_ss_info(rdev, &ss, 903 + ASIC_INTERNAL_SS_ON_DP, 904 + dp_clock); 905 + else { 906 + if (dp_clock == 16200) { 951 907 ss_enabled = 952 908 radeon_atombios_get_ppll_ss_info(rdev, &ss, 953 - dig->lcd_ss_id); 954 - } else { 955 - if (ASIC_IS_DCE4(rdev)) 956 - ss_enabled = 957 - radeon_atombios_get_asic_ss_info(rdev, &ss, 958 - ASIC_INTERNAL_SS_ON_DP, 959 - dp_clock); 960 - else { 961 - if (dp_clock == 16200) { 962 - ss_enabled = 963 - radeon_atombios_get_ppll_ss_info(rdev, &ss, 964 - ATOM_DP_SS_ID2); 965 - if (!ss_enabled) 966 - ss_enabled = 967 - radeon_atombios_get_ppll_ss_info(rdev, &ss, 968 - ATOM_DP_SS_ID1); 969 - } else 909 + ATOM_DP_SS_ID2); 910 + if (!ss_enabled) 970 911 ss_enabled = 971 912 radeon_atombios_get_ppll_ss_info(rdev, &ss, 972 913 ATOM_DP_SS_ID1); 973 - } 914 + } else 915 + ss_enabled = 916 + radeon_atombios_get_ppll_ss_info(rdev, &ss, 917 + ATOM_DP_SS_ID1); 974 918 } 975 919 break; 976 920 case ATOM_ENCODER_MODE_LVDS: ··· 1008 974 1009 975 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1010 976 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1011 - ref_div, fb_div, frac_fb_div, post_div); 977 + ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); 1012 978 1013 979 if (ss_enabled) { 1014 980 /* calculate ss amount and step size */ ··· 1016 982 u32 step_size; 1017 983 u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; 1018 984 ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; 1019 - ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 985 + ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 1020 986 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; 1021 987 if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) 1022 988 step_size = (4 * amount * ref_div * (ss.rate * 2048)) / ··· 1429 1395 uint32_t pll_in_use = 0; 
1430 1396 1431 1397 if (ASIC_IS_DCE4(rdev)) { 1432 - /* if crtc is driving DP and we have an ext clock, use that */ 1433 1398 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1434 1399 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1400 + /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, 1401 + * depending on the asic: 1402 + * DCE4: PPLL or ext clock 1403 + * DCE5: DCPLL or ext clock 1404 + * 1405 + * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip 1406 + * PPLL/DCPLL programming and only program the DP DTO for the 1407 + * crtc virtual pixel clock. 1408 + */ 1435 1409 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { 1436 - if (rdev->clock.dp_extclk) 1410 + if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) 1437 1411 return ATOM_PPLL_INVALID; 1438 1412 } 1439 1413 } ··· 1557 1515 static void atombios_crtc_disable(struct drm_crtc *crtc) 1558 1516 { 1559 1517 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1518 + struct radeon_atom_ss ss; 1519 + 1560 1520 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1561 1521 1562 1522 switch (radeon_crtc->pll_id) { ··· 1566 1522 case ATOM_PPLL2: 1567 1523 /* disable the ppll */ 1568 1524 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1569 - 0, 0, ATOM_DISABLE, 0, 0, 0, 0); 1525 + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 1570 1526 break; 1571 1527 default: 1572 1528 break;
+638 -526
drivers/gpu/drm/radeon/atombios_dp.c
··· 43 43 "0dB", "3.5dB", "6dB", "9.5dB" 44 44 }; 45 45 46 - static const int dp_clocks[] = { 47 - 54000, /* 1 lane, 1.62 Ghz */ 48 - 90000, /* 1 lane, 2.70 Ghz */ 49 - 108000, /* 2 lane, 1.62 Ghz */ 50 - 180000, /* 2 lane, 2.70 Ghz */ 51 - 216000, /* 4 lane, 1.62 Ghz */ 52 - 360000, /* 4 lane, 2.70 Ghz */ 46 + /***** radeon AUX functions *****/ 47 + union aux_channel_transaction { 48 + PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 49 + PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 53 50 }; 54 51 55 - static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); 56 - 57 - /* common helper functions */ 58 - static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 52 + static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, 53 + u8 *send, int send_bytes, 54 + u8 *recv, int recv_size, 55 + u8 delay, u8 *ack) 59 56 { 60 - int i; 61 - u8 max_link_bw; 62 - u8 max_lane_count; 57 + struct drm_device *dev = chan->dev; 58 + struct radeon_device *rdev = dev->dev_private; 59 + union aux_channel_transaction args; 60 + int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 61 + unsigned char *base; 62 + int recv_bytes; 63 63 64 - if (!dpcd) 65 - return 0; 64 + memset(&args, 0, sizeof(args)); 66 65 67 - max_link_bw = dpcd[DP_MAX_LINK_RATE]; 68 - max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 66 + base = (unsigned char *)rdev->mode_info.atom_context->scratch; 69 67 70 - switch (max_link_bw) { 71 - case DP_LINK_BW_1_62: 72 - default: 73 - for (i = 0; i < num_dp_clocks; i++) { 74 - if (i % 2) 75 - continue; 76 - switch (max_lane_count) { 77 - case 1: 78 - if (i > 1) 79 - return 0; 80 - break; 81 - case 2: 82 - if (i > 3) 83 - return 0; 84 - break; 85 - case 4: 86 - default: 87 - break; 88 - } 89 - if (dp_clocks[i] > mode_clock) { 90 - if (i < 2) 91 - return 1; 92 - else if (i < 4) 93 - return 2; 94 - else 95 - return 4; 96 - } 97 - } 68 + memcpy(base, send, send_bytes); 69 + 70 + args.v1.lpAuxRequest = 0; 
71 + args.v1.lpDataOut = 16; 72 + args.v1.ucDataOutLen = 0; 73 + args.v1.ucChannelID = chan->rec.i2c_id; 74 + args.v1.ucDelay = delay / 10; 75 + if (ASIC_IS_DCE4(rdev)) 76 + args.v2.ucHPD_ID = chan->rec.hpd; 77 + 78 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 79 + 80 + *ack = args.v1.ucReplyStatus; 81 + 82 + /* timeout */ 83 + if (args.v1.ucReplyStatus == 1) { 84 + DRM_DEBUG_KMS("dp_aux_ch timeout\n"); 85 + return -ETIMEDOUT; 86 + } 87 + 88 + /* flags not zero */ 89 + if (args.v1.ucReplyStatus == 2) { 90 + DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); 91 + return -EBUSY; 92 + } 93 + 94 + /* error */ 95 + if (args.v1.ucReplyStatus == 3) { 96 + DRM_DEBUG_KMS("dp_aux_ch error\n"); 97 + return -EIO; 98 + } 99 + 100 + recv_bytes = args.v1.ucDataOutLen; 101 + if (recv_bytes > recv_size) 102 + recv_bytes = recv_size; 103 + 104 + if (recv && recv_size) 105 + memcpy(recv, base + 16, recv_bytes); 106 + 107 + return recv_bytes; 108 + } 109 + 110 + static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, 111 + u16 address, u8 *send, u8 send_bytes, u8 delay) 112 + { 113 + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 114 + int ret; 115 + u8 msg[20]; 116 + int msg_bytes = send_bytes + 4; 117 + u8 ack; 118 + 119 + if (send_bytes > 16) 120 + return -1; 121 + 122 + msg[0] = address; 123 + msg[1] = address >> 8; 124 + msg[2] = AUX_NATIVE_WRITE << 4; 125 + msg[3] = (msg_bytes << 4) | (send_bytes - 1); 126 + memcpy(&msg[4], send, send_bytes); 127 + 128 + while (1) { 129 + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 130 + msg, msg_bytes, NULL, 0, delay, &ack); 131 + if (ret < 0) 132 + return ret; 133 + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 134 + break; 135 + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 136 + udelay(400); 137 + else 138 + return -EIO; 139 + } 140 + 141 + return send_bytes; 142 + } 143 + 144 + static int 
radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, 145 + u16 address, u8 *recv, int recv_bytes, u8 delay) 146 + { 147 + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 148 + u8 msg[4]; 149 + int msg_bytes = 4; 150 + u8 ack; 151 + int ret; 152 + 153 + msg[0] = address; 154 + msg[1] = address >> 8; 155 + msg[2] = AUX_NATIVE_READ << 4; 156 + msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 157 + 158 + while (1) { 159 + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 160 + msg, msg_bytes, recv, recv_bytes, delay, &ack); 161 + if (ret == 0) 162 + return -EPROTO; 163 + if (ret < 0) 164 + return ret; 165 + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 166 + return ret; 167 + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 168 + udelay(400); 169 + else 170 + return -EIO; 171 + } 172 + } 173 + 174 + static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, 175 + u16 reg, u8 val) 176 + { 177 + radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0); 178 + } 179 + 180 + static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector, 181 + u16 reg) 182 + { 183 + u8 val = 0; 184 + 185 + radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0); 186 + 187 + return val; 188 + } 189 + 190 + int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 191 + u8 write_byte, u8 *read_byte) 192 + { 193 + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 194 + struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; 195 + u16 address = algo_data->address; 196 + u8 msg[5]; 197 + u8 reply[2]; 198 + unsigned retry; 199 + int msg_bytes; 200 + int reply_bytes = 1; 201 + int ret; 202 + u8 ack; 203 + 204 + /* Set up the command byte */ 205 + if (mode & MODE_I2C_READ) 206 + msg[2] = AUX_I2C_READ << 4; 207 + else 208 + msg[2] = AUX_I2C_WRITE << 4; 209 + 210 + if (!(mode & MODE_I2C_STOP)) 211 + msg[2] |= AUX_I2C_MOT << 4; 212 + 213 + msg[0] = address; 
214 + msg[1] = address >> 8; 215 + 216 + switch (mode) { 217 + case MODE_I2C_WRITE: 218 + msg_bytes = 5; 219 + msg[3] = msg_bytes << 4; 220 + msg[4] = write_byte; 98 221 break; 99 - case DP_LINK_BW_2_7: 100 - for (i = 0; i < num_dp_clocks; i++) { 101 - switch (max_lane_count) { 102 - case 1: 103 - if (i > 1) 104 - return 0; 105 - break; 106 - case 2: 107 - if (i > 3) 108 - return 0; 109 - break; 110 - case 4: 111 - default: 112 - break; 113 - } 114 - if (dp_clocks[i] > mode_clock) { 115 - if (i < 2) 116 - return 1; 117 - else if (i < 4) 118 - return 2; 119 - else 120 - return 4; 121 - } 122 - } 222 + case MODE_I2C_READ: 223 + msg_bytes = 4; 224 + msg[3] = msg_bytes << 4; 225 + break; 226 + default: 227 + msg_bytes = 4; 228 + msg[3] = 3 << 4; 123 229 break; 124 230 } 125 231 126 - return 0; 127 - } 128 - 129 - static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 130 - { 131 - int i; 132 - u8 max_link_bw; 133 - u8 max_lane_count; 134 - 135 - if (!dpcd) 136 - return 0; 137 - 138 - max_link_bw = dpcd[DP_MAX_LINK_RATE]; 139 - max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 140 - 141 - switch (max_link_bw) { 142 - case DP_LINK_BW_1_62: 143 - default: 144 - for (i = 0; i < num_dp_clocks; i++) { 145 - if (i % 2) 146 - continue; 147 - switch (max_lane_count) { 148 - case 1: 149 - if (i > 1) 150 - return 0; 151 - break; 152 - case 2: 153 - if (i > 3) 154 - return 0; 155 - break; 156 - case 4: 157 - default: 158 - break; 159 - } 160 - if (dp_clocks[i] > mode_clock) 161 - return 162000; 232 + for (retry = 0; retry < 4; retry++) { 233 + ret = radeon_process_aux_ch(auxch, 234 + msg, msg_bytes, reply, reply_bytes, 0, &ack); 235 + if (ret < 0) { 236 + DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 237 + return ret; 162 238 } 163 - break; 164 - case DP_LINK_BW_2_7: 165 - for (i = 0; i < num_dp_clocks; i++) { 166 - switch (max_lane_count) { 167 - case 1: 168 - if (i > 1) 169 - return 0; 170 - break; 171 - case 2: 172 - if (i > 3) 173 - return 
0; 174 - break; 175 - case 4: 176 - default: 177 - break; 178 - } 179 - if (dp_clocks[i] > mode_clock) 180 - return (i % 2) ? 270000 : 162000; 239 + 240 + switch (ack & AUX_NATIVE_REPLY_MASK) { 241 + case AUX_NATIVE_REPLY_ACK: 242 + /* I2C-over-AUX Reply field is only valid 243 + * when paired with AUX ACK. 244 + */ 245 + break; 246 + case AUX_NATIVE_REPLY_NACK: 247 + DRM_DEBUG_KMS("aux_ch native nack\n"); 248 + return -EREMOTEIO; 249 + case AUX_NATIVE_REPLY_DEFER: 250 + DRM_DEBUG_KMS("aux_ch native defer\n"); 251 + udelay(400); 252 + continue; 253 + default: 254 + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); 255 + return -EREMOTEIO; 256 + } 257 + 258 + switch (ack & AUX_I2C_REPLY_MASK) { 259 + case AUX_I2C_REPLY_ACK: 260 + if (mode == MODE_I2C_READ) 261 + *read_byte = reply[0]; 262 + return ret; 263 + case AUX_I2C_REPLY_NACK: 264 + DRM_DEBUG_KMS("aux_i2c nack\n"); 265 + return -EREMOTEIO; 266 + case AUX_I2C_REPLY_DEFER: 267 + DRM_DEBUG_KMS("aux_i2c defer\n"); 268 + udelay(400); 269 + break; 270 + default: 271 + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); 272 + return -EREMOTEIO; 181 273 } 182 274 } 183 275 184 - return 0; 276 + DRM_ERROR("aux i2c too many retries, giving up\n"); 277 + return -EREMOTEIO; 185 278 } 186 279 187 - int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 188 - { 189 - int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); 190 - int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock); 191 - 192 - if ((lanes == 0) || (dp_clock == 0)) 193 - return MODE_CLOCK_HIGH; 194 - 195 - return MODE_OK; 196 - } 280 + /***** general DP utility functions *****/ 197 281 198 282 static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) 199 283 { ··· 326 242 return true; 327 243 } 328 244 329 - static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 245 + static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], 330 246 int lane) 331 247 332 248 { ··· 339 255 return ((l >> s) & 
0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 340 256 } 341 257 342 - static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 258 + static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], 343 259 int lane) 344 260 { 345 261 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); ··· 351 267 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 352 268 } 353 269 354 - /* XXX fix me -- chip specific */ 355 270 #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 356 - static u8 dp_pre_emphasis_max(u8 voltage_swing) 357 - { 358 - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 359 - case DP_TRAIN_VOLTAGE_SWING_400: 360 - return DP_TRAIN_PRE_EMPHASIS_6; 361 - case DP_TRAIN_VOLTAGE_SWING_600: 362 - return DP_TRAIN_PRE_EMPHASIS_6; 363 - case DP_TRAIN_VOLTAGE_SWING_800: 364 - return DP_TRAIN_PRE_EMPHASIS_3_5; 365 - case DP_TRAIN_VOLTAGE_SWING_1200: 366 - default: 367 - return DP_TRAIN_PRE_EMPHASIS_0; 368 - } 369 - } 271 + #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 370 272 371 273 static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], 372 274 int lane_count, ··· 378 308 } 379 309 380 310 if (v >= DP_VOLTAGE_MAX) 381 - v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 311 + v |= DP_TRAIN_MAX_SWING_REACHED; 382 312 383 - if (p >= dp_pre_emphasis_max(v)) 384 - p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 313 + if (p >= DP_PRE_EMPHASIS_MAX) 314 + p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 385 315 386 316 DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", 387 317 voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], ··· 391 321 train_set[lane] = v | p; 392 322 } 393 323 394 - union aux_channel_transaction { 395 - PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 396 - PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 397 - }; 398 - 399 - /* radeon aux chan functions */ 400 - bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, 
401 - int num_bytes, u8 *read_byte, 402 - u8 read_buf_len, u8 delay) 324 + /* convert bits per color to bits per pixel */ 325 + /* get bpc from the EDID */ 326 + static int convert_bpc_to_bpp(int bpc) 403 327 { 404 - struct drm_device *dev = chan->dev; 405 - struct radeon_device *rdev = dev->dev_private; 406 - union aux_channel_transaction args; 407 - int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 408 - unsigned char *base; 409 - int retry_count = 0; 328 + if (bpc == 0) 329 + return 24; 330 + else 331 + return bpc * 3; 332 + } 410 333 411 - memset(&args, 0, sizeof(args)); 334 + /* get the max pix clock supported by the link rate and lane num */ 335 + static int dp_get_max_dp_pix_clock(int link_rate, 336 + int lane_num, 337 + int bpp) 338 + { 339 + return (link_rate * lane_num * 8) / bpp; 340 + } 412 341 413 - base = (unsigned char *)rdev->mode_info.atom_context->scratch; 342 + static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) 343 + { 344 + switch (dpcd[DP_MAX_LINK_RATE]) { 345 + case DP_LINK_BW_1_62: 346 + default: 347 + return 162000; 348 + case DP_LINK_BW_2_7: 349 + return 270000; 350 + case DP_LINK_BW_5_4: 351 + return 540000; 352 + } 353 + } 414 354 415 - retry: 416 - memcpy(base, req_bytes, num_bytes); 355 + static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) 356 + { 357 + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 358 + } 417 359 418 - args.v1.lpAuxRequest = 0; 419 - args.v1.lpDataOut = 16; 420 - args.v1.ucDataOutLen = 0; 421 - args.v1.ucChannelID = chan->rec.i2c_id; 422 - args.v1.ucDelay = delay / 10; 423 - if (ASIC_IS_DCE4(rdev)) 424 - args.v2.ucHPD_ID = chan->rec.hpd; 360 + static u8 dp_get_dp_link_rate_coded(int link_rate) 361 + { 362 + switch (link_rate) { 363 + case 162000: 364 + default: 365 + return DP_LINK_BW_1_62; 366 + case 270000: 367 + return DP_LINK_BW_2_7; 368 + case 540000: 369 + return DP_LINK_BW_5_4; 370 + } 371 + } 425 372 426 - atom_execute_table(rdev->mode_info.atom_context, index, 
(uint32_t *)&args); 373 + /***** radeon specific DP functions *****/ 427 374 428 - if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { 429 - if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) 430 - goto retry; 431 - DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", 432 - req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 433 - chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); 434 - return false; 375 + /* First get the min lane# when low rate is used according to pixel clock 376 + * (prefer low rate), second check max lane# supported by DP panel, 377 + * if the max lane# < low rate lane# then use max lane# instead. 378 + */ 379 + static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, 380 + u8 dpcd[DP_DPCD_SIZE], 381 + int pix_clock) 382 + { 383 + int bpp = convert_bpc_to_bpp(connector->display_info.bpc); 384 + int max_link_rate = dp_get_max_link_rate(dpcd); 385 + int max_lane_num = dp_get_max_lane_number(dpcd); 386 + int lane_num; 387 + int max_dp_pix_clock; 388 + 389 + for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { 390 + max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); 391 + if (pix_clock <= max_dp_pix_clock) 392 + break; 435 393 } 436 394 437 - if (args.v1.ucDataOutLen && read_byte && read_buf_len) { 438 - if (read_buf_len < args.v1.ucDataOutLen) { 439 - DRM_ERROR("Buffer to small for return answer %d %d\n", 440 - read_buf_len, args.v1.ucDataOutLen); 441 - return false; 442 - } 443 - { 444 - int len = min(read_buf_len, args.v1.ucDataOutLen); 445 - memcpy(read_byte, base + 16, len); 446 - } 395 + return lane_num; 396 + } 397 + 398 + static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, 399 + u8 dpcd[DP_DPCD_SIZE], 400 + int pix_clock) 401 + { 402 + int bpp = convert_bpc_to_bpp(connector->display_info.bpc); 403 + int lane_num, max_pix_clock; 404 + 405 + if (radeon_connector_encoder_is_dp_bridge(connector)) 406 + return 270000; 407 + 408 
+ lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); 409 + max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp); 410 + if (pix_clock <= max_pix_clock) 411 + return 162000; 412 + max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp); 413 + if (pix_clock <= max_pix_clock) 414 + return 270000; 415 + if (radeon_connector_is_dp12_capable(connector)) { 416 + max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp); 417 + if (pix_clock <= max_pix_clock) 418 + return 540000; 447 419 } 448 - return true; 420 + 421 + return dp_get_max_link_rate(dpcd); 449 422 } 450 423 451 - bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, 452 - uint8_t send_bytes, uint8_t *send) 453 - { 454 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 455 - u8 msg[20]; 456 - u8 msg_len, dp_msg_len; 457 - bool ret; 458 - 459 - dp_msg_len = 4; 460 - msg[0] = address; 461 - msg[1] = address >> 8; 462 - msg[2] = AUX_NATIVE_WRITE << 4; 463 - dp_msg_len += send_bytes; 464 - msg[3] = (dp_msg_len << 4) | (send_bytes - 1); 465 - 466 - if (send_bytes > 16) 467 - return false; 468 - 469 - memcpy(&msg[4], send, send_bytes); 470 - msg_len = 4 + send_bytes; 471 - ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); 472 - return ret; 473 - } 474 - 475 - bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, 476 - uint8_t delay, uint8_t expected_bytes, 477 - uint8_t *read_p) 478 - { 479 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 480 - u8 msg[20]; 481 - u8 msg_len, dp_msg_len; 482 - bool ret = false; 483 - msg_len = 4; 484 - dp_msg_len = 4; 485 - msg[0] = address; 486 - msg[1] = address >> 8; 487 - msg[2] = AUX_NATIVE_READ << 4; 488 - msg[3] = (dp_msg_len) << 4; 489 - msg[3] |= expected_bytes - 1; 490 - 491 - ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, 
delay); 492 - return ret; 493 - } 494 - 495 - /* radeon dp functions */ 496 - static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, 497 - uint8_t ucconfig, uint8_t lane_num) 424 + static u8 radeon_dp_encoder_service(struct radeon_device *rdev, 425 + int action, int dp_clock, 426 + u8 ucconfig, u8 lane_num) 498 427 { 499 428 DP_ENCODER_SERVICE_PARAMETERS args; 500 429 int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); ··· 523 454 { 524 455 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 525 456 u8 msg[25]; 526 - int ret; 457 + int ret, i; 527 458 528 - ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); 529 - if (ret) { 459 + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); 460 + if (ret > 0) { 530 461 memcpy(dig_connector->dpcd, msg, 8); 531 - { 532 - int i; 533 - DRM_DEBUG_KMS("DPCD: "); 534 - for (i = 0; i < 8; i++) 535 - DRM_DEBUG_KMS("%02x ", msg[i]); 536 - DRM_DEBUG_KMS("\n"); 537 - } 462 + DRM_DEBUG_KMS("DPCD: "); 463 + for (i = 0; i < 8; i++) 464 + DRM_DEBUG_KMS("%02x ", msg[i]); 465 + DRM_DEBUG_KMS("\n"); 538 466 return true; 539 467 } 540 468 dig_connector->dpcd[0] = 0; 541 469 return false; 542 470 } 543 471 472 + static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, 473 + struct drm_connector *connector) 474 + { 475 + struct drm_device *dev = encoder->dev; 476 + struct radeon_device *rdev = dev->dev_private; 477 + int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 478 + 479 + if (!ASIC_IS_DCE4(rdev)) 480 + return; 481 + 482 + if (radeon_connector_encoder_is_dp_bridge(connector)) 483 + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 484 + 485 + atombios_dig_encoder_setup(encoder, 486 + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, 487 + panel_mode); 488 + } 489 + 544 490 void radeon_dp_set_link_config(struct drm_connector *connector, 545 491 struct drm_display_mode *mode) 546 492 { 547 - struct radeon_connector *radeon_connector; 
493 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 548 494 struct radeon_connector_atom_dig *dig_connector; 549 495 550 - if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && 551 - (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 552 - return; 553 - 554 - radeon_connector = to_radeon_connector(connector); 555 496 if (!radeon_connector->con_priv) 556 497 return; 557 498 dig_connector = radeon_connector->con_priv; 558 499 559 - dig_connector->dp_clock = 560 - dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); 561 - dig_connector->dp_lane_count = 562 - dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); 500 + if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 501 + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { 502 + dig_connector->dp_clock = 503 + radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 504 + dig_connector->dp_lane_count = 505 + radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); 506 + } 563 507 } 564 508 565 - int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, 509 + int radeon_dp_mode_valid_helper(struct drm_connector *connector, 566 510 struct drm_display_mode *mode) 567 511 { 568 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 512 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 513 + struct radeon_connector_atom_dig *dig_connector; 514 + int dp_clock; 569 515 570 - return dp_mode_valid(dig_connector->dpcd, mode->clock); 516 + if (!radeon_connector->con_priv) 517 + return MODE_CLOCK_HIGH; 518 + dig_connector = radeon_connector->con_priv; 519 + 520 + dp_clock = 521 + radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); 522 + 523 + if ((dp_clock == 540000) && 524 + (!radeon_connector_is_dp12_capable(connector))) 525 + return MODE_CLOCK_HIGH; 526 + 527 + return MODE_OK; 571 528 } 572 529 573 - static 
bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, 574 - u8 link_status[DP_LINK_STATUS_SIZE]) 530 + static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, 531 + u8 link_status[DP_LINK_STATUS_SIZE]) 575 532 { 576 533 int ret; 577 - ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, 578 - DP_LINK_STATUS_SIZE, link_status); 579 - if (!ret) { 534 + ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 535 + link_status, DP_LINK_STATUS_SIZE, 100); 536 + if (ret <= 0) { 580 537 DRM_ERROR("displayport link status failed\n"); 581 538 return false; 582 539 } ··· 613 518 return true; 614 519 } 615 520 616 - bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) 617 - { 618 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 521 + struct radeon_dp_link_train_info { 522 + struct radeon_device *rdev; 523 + struct drm_encoder *encoder; 524 + struct drm_connector *connector; 525 + struct radeon_connector *radeon_connector; 526 + int enc_id; 527 + int dp_clock; 528 + int dp_lane_count; 529 + int rd_interval; 530 + bool tp3_supported; 531 + u8 dpcd[8]; 532 + u8 train_set[4]; 619 533 u8 link_status[DP_LINK_STATUS_SIZE]; 534 + u8 tries; 535 + }; 620 536 621 - if (!atom_dp_get_link_status(radeon_connector, link_status)) 622 - return false; 623 - if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) 624 - return false; 625 - return true; 537 + static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) 538 + { 539 + /* set the initial vs/emph on the source */ 540 + atombios_dig_transmitter_setup(dp_info->encoder, 541 + ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, 542 + 0, dp_info->train_set[0]); /* sets all lanes at once */ 543 + 544 + /* set the vs/emph on the sink */ 545 + radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET, 546 + dp_info->train_set, dp_info->dp_lane_count, 0); 626 547 } 627 548 628 - 
static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) 549 + static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) 629 550 { 630 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 551 + int rtp = 0; 631 552 632 - if (dig_connector->dpcd[0] >= 0x11) { 633 - radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, 634 - &power_state); 553 + /* set training pattern on the source */ 554 + if (ASIC_IS_DCE4(dp_info->rdev)) { 555 + switch (tp) { 556 + case DP_TRAINING_PATTERN_1: 557 + rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; 558 + break; 559 + case DP_TRAINING_PATTERN_2: 560 + rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2; 561 + break; 562 + case DP_TRAINING_PATTERN_3: 563 + rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3; 564 + break; 565 + } 566 + atombios_dig_encoder_setup(dp_info->encoder, rtp, 0); 567 + } else { 568 + switch (tp) { 569 + case DP_TRAINING_PATTERN_1: 570 + rtp = 0; 571 + break; 572 + case DP_TRAINING_PATTERN_2: 573 + rtp = 1; 574 + break; 575 + } 576 + radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 577 + dp_info->dp_clock, dp_info->enc_id, rtp); 578 + } 579 + 580 + /* enable training pattern on the sink */ 581 + radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp); 582 + } 583 + 584 + static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) 585 + { 586 + u8 tmp; 587 + 588 + /* power up the sink */ 589 + if (dp_info->dpcd[0] >= 0x11) 590 + radeon_write_dpcd_reg(dp_info->radeon_connector, 591 + DP_SET_POWER, DP_SET_POWER_D0); 592 + 593 + /* possibly enable downspread on the sink */ 594 + if (dp_info->dpcd[3] & 0x1) 595 + radeon_write_dpcd_reg(dp_info->radeon_connector, 596 + DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); 597 + else 598 + radeon_write_dpcd_reg(dp_info->radeon_connector, 599 + DP_DOWNSPREAD_CTRL, 0); 600 + 601 + radeon_dp_set_panel_mode(dp_info->encoder, 
dp_info->connector); 602 + 603 + /* set the lane count on the sink */ 604 + tmp = dp_info->dp_lane_count; 605 + if (dp_info->dpcd[0] >= 0x11) 606 + tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 607 + radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); 608 + 609 + /* set the link rate on the sink */ 610 + tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); 611 + radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); 612 + 613 + /* start training on the source */ 614 + if (ASIC_IS_DCE4(dp_info->rdev)) 615 + atombios_dig_encoder_setup(dp_info->encoder, 616 + ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); 617 + else 618 + radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START, 619 + dp_info->dp_clock, dp_info->enc_id, 0); 620 + 621 + /* disable the training pattern on the sink */ 622 + radeon_write_dpcd_reg(dp_info->radeon_connector, 623 + DP_TRAINING_PATTERN_SET, 624 + DP_TRAINING_PATTERN_DISABLE); 625 + 626 + return 0; 627 + } 628 + 629 + static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info) 630 + { 631 + udelay(400); 632 + 633 + /* disable the training pattern on the sink */ 634 + radeon_write_dpcd_reg(dp_info->radeon_connector, 635 + DP_TRAINING_PATTERN_SET, 636 + DP_TRAINING_PATTERN_DISABLE); 637 + 638 + /* disable the training pattern on the source */ 639 + if (ASIC_IS_DCE4(dp_info->rdev)) 640 + atombios_dig_encoder_setup(dp_info->encoder, 641 + ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); 642 + else 643 + radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 644 + dp_info->dp_clock, dp_info->enc_id, 0); 645 + 646 + return 0; 647 + } 648 + 649 + static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) 650 + { 651 + bool clock_recovery; 652 + u8 voltage; 653 + int i; 654 + 655 + radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1); 656 + memset(dp_info->train_set, 0, 4); 657 + radeon_dp_update_vs_emph(dp_info); 658 + 659 + udelay(400); 660 + 661 + /* 
clock recovery loop */ 662 + clock_recovery = false; 663 + dp_info->tries = 0; 664 + voltage = 0xff; 665 + while (1) { 666 + if (dp_info->rd_interval == 0) 667 + udelay(100); 668 + else 669 + mdelay(dp_info->rd_interval * 4); 670 + 671 + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) 672 + break; 673 + 674 + if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { 675 + clock_recovery = true; 676 + break; 677 + } 678 + 679 + for (i = 0; i < dp_info->dp_lane_count; i++) { 680 + if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 681 + break; 682 + } 683 + if (i == dp_info->dp_lane_count) { 684 + DRM_ERROR("clock recovery reached max voltage\n"); 685 + break; 686 + } 687 + 688 + if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 689 + ++dp_info->tries; 690 + if (dp_info->tries == 5) { 691 + DRM_ERROR("clock recovery tried 5 times\n"); 692 + break; 693 + } 694 + } else 695 + dp_info->tries = 0; 696 + 697 + voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 698 + 699 + /* Compute new train_set as requested by sink */ 700 + dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); 701 + 702 + radeon_dp_update_vs_emph(dp_info); 703 + } 704 + if (!clock_recovery) { 705 + DRM_ERROR("clock recovery failed\n"); 706 + return -1; 707 + } else { 708 + DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", 709 + dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 710 + (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> 711 + DP_TRAIN_PRE_EMPHASIS_SHIFT); 712 + return 0; 635 713 } 636 714 } 637 715 638 - static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) 716 + static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) 639 717 { 640 - radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1, 641 - &downspread); 718 + bool channel_eq; 719 + 720 + if (dp_info->tp3_supported) 721 + 
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3); 722 + else 723 + radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2); 724 + 725 + /* channel equalization loop */ 726 + dp_info->tries = 0; 727 + channel_eq = false; 728 + while (1) { 729 + if (dp_info->rd_interval == 0) 730 + udelay(400); 731 + else 732 + mdelay(dp_info->rd_interval * 4); 733 + 734 + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) 735 + break; 736 + 737 + if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { 738 + channel_eq = true; 739 + break; 740 + } 741 + 742 + /* Try 5 times */ 743 + if (dp_info->tries > 5) { 744 + DRM_ERROR("channel eq failed: 5 tries\n"); 745 + break; 746 + } 747 + 748 + /* Compute new train_set as requested by sink */ 749 + dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); 750 + 751 + radeon_dp_update_vs_emph(dp_info); 752 + dp_info->tries++; 753 + } 754 + 755 + if (!channel_eq) { 756 + DRM_ERROR("channel eq failed\n"); 757 + return -1; 758 + } else { 759 + DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", 760 + dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 761 + (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) 762 + >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 763 + return 0; 764 + } 642 765 } 643 766 644 - static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, 645 - u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]) 646 - { 647 - radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, 648 - link_configuration); 649 - } 650 - 651 - static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, 652 - struct drm_encoder *encoder, 653 - u8 train_set[4]) 654 - { 655 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 656 - int i; 657 - 658 - for (i = 0; i < dig_connector->dp_lane_count; i++) 659 - atombios_dig_transmitter_setup(encoder, 660 - ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, 661 - i, train_set[i]); 662 - 663 - 
radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, 664 - dig_connector->dp_lane_count, train_set); 665 - } 666 - 667 - static void dp_set_training(struct radeon_connector *radeon_connector, 668 - u8 training) 669 - { 670 - radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET, 671 - 1, &training); 672 - } 673 - 674 - void dp_link_train(struct drm_encoder *encoder, 675 - struct drm_connector *connector) 767 + void radeon_dp_link_train(struct drm_encoder *encoder, 768 + struct drm_connector *connector) 676 769 { 677 770 struct drm_device *dev = encoder->dev; 678 771 struct radeon_device *rdev = dev->dev_private; ··· 868 585 struct radeon_encoder_atom_dig *dig; 869 586 struct radeon_connector *radeon_connector; 870 587 struct radeon_connector_atom_dig *dig_connector; 871 - int enc_id = 0; 872 - bool clock_recovery, channel_eq; 873 - u8 link_status[DP_LINK_STATUS_SIZE]; 874 - u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]; 875 - u8 tries, voltage; 876 - u8 train_set[4]; 877 - int i; 878 - 879 - if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && 880 - (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 881 - return; 588 + struct radeon_dp_link_train_info dp_info; 589 + u8 tmp; 882 590 883 591 if (!radeon_encoder->enc_priv) 884 592 return; ··· 880 606 return; 881 607 dig_connector = radeon_connector->con_priv; 882 608 609 + if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) && 610 + (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) 611 + return; 612 + 613 + dp_info.enc_id = 0; 883 614 if (dig->dig_encoder) 884 - enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 615 + dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 885 616 else 886 - enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 617 + dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 887 618 if (dig->linkb) 888 - enc_id |= ATOM_DP_CONFIG_LINK_B; 619 + dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B; 889 620 else 890 - enc_id |= ATOM_DP_CONFIG_LINK_A; 621 + dp_info.enc_id 
|= ATOM_DP_CONFIG_LINK_A; 891 622 892 - memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 893 - if (dig_connector->dp_clock == 270000) 894 - link_configuration[0] = DP_LINK_BW_2_7; 623 + dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); 624 + tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); 625 + if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 626 + dp_info.tp3_supported = true; 895 627 else 896 - link_configuration[0] = DP_LINK_BW_1_62; 897 - link_configuration[1] = dig_connector->dp_lane_count; 898 - if (dig_connector->dpcd[0] >= 0x11) 899 - link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 628 + dp_info.tp3_supported = false; 900 629 901 - /* power up the sink */ 902 - dp_set_power(radeon_connector, DP_SET_POWER_D0); 903 - /* disable the training pattern on the sink */ 904 - dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); 905 - /* set link bw and lanes on the sink */ 906 - dp_set_link_bw_lanes(radeon_connector, link_configuration); 907 - /* disable downspread on the sink */ 908 - dp_set_downspread(radeon_connector, 0); 909 - if (ASIC_IS_DCE4(rdev)) { 910 - /* start training on the source */ 911 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START); 912 - /* set training pattern 1 on the source */ 913 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1); 914 - } else { 915 - /* start training on the source */ 916 - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, 917 - dig_connector->dp_clock, enc_id, 0); 918 - /* set training pattern 1 on the source */ 919 - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 920 - dig_connector->dp_clock, enc_id, 0); 921 - } 630 + memcpy(dp_info.dpcd, dig_connector->dpcd, 8); 631 + dp_info.rdev = rdev; 632 + dp_info.encoder = encoder; 633 + dp_info.connector = connector; 634 + dp_info.radeon_connector = radeon_connector; 635 + dp_info.dp_lane_count = 
dig_connector->dp_lane_count; 636 + dp_info.dp_clock = dig_connector->dp_clock; 922 637 923 - /* set initial vs/emph */ 924 - memset(train_set, 0, 4); 925 - udelay(400); 926 - /* set training pattern 1 on the sink */ 927 - dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); 928 - 929 - dp_update_dpvs_emph(radeon_connector, encoder, train_set); 930 - 931 - /* clock recovery loop */ 932 - clock_recovery = false; 933 - tries = 0; 934 - voltage = 0xff; 935 - for (;;) { 936 - udelay(100); 937 - if (!atom_dp_get_link_status(radeon_connector, link_status)) 938 - break; 939 - 940 - if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { 941 - clock_recovery = true; 942 - break; 943 - } 944 - 945 - for (i = 0; i < dig_connector->dp_lane_count; i++) { 946 - if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 947 - break; 948 - } 949 - if (i == dig_connector->dp_lane_count) { 950 - DRM_ERROR("clock recovery reached max voltage\n"); 951 - break; 952 - } 953 - 954 - if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 955 - ++tries; 956 - if (tries == 5) { 957 - DRM_ERROR("clock recovery tried 5 times\n"); 958 - break; 959 - } 960 - } else 961 - tries = 0; 962 - 963 - voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 964 - 965 - /* Compute new train_set as requested by sink */ 966 - dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); 967 - dp_update_dpvs_emph(radeon_connector, encoder, train_set); 968 - } 969 - if (!clock_recovery) 970 - DRM_ERROR("clock recovery failed\n"); 971 - else 972 - DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", 973 - train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 974 - (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> 975 - DP_TRAIN_PRE_EMPHASIS_SHIFT); 976 - 977 - 978 - /* set training pattern 2 on the sink */ 979 - dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); 980 - /* set training pattern 2 on the source */ 981 - if (ASIC_IS_DCE4(rdev)) 982 - 
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2); 983 - else 984 - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 985 - dig_connector->dp_clock, enc_id, 1); 986 - 987 - /* channel equalization loop */ 988 - tries = 0; 989 - channel_eq = false; 990 - for (;;) { 991 - udelay(400); 992 - if (!atom_dp_get_link_status(radeon_connector, link_status)) 993 - break; 994 - 995 - if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { 996 - channel_eq = true; 997 - break; 998 - } 999 - 1000 - /* Try 5 times */ 1001 - if (tries > 5) { 1002 - DRM_ERROR("channel eq failed: 5 tries\n"); 1003 - break; 1004 - } 1005 - 1006 - /* Compute new train_set as requested by sink */ 1007 - dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); 1008 - dp_update_dpvs_emph(radeon_connector, encoder, train_set); 1009 - 1010 - tries++; 1011 - } 1012 - 1013 - if (!channel_eq) 1014 - DRM_ERROR("channel eq failed\n"); 1015 - else 1016 - DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", 1017 - train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 1018 - (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) 1019 - >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 1020 - 1021 - /* disable the training pattern on the sink */ 1022 - dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); 1023 - 1024 - /* disable the training pattern on the source */ 1025 - if (ASIC_IS_DCE4(rdev)) 1026 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); 1027 - else 1028 - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 1029 - dig_connector->dp_clock, enc_id, 0); 638 + if (radeon_dp_link_train_init(&dp_info)) 639 + goto done; 640 + if (radeon_dp_link_train_cr(&dp_info)) 641 + goto done; 642 + if (radeon_dp_link_train_ce(&dp_info)) 643 + goto done; 644 + done: 645 + if (radeon_dp_link_train_finish(&dp_info)) 646 + return; 1030 647 } 1031 - 1032 - int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 
1033 - uint8_t write_byte, uint8_t *read_byte) 1034 - { 1035 - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 1036 - struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; 1037 - int ret = 0; 1038 - uint16_t address = algo_data->address; 1039 - uint8_t msg[5]; 1040 - uint8_t reply[2]; 1041 - int msg_len, dp_msg_len; 1042 - int reply_bytes; 1043 - 1044 - /* Set up the command byte */ 1045 - if (mode & MODE_I2C_READ) 1046 - msg[2] = AUX_I2C_READ << 4; 1047 - else 1048 - msg[2] = AUX_I2C_WRITE << 4; 1049 - 1050 - if (!(mode & MODE_I2C_STOP)) 1051 - msg[2] |= AUX_I2C_MOT << 4; 1052 - 1053 - msg[0] = address; 1054 - msg[1] = address >> 8; 1055 - 1056 - reply_bytes = 1; 1057 - 1058 - msg_len = 4; 1059 - dp_msg_len = 3; 1060 - switch (mode) { 1061 - case MODE_I2C_WRITE: 1062 - msg[4] = write_byte; 1063 - msg_len++; 1064 - dp_msg_len += 2; 1065 - break; 1066 - case MODE_I2C_READ: 1067 - dp_msg_len += 1; 1068 - break; 1069 - default: 1070 - break; 1071 - } 1072 - 1073 - msg[3] = (dp_msg_len) << 4; 1074 - ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); 1075 - 1076 - if (ret) { 1077 - if (read_byte) 1078 - *read_byte = reply[0]; 1079 - return reply_bytes; 1080 - } 1081 - return -EREMOTEIO; 1082 - } 1083 -
+11 -3
drivers/gpu/drm/radeon/evergreen.c
··· 1578 1578 u32 sq_stack_resource_mgmt_2; 1579 1579 u32 sq_stack_resource_mgmt_3; 1580 1580 u32 vgt_cache_invalidation; 1581 - u32 hdp_host_path_cntl; 1581 + u32 hdp_host_path_cntl, tmp; 1582 1582 int i, j, num_shader_engines, ps_thread_count; 1583 1583 1584 1584 switch (rdev->family) { ··· 1936 1936 rdev->config.evergreen.tile_config |= (3 << 0); 1937 1937 break; 1938 1938 } 1939 - rdev->config.evergreen.tile_config |= 1940 - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1939 + /* num banks is 8 on all fusion asics */ 1940 + if (rdev->flags & RADEON_IS_IGP) 1941 + rdev->config.evergreen.tile_config |= 8 << 4; 1942 + else 1943 + rdev->config.evergreen.tile_config |= 1944 + ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1941 1945 rdev->config.evergreen.tile_config |= 1942 1946 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 1943 1947 rdev->config.evergreen.tile_config |= ··· 2144 2140 WREG32(i, 0); 2145 2141 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) 2146 2142 WREG32(i, 0); 2143 + 2144 + tmp = RREG32(HDP_MISC_CNTL); 2145 + tmp |= HDP_FLUSH_INVALIDATE_CACHE; 2146 + WREG32(HDP_MISC_CNTL, tmp); 2147 2147 2148 2148 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 2149 2149 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+2
drivers/gpu/drm/radeon/evergreend.h
··· 64 64 #define GB_BACKEND_MAP 0x98FC 65 65 #define DMIF_ADDR_CONFIG 0xBD4 66 66 #define HDP_ADDR_CONFIG 0x2F48 67 + #define HDP_MISC_CNTL 0x2F4C 68 + #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 67 69 68 70 #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 69 71 #define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+6 -2
drivers/gpu/drm/radeon/ni.c
··· 417 417 num_shader_engines = 1; 418 418 if (num_shader_engines > rdev->config.cayman.max_shader_engines) 419 419 num_shader_engines = rdev->config.cayman.max_shader_engines; 420 - if (num_backends_per_asic > num_shader_engines) 420 + if (num_backends_per_asic < num_shader_engines) 421 421 num_backends_per_asic = num_shader_engines; 422 422 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines)) 423 423 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines; ··· 829 829 rdev->config.cayman.tile_config |= 830 830 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 831 831 rdev->config.cayman.tile_config |= 832 - (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 832 + ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 833 833 rdev->config.cayman.tile_config |= 834 834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 835 835 ··· 930 930 WREG32(CB_PERF_CTR2_SEL_1, 0); 931 931 WREG32(CB_PERF_CTR3_SEL_0, 0); 932 932 WREG32(CB_PERF_CTR3_SEL_1, 0); 933 + 934 + tmp = RREG32(HDP_MISC_CNTL); 935 + tmp |= HDP_FLUSH_INVALIDATE_CACHE; 936 + WREG32(HDP_MISC_CNTL, tmp); 933 937 934 938 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 935 939 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+3 -1
drivers/gpu/drm/radeon/nid.h
··· 136 136 #define HDP_NONSURFACE_INFO 0x2C08 137 137 #define HDP_NONSURFACE_SIZE 0x2C0C 138 138 #define HDP_ADDR_CONFIG 0x2F48 139 + #define HDP_MISC_CNTL 0x2F4C 140 + #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 139 141 140 142 #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 141 143 #define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C ··· 353 351 #define MULTI_GPU_TILE_SIZE_MASK 0x03000000 354 352 #define MULTI_GPU_TILE_SIZE_SHIFT 24 355 353 #define ROW_SIZE(x) ((x) << 28) 356 - #define ROW_SIZE_MASK 0x30000007 354 + #define ROW_SIZE_MASK 0x30000000 357 355 #define ROW_SIZE_SHIFT 28 358 356 #define NUM_LOWER_PIPES(x) ((x) << 30) 359 357 #define NUM_LOWER_PIPES_MASK 0x40000000
+4
drivers/gpu/drm/radeon/radeon_asic.c
··· 782 782 .hpd_fini = &evergreen_hpd_fini, 783 783 .hpd_sense = &evergreen_hpd_sense, 784 784 .hpd_set_polarity = &evergreen_hpd_set_polarity, 785 + .ioctl_wait_idle = r600_ioctl_wait_idle, 785 786 .gui_idle = &r600_gui_idle, 786 787 .pm_misc = &evergreen_pm_misc, 787 788 .pm_prepare = &evergreen_pm_prepare, ··· 829 828 .hpd_fini = &evergreen_hpd_fini, 830 829 .hpd_sense = &evergreen_hpd_sense, 831 830 .hpd_set_polarity = &evergreen_hpd_set_polarity, 831 + .ioctl_wait_idle = r600_ioctl_wait_idle, 832 832 .gui_idle = &r600_gui_idle, 833 833 .pm_misc = &evergreen_pm_misc, 834 834 .pm_prepare = &evergreen_pm_prepare, ··· 876 874 .hpd_fini = &evergreen_hpd_fini, 877 875 .hpd_sense = &evergreen_hpd_sense, 878 876 .hpd_set_polarity = &evergreen_hpd_set_polarity, 877 + .ioctl_wait_idle = r600_ioctl_wait_idle, 879 878 .gui_idle = &r600_gui_idle, 880 879 .pm_misc = &evergreen_pm_misc, 881 880 .pm_prepare = &evergreen_pm_prepare, ··· 923 920 .hpd_fini = &evergreen_hpd_fini, 924 921 .hpd_sense = &evergreen_hpd_sense, 925 922 .hpd_set_polarity = &evergreen_hpd_set_polarity, 923 + .ioctl_wait_idle = r600_ioctl_wait_idle, 926 924 .gui_idle = &r600_gui_idle, 927 925 .pm_misc = &evergreen_pm_misc, 928 926 .pm_prepare = &evergreen_pm_prepare,
+102 -15
drivers/gpu/drm/radeon/radeon_combios.c
··· 505 505 * DDC_VGA = RADEON_GPIO_VGA_DDC 506 506 * DDC_LCD = RADEON_GPIOPAD_MASK 507 507 * DDC_GPIO = RADEON_MDGPIO_MASK 508 - * r1xx/r2xx 508 + * r1xx 509 509 * DDC_MONID = RADEON_GPIO_MONID 510 510 * DDC_CRT2 = RADEON_GPIO_CRT2_DDC 511 - * r3xx 511 + * r200 512 512 * DDC_MONID = RADEON_GPIO_MONID 513 513 * DDC_CRT2 = RADEON_GPIO_DVI_DDC 514 + * r300/r350 515 + * DDC_MONID = RADEON_GPIO_DVI_DDC 516 + * DDC_CRT2 = RADEON_GPIO_DVI_DDC 517 + * rv2xx/rv3xx 518 + * DDC_MONID = RADEON_GPIO_MONID 519 + * DDC_CRT2 = RADEON_GPIO_MONID 514 520 * rs3xx/rs4xx 515 521 * DDC_MONID = RADEON_GPIOPAD_MASK 516 522 * DDC_CRT2 = RADEON_GPIO_MONID ··· 543 537 rdev->family == CHIP_RS400 || 544 538 rdev->family == CHIP_RS480) 545 539 ddc_line = RADEON_GPIOPAD_MASK; 546 - else 540 + else if (rdev->family == CHIP_R300 || 541 + rdev->family == CHIP_R350) { 542 + ddc_line = RADEON_GPIO_DVI_DDC; 543 + ddc = DDC_DVI; 544 + } else 547 545 ddc_line = RADEON_GPIO_MONID; 548 546 break; 549 547 case DDC_CRT2: 550 - if (rdev->family == CHIP_RS300 || 551 - rdev->family == CHIP_RS400 || 552 - rdev->family == CHIP_RS480) 553 - ddc_line = RADEON_GPIO_MONID; 554 - else if (rdev->family >= CHIP_R300) { 548 + if (rdev->family == CHIP_R200 || 549 + rdev->family == CHIP_R300 || 550 + rdev->family == CHIP_R350) { 555 551 ddc_line = RADEON_GPIO_DVI_DDC; 556 552 ddc = DDC_DVI; 553 + } else if (rdev->family == CHIP_RS300 || 554 + rdev->family == CHIP_RS400 || 555 + rdev->family == CHIP_RS480) 556 + ddc_line = RADEON_GPIO_MONID; 557 + else if (rdev->family >= CHIP_RV350) { 558 + ddc_line = RADEON_GPIO_MONID; 559 + ddc = DDC_MONID; 557 560 } else 558 561 ddc_line = RADEON_GPIO_CRT2_DDC; 559 562 break; ··· 724 709 struct drm_device *dev = rdev->ddev; 725 710 struct radeon_i2c_bus_rec i2c; 726 711 712 + /* actual hw pads 713 + * r1xx/rs2xx/rs3xx 714 + * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm 715 + * r200 716 + * 0x60, 0x64, 0x68, mm 717 + * r300/r350 718 + * 0x60, 0x64, mm 719 + * rv2xx/rv3xx/rs4xx 720 + * 0x60, 
0x64, 0x68, gpiopads, mm 721 + */ 727 722 723 + /* 0x60 */ 728 724 i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); 729 725 rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC"); 730 - 726 + /* 0x64 */ 731 727 i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); 732 728 rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC"); 733 729 730 + /* mm i2c */ 734 731 i2c.valid = true; 735 732 i2c.hw_capable = true; 736 733 i2c.mm_i2c = true; 737 734 i2c.i2c_id = 0xa0; 738 735 rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C"); 739 736 740 - if (rdev->family == CHIP_RS300 || 741 - rdev->family == CHIP_RS400 || 742 - rdev->family == CHIP_RS480) { 737 + if (rdev->family == CHIP_R300 || 738 + rdev->family == CHIP_R350) { 739 + /* only 2 sw i2c pads */ 740 + } else if (rdev->family == CHIP_RS300 || 741 + rdev->family == CHIP_RS400 || 742 + rdev->family == CHIP_RS480) { 743 743 u16 offset; 744 744 u8 id, blocks, clk, data; 745 745 int i; 746 746 747 + /* 0x68 */ 747 748 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 748 749 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 749 750 ··· 771 740 if (id == 136) { 772 741 clk = RBIOS8(offset + 3 + (i * 5) + 3); 773 742 data = RBIOS8(offset + 3 + (i * 5) + 4); 743 + /* gpiopad */ 774 744 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 775 745 (1 << clk), (1 << data)); 776 746 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); ··· 779 747 } 780 748 } 781 749 } 782 - 783 - } else if (rdev->family >= CHIP_R300) { 750 + } else if (rdev->family >= CHIP_R200) { 751 + /* 0x68 */ 784 752 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 785 753 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 786 754 } else { 755 + /* 0x68 */ 787 756 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 788 757 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 789 - 758 + /* 0x6c */ 790 759 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 791 760 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, 
"CRT2_DDC"); 792 761 } ··· 2537 2504 return true; 2538 2505 } 2539 2506 2507 + static const char *thermal_controller_names[] = { 2508 + "NONE", 2509 + "lm63", 2510 + "adm1032", 2511 + }; 2512 + 2540 2513 void radeon_combios_get_power_modes(struct radeon_device *rdev) 2541 2514 { 2542 2515 struct drm_device *dev = rdev->ddev; ··· 2561 2522 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2562 2523 rdev->pm.current_clock_mode_index = 0; 2563 2524 return; 2525 + } 2526 + 2527 + /* check for a thermal chip */ 2528 + offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); 2529 + if (offset) { 2530 + u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; 2531 + struct radeon_i2c_bus_rec i2c_bus; 2532 + 2533 + rev = RBIOS8(offset); 2534 + 2535 + if (rev == 0) { 2536 + thermal_controller = RBIOS8(offset + 3); 2537 + gpio = RBIOS8(offset + 4) & 0x3f; 2538 + i2c_addr = RBIOS8(offset + 5); 2539 + } else if (rev == 1) { 2540 + thermal_controller = RBIOS8(offset + 4); 2541 + gpio = RBIOS8(offset + 5) & 0x3f; 2542 + i2c_addr = RBIOS8(offset + 6); 2543 + } else if (rev == 2) { 2544 + thermal_controller = RBIOS8(offset + 4); 2545 + gpio = RBIOS8(offset + 5) & 0x3f; 2546 + i2c_addr = RBIOS8(offset + 6); 2547 + clk_bit = RBIOS8(offset + 0xa); 2548 + data_bit = RBIOS8(offset + 0xb); 2549 + } 2550 + if ((thermal_controller > 0) && (thermal_controller < 3)) { 2551 + DRM_INFO("Possible %s thermal controller at 0x%02x\n", 2552 + thermal_controller_names[thermal_controller], 2553 + i2c_addr >> 1); 2554 + if (gpio == DDC_LCD) { 2555 + /* MM i2c */ 2556 + i2c_bus.valid = true; 2557 + i2c_bus.hw_capable = true; 2558 + i2c_bus.mm_i2c = true; 2559 + i2c_bus.i2c_id = 0xa0; 2560 + } else if (gpio == DDC_GPIO) 2561 + i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit); 2562 + else 2563 + i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0); 2564 + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); 2565 + if 
(rdev->pm.i2c_bus) { 2566 + struct i2c_board_info info = { }; 2567 + const char *name = thermal_controller_names[thermal_controller]; 2568 + info.addr = i2c_addr >> 1; 2569 + strlcpy(info.type, name, sizeof(info.type)); 2570 + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2571 + } 2572 + } 2564 2573 } 2565 2574 2566 2575 if (rdev->flags & RADEON_IS_MOBILITY) {
+427 -184
drivers/gpu/drm/radeon/radeon_connectors.c
··· 50 50 struct radeon_device *rdev = dev->dev_private; 51 51 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 52 52 53 - if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 54 - radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 53 + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 55 54 56 - if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 57 - (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 58 - if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 59 - (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) { 60 - if (radeon_dp_needs_link_train(radeon_connector)) { 61 - if (connector->encoder) 62 - dp_link_train(connector->encoder, connector); 63 - } 64 - } 55 + /* powering up/down the eDP panel generates hpd events which 56 + * can interfere with modesetting. 57 + */ 58 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 59 + return; 60 + 61 + /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ 62 + if (rdev->family >= CHIP_R600) { 63 + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 64 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 65 + else 66 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 65 67 } 66 - 67 68 } 68 69 69 70 static void radeon_property_change_mode(struct drm_encoder *encoder) ··· 1055 1054 int ret; 1056 1055 1057 1056 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1057 + struct drm_encoder *encoder; 1058 + struct drm_display_mode *mode; 1059 + 1058 1060 if (!radeon_dig_connector->edp_on) 1059 1061 atombios_set_edp_panel_power(connector, 1060 1062 ATOM_TRANSMITTER_ACTION_POWER_ON); 1061 - } 1062 - ret = radeon_ddc_get_modes(radeon_connector); 1063 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1063 + ret = radeon_ddc_get_modes(radeon_connector); 1064 1064 if (!radeon_dig_connector->edp_on) 1065 1065 atombios_set_edp_panel_power(connector, 1066 
1066 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1067 - } 1067 + 1068 + if (ret > 0) { 1069 + encoder = radeon_best_single_encoder(connector); 1070 + if (encoder) { 1071 + radeon_fixup_lvds_native_mode(encoder, connector); 1072 + /* add scaled modes */ 1073 + radeon_add_common_modes(encoder, connector); 1074 + } 1075 + return ret; 1076 + } 1077 + 1078 + encoder = radeon_best_single_encoder(connector); 1079 + if (!encoder) 1080 + return 0; 1081 + 1082 + /* we have no EDID modes */ 1083 + mode = radeon_fp_native_mode(encoder); 1084 + if (mode) { 1085 + ret = 1; 1086 + drm_mode_probed_add(connector, mode); 1087 + /* add the width/height from vbios tables if available */ 1088 + connector->display_info.width_mm = mode->width_mm; 1089 + connector->display_info.height_mm = mode->height_mm; 1090 + /* add scaled modes */ 1091 + radeon_add_common_modes(encoder, connector); 1092 + } 1093 + } else 1094 + ret = radeon_ddc_get_modes(radeon_connector); 1068 1095 1069 1096 return ret; 1097 + } 1098 + 1099 + bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) 1100 + { 1101 + struct drm_mode_object *obj; 1102 + struct drm_encoder *encoder; 1103 + struct radeon_encoder *radeon_encoder; 1104 + int i; 1105 + bool found = false; 1106 + 1107 + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 1108 + if (connector->encoder_ids[i] == 0) 1109 + break; 1110 + 1111 + obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); 1112 + if (!obj) 1113 + continue; 1114 + 1115 + encoder = obj_to_encoder(obj); 1116 + radeon_encoder = to_radeon_encoder(encoder); 1117 + 1118 + switch (radeon_encoder->encoder_id) { 1119 + case ENCODER_OBJECT_ID_TRAVIS: 1120 + case ENCODER_OBJECT_ID_NUTMEG: 1121 + found = true; 1122 + break; 1123 + default: 1124 + break; 1125 + } 1126 + } 1127 + 1128 + return found; 1129 + } 1130 + 1131 + bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) 1132 + { 1133 + struct drm_mode_object *obj; 1134 + struct 
drm_encoder *encoder; 1135 + struct radeon_encoder *radeon_encoder; 1136 + int i; 1137 + bool found = false; 1138 + 1139 + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 1140 + if (connector->encoder_ids[i] == 0) 1141 + break; 1142 + 1143 + obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); 1144 + if (!obj) 1145 + continue; 1146 + 1147 + encoder = obj_to_encoder(obj); 1148 + radeon_encoder = to_radeon_encoder(encoder); 1149 + if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) 1150 + found = true; 1151 + } 1152 + 1153 + return found; 1154 + } 1155 + 1156 + bool radeon_connector_is_dp12_capable(struct drm_connector *connector) 1157 + { 1158 + struct drm_device *dev = connector->dev; 1159 + struct radeon_device *rdev = dev->dev_private; 1160 + 1161 + if (ASIC_IS_DCE5(rdev) && 1162 + (rdev->clock.dp_extclk >= 53900) && 1163 + radeon_connector_encoder_is_hbr2(connector)) { 1164 + return true; 1165 + } 1166 + 1167 + return false; 1070 1168 } 1071 1169 1072 1170 static enum drm_connector_status 1073 1171 radeon_dp_detect(struct drm_connector *connector, bool force) 1074 1172 { 1173 + struct drm_device *dev = connector->dev; 1174 + struct radeon_device *rdev = dev->dev_private; 1075 1175 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1076 1176 enum drm_connector_status ret = connector_status_disconnected; 1077 1177 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; ··· 1183 1081 } 1184 1082 1185 1083 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1084 + struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1085 + if (encoder) { 1086 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1087 + struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1088 + 1089 + /* check if panel is valid */ 1090 + if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 1091 + ret = connector_status_connected; 
1092 + } 1186 1093 /* eDP is always DP */ 1187 1094 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1188 1095 if (!radeon_dig_connector->edp_on) ··· 1204 1093 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1205 1094 } else { 1206 1095 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1207 - if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 1208 - if (radeon_dp_getdpcd(radeon_connector)) 1209 - ret = connector_status_connected; 1096 + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1097 + ret = connector_status_connected; 1098 + if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 1099 + radeon_dp_getdpcd(radeon_connector); 1210 1100 } else { 1211 - if (radeon_ddc_probe(radeon_connector)) 1212 - ret = connector_status_connected; 1101 + if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 1102 + if (radeon_dp_getdpcd(radeon_connector)) 1103 + ret = connector_status_connected; 1104 + } else { 1105 + if (radeon_ddc_probe(radeon_connector)) 1106 + ret = connector_status_connected; 1107 + } 1213 1108 } 1214 1109 } 1215 1110 ··· 1231 1114 1232 1115 /* XXX check mode bandwidth */ 1233 1116 1234 - if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 1235 - (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 1236 - return radeon_dp_mode_valid_helper(radeon_connector, mode); 1237 - else 1117 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1118 + struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1119 + 1120 + if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) 1121 + return MODE_PANEL; 1122 + 1123 + if (encoder) { 1124 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1125 + struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1126 + 1127 + /* AVIVO hardware supports downscaling modes larger than the panel 1128 + * to the panel size, but I'm not sure 
this is desirable. 1129 + */ 1130 + if ((mode->hdisplay > native_mode->hdisplay) || 1131 + (mode->vdisplay > native_mode->vdisplay)) 1132 + return MODE_PANEL; 1133 + 1134 + /* if scaling is disabled, block non-native modes */ 1135 + if (radeon_encoder->rmx_type == RMX_OFF) { 1136 + if ((mode->hdisplay != native_mode->hdisplay) || 1137 + (mode->vdisplay != native_mode->vdisplay)) 1138 + return MODE_PANEL; 1139 + } 1140 + } 1238 1141 return MODE_OK; 1142 + } else { 1143 + if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 1144 + (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 1145 + return radeon_dp_mode_valid_helper(connector, mode); 1146 + else 1147 + return MODE_OK; 1148 + } 1239 1149 } 1240 1150 1241 1151 struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { ··· 1295 1151 struct drm_connector *connector; 1296 1152 struct radeon_connector *radeon_connector; 1297 1153 struct radeon_connector_atom_dig *radeon_dig_connector; 1154 + struct drm_encoder *encoder; 1155 + struct radeon_encoder *radeon_encoder; 1298 1156 uint32_t subpixel_order = SubPixelNone; 1299 1157 bool shared_ddc = false; 1158 + bool is_dp_bridge = false; 1300 1159 1301 1160 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1302 1161 return; ··· 1331 1184 } 1332 1185 } 1333 1186 1187 + /* check if it's a dp bridge */ 1188 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1189 + radeon_encoder = to_radeon_encoder(encoder); 1190 + if (radeon_encoder->devices & supported_device) { 1191 + switch (radeon_encoder->encoder_id) { 1192 + case ENCODER_OBJECT_ID_TRAVIS: 1193 + case ENCODER_OBJECT_ID_NUTMEG: 1194 + is_dp_bridge = true; 1195 + break; 1196 + default: 1197 + break; 1198 + } 1199 + } 1200 + } 1201 + 1334 1202 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); 1335 1203 if (!radeon_connector) 1336 1204 return; ··· 1363 1201 if (!radeon_connector->router_bus) 1364 1202 DRM_ERROR("Failed to assign 
router i2c bus! Check dmesg for i2c errors.\n"); 1365 1203 } 1366 - switch (connector_type) { 1367 - case DRM_MODE_CONNECTOR_VGA: 1368 - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1369 - drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1370 - if (i2c_bus->valid) { 1371 - radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1372 - if (!radeon_connector->ddc_bus) 1373 - DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1374 - } 1375 - radeon_connector->dac_load_detect = true; 1376 - drm_connector_attach_property(&radeon_connector->base, 1377 - rdev->mode_info.load_detect_property, 1378 - 1); 1379 - /* no HPD on analog connectors */ 1380 - radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1381 - connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1382 - connector->interlace_allowed = true; 1383 - connector->doublescan_allowed = true; 1384 - break; 1385 - case DRM_MODE_CONNECTOR_DVIA: 1386 - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1387 - drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1388 - if (i2c_bus->valid) { 1389 - radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1390 - if (!radeon_connector->ddc_bus) 1391 - DRM_ERROR("DVIA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1392 - } 1393 - radeon_connector->dac_load_detect = true; 1394 - drm_connector_attach_property(&radeon_connector->base, 1395 - rdev->mode_info.load_detect_property, 1396 - 1); 1397 - /* no HPD on analog connectors */ 1398 - radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1399 - connector->interlace_allowed = true; 1400 - connector->doublescan_allowed = true; 1401 - break; 1402 - case DRM_MODE_CONNECTOR_DVII: 1403 - case DRM_MODE_CONNECTOR_DVID: 1404 - radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1405 - if (!radeon_dig_connector) 1406 - goto failed; 1407 - radeon_dig_connector->igp_lane_info = igp_lane_info; 1408 - radeon_connector->con_priv = radeon_dig_connector; 1409 - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1410 - drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1411 - if (i2c_bus->valid) { 1412 - radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1413 - if (!radeon_connector->ddc_bus) 1414 - DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1415 - } 1416 - subpixel_order = SubPixelHorizontalRGB; 1417 - drm_connector_attach_property(&radeon_connector->base, 1418 - rdev->mode_info.coherent_mode_property, 1419 - 1); 1420 - if (ASIC_IS_AVIVO(rdev)) { 1421 - drm_connector_attach_property(&radeon_connector->base, 1422 - rdev->mode_info.underscan_property, 1423 - UNDERSCAN_OFF); 1424 - drm_connector_attach_property(&radeon_connector->base, 1425 - rdev->mode_info.underscan_hborder_property, 1426 - 0); 1427 - drm_connector_attach_property(&radeon_connector->base, 1428 - rdev->mode_info.underscan_vborder_property, 1429 - 0); 1430 - } 1431 - if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1432 - radeon_connector->dac_load_detect = true; 1433 - drm_connector_attach_property(&radeon_connector->base, 1434 - rdev->mode_info.load_detect_property, 1435 - 1); 1436 - } 1437 - connector->interlace_allowed = true; 1438 - if (connector_type == DRM_MODE_CONNECTOR_DVII) 1439 - connector->doublescan_allowed = true; 1440 - else 1441 - connector->doublescan_allowed = false; 1442 - break; 1443 - case DRM_MODE_CONNECTOR_HDMIA: 1444 - case DRM_MODE_CONNECTOR_HDMIB: 1445 - radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1446 - if (!radeon_dig_connector) 1447 - goto failed; 1448 - radeon_dig_connector->igp_lane_info = igp_lane_info; 1449 - radeon_connector->con_priv = radeon_dig_connector; 1450 - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1451 - drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1452 - if (i2c_bus->valid) { 1453 - radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1454 - if (!radeon_connector->ddc_bus) 1455 - DRM_ERROR("HDMI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1456 - } 1457 - drm_connector_attach_property(&radeon_connector->base, 1458 - rdev->mode_info.coherent_mode_property, 1459 - 1); 1460 - if (ASIC_IS_AVIVO(rdev)) { 1461 - drm_connector_attach_property(&radeon_connector->base, 1462 - rdev->mode_info.underscan_property, 1463 - UNDERSCAN_OFF); 1464 - drm_connector_attach_property(&radeon_connector->base, 1465 - rdev->mode_info.underscan_hborder_property, 1466 - 0); 1467 - drm_connector_attach_property(&radeon_connector->base, 1468 - rdev->mode_info.underscan_vborder_property, 1469 - 0); 1470 - } 1471 - subpixel_order = SubPixelHorizontalRGB; 1472 - connector->interlace_allowed = true; 1473 - if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1474 - connector->doublescan_allowed = true; 1475 - else 1476 - connector->doublescan_allowed = false; 1477 - break; 1478 - case DRM_MODE_CONNECTOR_DisplayPort: 1479 - case DRM_MODE_CONNECTOR_eDP: 1204 + 1205 + if (is_dp_bridge) { 1480 1206 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1481 1207 if (!radeon_dig_connector) 1482 1208 goto failed; ··· 1384 1334 if (!radeon_connector->ddc_bus) 1385 1335 DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1386 1336 } 1387 - subpixel_order = SubPixelHorizontalRGB; 1388 - drm_connector_attach_property(&radeon_connector->base, 1389 - rdev->mode_info.coherent_mode_property, 1390 - 1); 1391 - if (ASIC_IS_AVIVO(rdev)) { 1337 + switch (connector_type) { 1338 + case DRM_MODE_CONNECTOR_VGA: 1339 + case DRM_MODE_CONNECTOR_DVIA: 1340 + default: 1341 + connector->interlace_allowed = true; 1342 + connector->doublescan_allowed = true; 1343 + break; 1344 + case DRM_MODE_CONNECTOR_DVII: 1345 + case DRM_MODE_CONNECTOR_DVID: 1346 + case DRM_MODE_CONNECTOR_HDMIA: 1347 + case DRM_MODE_CONNECTOR_HDMIB: 1348 + case DRM_MODE_CONNECTOR_DisplayPort: 1392 1349 drm_connector_attach_property(&radeon_connector->base, 1393 1350 rdev->mode_info.underscan_property, 1394 1351 UNDERSCAN_OFF); ··· 1405 1348 drm_connector_attach_property(&radeon_connector->base, 1406 1349 rdev->mode_info.underscan_vborder_property, 1407 1350 0); 1351 + subpixel_order = SubPixelHorizontalRGB; 1352 + connector->interlace_allowed = true; 1353 + if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1354 + connector->doublescan_allowed = true; 1355 + else 1356 + connector->doublescan_allowed = false; 1357 + break; 1358 + case DRM_MODE_CONNECTOR_LVDS: 1359 + case DRM_MODE_CONNECTOR_eDP: 1360 + drm_connector_attach_property(&radeon_connector->base, 1361 + dev->mode_config.scaling_mode_property, 1362 + DRM_MODE_SCALE_FULLSCREEN); 1363 + subpixel_order = SubPixelHorizontalRGB; 1364 + connector->interlace_allowed = false; 1365 + connector->doublescan_allowed = false; 1366 + break; 1408 1367 } 1409 - connector->interlace_allowed = true; 1410 - /* in theory with a DP to VGA converter... 
*/ 1411 - connector->doublescan_allowed = false; 1412 - break; 1413 - case DRM_MODE_CONNECTOR_SVIDEO: 1414 - case DRM_MODE_CONNECTOR_Composite: 1415 - case DRM_MODE_CONNECTOR_9PinDIN: 1416 - drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1417 - drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1418 - radeon_connector->dac_load_detect = true; 1419 - drm_connector_attach_property(&radeon_connector->base, 1420 - rdev->mode_info.load_detect_property, 1421 - 1); 1422 - drm_connector_attach_property(&radeon_connector->base, 1423 - rdev->mode_info.tv_std_property, 1424 - radeon_atombios_get_tv_info(rdev)); 1425 - /* no HPD on analog connectors */ 1426 - radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1427 - connector->interlace_allowed = false; 1428 - connector->doublescan_allowed = false; 1429 - break; 1430 - case DRM_MODE_CONNECTOR_LVDS: 1431 - radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1432 - if (!radeon_dig_connector) 1433 - goto failed; 1434 - radeon_dig_connector->igp_lane_info = igp_lane_info; 1435 - radeon_connector->con_priv = radeon_dig_connector; 1436 - drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1437 - drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1438 - if (i2c_bus->valid) { 1439 - radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1440 - if (!radeon_connector->ddc_bus) 1441 - DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1368 + } else { 1369 + switch (connector_type) { 1370 + case DRM_MODE_CONNECTOR_VGA: 1371 + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1372 + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1373 + if (i2c_bus->valid) { 1374 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1375 + if (!radeon_connector->ddc_bus) 1376 + DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1377 + } 1378 + radeon_connector->dac_load_detect = true; 1379 + drm_connector_attach_property(&radeon_connector->base, 1380 + rdev->mode_info.load_detect_property, 1381 + 1); 1382 + /* no HPD on analog connectors */ 1383 + radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1384 + connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1385 + connector->interlace_allowed = true; 1386 + connector->doublescan_allowed = true; 1387 + break; 1388 + case DRM_MODE_CONNECTOR_DVIA: 1389 + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1390 + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1391 + if (i2c_bus->valid) { 1392 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1393 + if (!radeon_connector->ddc_bus) 1394 + DRM_ERROR("DVIA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1395 + } 1396 + radeon_connector->dac_load_detect = true; 1397 + drm_connector_attach_property(&radeon_connector->base, 1398 + rdev->mode_info.load_detect_property, 1399 + 1); 1400 + /* no HPD on analog connectors */ 1401 + radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1402 + connector->interlace_allowed = true; 1403 + connector->doublescan_allowed = true; 1404 + break; 1405 + case DRM_MODE_CONNECTOR_DVII: 1406 + case DRM_MODE_CONNECTOR_DVID: 1407 + radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1408 + if (!radeon_dig_connector) 1409 + goto failed; 1410 + radeon_dig_connector->igp_lane_info = igp_lane_info; 1411 + radeon_connector->con_priv = radeon_dig_connector; 1412 + drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1413 + drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1414 + if (i2c_bus->valid) { 1415 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1416 + if (!radeon_connector->ddc_bus) 1417 + DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1418 + } 1419 + subpixel_order = SubPixelHorizontalRGB; 1420 + drm_connector_attach_property(&radeon_connector->base, 1421 + rdev->mode_info.coherent_mode_property, 1422 + 1); 1423 + if (ASIC_IS_AVIVO(rdev)) { 1424 + drm_connector_attach_property(&radeon_connector->base, 1425 + rdev->mode_info.underscan_property, 1426 + UNDERSCAN_OFF); 1427 + drm_connector_attach_property(&radeon_connector->base, 1428 + rdev->mode_info.underscan_hborder_property, 1429 + 0); 1430 + drm_connector_attach_property(&radeon_connector->base, 1431 + rdev->mode_info.underscan_vborder_property, 1432 + 0); 1433 + } 1434 + if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1435 + radeon_connector->dac_load_detect = true; 1436 + drm_connector_attach_property(&radeon_connector->base, 1437 + rdev->mode_info.load_detect_property, 1438 + 1); 1439 + } 1440 + connector->interlace_allowed = true; 1441 + if (connector_type == DRM_MODE_CONNECTOR_DVII) 1442 + connector->doublescan_allowed = true; 1443 + else 1444 + connector->doublescan_allowed = false; 1445 + break; 1446 + case DRM_MODE_CONNECTOR_HDMIA: 1447 + case DRM_MODE_CONNECTOR_HDMIB: 1448 + radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1449 + if (!radeon_dig_connector) 1450 + goto failed; 1451 + radeon_dig_connector->igp_lane_info = igp_lane_info; 1452 + radeon_connector->con_priv = radeon_dig_connector; 1453 + drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1454 + drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1455 + if (i2c_bus->valid) { 1456 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1457 + if (!radeon_connector->ddc_bus) 1458 + DRM_ERROR("HDMI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1459 + } 1460 + drm_connector_attach_property(&radeon_connector->base, 1461 + rdev->mode_info.coherent_mode_property, 1462 + 1); 1463 + if (ASIC_IS_AVIVO(rdev)) { 1464 + drm_connector_attach_property(&radeon_connector->base, 1465 + rdev->mode_info.underscan_property, 1466 + UNDERSCAN_OFF); 1467 + drm_connector_attach_property(&radeon_connector->base, 1468 + rdev->mode_info.underscan_hborder_property, 1469 + 0); 1470 + drm_connector_attach_property(&radeon_connector->base, 1471 + rdev->mode_info.underscan_vborder_property, 1472 + 0); 1473 + } 1474 + subpixel_order = SubPixelHorizontalRGB; 1475 + connector->interlace_allowed = true; 1476 + if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1477 + connector->doublescan_allowed = true; 1478 + else 1479 + connector->doublescan_allowed = false; 1480 + break; 1481 + case DRM_MODE_CONNECTOR_DisplayPort: 1482 + radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1483 + if (!radeon_dig_connector) 1484 + goto failed; 1485 + radeon_dig_connector->igp_lane_info = igp_lane_info; 1486 + radeon_connector->con_priv = radeon_dig_connector; 1487 + drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1488 + drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1489 + if (i2c_bus->valid) { 1490 + /* add DP i2c bus */ 1491 + radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); 1492 + if (!radeon_dig_connector->dp_i2c_bus) 1493 + DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); 1494 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1495 + if (!radeon_connector->ddc_bus) 1496 + DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1497 + } 1498 + subpixel_order = SubPixelHorizontalRGB; 1499 + drm_connector_attach_property(&radeon_connector->base, 1500 + rdev->mode_info.coherent_mode_property, 1501 + 1); 1502 + if (ASIC_IS_AVIVO(rdev)) { 1503 + drm_connector_attach_property(&radeon_connector->base, 1504 + rdev->mode_info.underscan_property, 1505 + UNDERSCAN_OFF); 1506 + drm_connector_attach_property(&radeon_connector->base, 1507 + rdev->mode_info.underscan_hborder_property, 1508 + 0); 1509 + drm_connector_attach_property(&radeon_connector->base, 1510 + rdev->mode_info.underscan_vborder_property, 1511 + 0); 1512 + } 1513 + connector->interlace_allowed = true; 1514 + /* in theory with a DP to VGA converter... */ 1515 + connector->doublescan_allowed = false; 1516 + break; 1517 + case DRM_MODE_CONNECTOR_eDP: 1518 + radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1519 + if (!radeon_dig_connector) 1520 + goto failed; 1521 + radeon_dig_connector->igp_lane_info = igp_lane_info; 1522 + radeon_connector->con_priv = radeon_dig_connector; 1523 + drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1524 + drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1525 + if (i2c_bus->valid) { 1526 + /* add DP i2c bus */ 1527 + radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); 1528 + if (!radeon_dig_connector->dp_i2c_bus) 1529 + DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); 1530 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1531 + if (!radeon_connector->ddc_bus) 1532 + DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1533 + } 1534 + drm_connector_attach_property(&radeon_connector->base, 1535 + dev->mode_config.scaling_mode_property, 1536 + DRM_MODE_SCALE_FULLSCREEN); 1537 + subpixel_order = SubPixelHorizontalRGB; 1538 + connector->interlace_allowed = false; 1539 + connector->doublescan_allowed = false; 1540 + break; 1541 + case DRM_MODE_CONNECTOR_SVIDEO: 1542 + case DRM_MODE_CONNECTOR_Composite: 1543 + case DRM_MODE_CONNECTOR_9PinDIN: 1544 + drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1545 + drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1546 + radeon_connector->dac_load_detect = true; 1547 + drm_connector_attach_property(&radeon_connector->base, 1548 + rdev->mode_info.load_detect_property, 1549 + 1); 1550 + drm_connector_attach_property(&radeon_connector->base, 1551 + rdev->mode_info.tv_std_property, 1552 + radeon_atombios_get_tv_info(rdev)); 1553 + /* no HPD on analog connectors */ 1554 + radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1555 + connector->interlace_allowed = false; 1556 + connector->doublescan_allowed = false; 1557 + break; 1558 + case DRM_MODE_CONNECTOR_LVDS: 1559 + radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1560 + if (!radeon_dig_connector) 1561 + goto failed; 1562 + radeon_dig_connector->igp_lane_info = igp_lane_info; 1563 + radeon_connector->con_priv = radeon_dig_connector; 1564 + drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1565 + drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1566 + if (i2c_bus->valid) { 1567 + radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1568 + if (!radeon_connector->ddc_bus) 1569 + DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1570 + } 1571 + drm_connector_attach_property(&radeon_connector->base, 1572 + dev->mode_config.scaling_mode_property, 1573 + DRM_MODE_SCALE_FULLSCREEN); 1574 + subpixel_order = SubPixelHorizontalRGB; 1575 + connector->interlace_allowed = false; 1576 + connector->doublescan_allowed = false; 1577 + break; 1442 1578 } 1443 - drm_connector_attach_property(&radeon_connector->base, 1444 - dev->mode_config.scaling_mode_property, 1445 - DRM_MODE_SCALE_FULLSCREEN); 1446 - subpixel_order = SubPixelHorizontalRGB; 1447 - connector->interlace_allowed = false; 1448 - connector->doublescan_allowed = false; 1449 - break; 1450 1579 } 1451 1580 1452 1581 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+3
drivers/gpu/drm/radeon/radeon_device.c
··· 923 923 radeon_fbdev_set_suspend(rdev, 0); 924 924 console_unlock(); 925 925 926 + /* init dig PHYs */ 927 + if (rdev->is_atom_bios) 928 + radeon_atom_encoder_init(rdev); 926 929 /* reset hpd state */ 927 930 radeon_hpd_init(rdev); 928 931 /* blat the mode back in */
+8 -2
drivers/gpu/drm/radeon/radeon_display.c
··· 1087 1087 *frac_fb_div_p = best_frac_feedback_div; 1088 1088 *ref_div_p = best_ref_div; 1089 1089 *post_div_p = best_post_div; 1090 - DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", 1091 - freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, 1090 + DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n", 1091 + (long long)freq, 1092 + best_freq / 1000, best_feedback_div, best_frac_feedback_div, 1092 1093 best_ref_div, best_post_div); 1093 1094 1094 1095 } ··· 1345 1344 if (!ret) { 1346 1345 return ret; 1347 1346 } 1347 + 1348 + /* init dig PHYs */ 1349 + if (rdev->is_atom_bios) 1350 + radeon_atom_encoder_init(rdev); 1351 + 1348 1352 /* initialize hpd */ 1349 1353 radeon_hpd_init(rdev); 1350 1354
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 50 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 51 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query 52 52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 53 + * 2.10.0 - fusion 2D tiling 53 54 */ 54 55 #define KMS_DRIVER_MAJOR 2 55 - #define KMS_DRIVER_MINOR 9 56 + #define KMS_DRIVER_MINOR 10 56 57 #define KMS_DRIVER_PATCHLEVEL 0 57 58 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 58 59 int radeon_driver_unload_kms(struct drm_device *dev);
+207 -45
drivers/gpu/drm/radeon/radeon_encoders.c
··· 229 229 return NULL; 230 230 } 231 231 232 + static struct drm_connector * 233 + radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) 234 + { 235 + struct drm_device *dev = encoder->dev; 236 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 237 + struct drm_connector *connector; 238 + struct radeon_connector *radeon_connector; 239 + 240 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 241 + radeon_connector = to_radeon_connector(connector); 242 + if (radeon_encoder->devices & radeon_connector->devices) 243 + return connector; 244 + } 245 + return NULL; 246 + } 247 + 232 248 struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) 233 249 { 234 250 struct drm_device *dev = encoder->dev; ··· 264 248 return other_encoder; 265 249 } 266 250 return NULL; 251 + } 252 + 253 + bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) 254 + { 255 + struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); 256 + 257 + if (other_encoder) { 258 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); 259 + 260 + switch (radeon_encoder->encoder_id) { 261 + case ENCODER_OBJECT_ID_TRAVIS: 262 + case ENCODER_OBJECT_ID_NUTMEG: 263 + return true; 264 + default: 265 + return false; 266 + } 267 + } 268 + 269 + return false; 267 270 } 268 271 269 272 void radeon_panel_mode_fixup(struct drm_encoder *encoder, ··· 656 621 struct radeon_connector *radeon_connector; 657 622 struct radeon_connector_atom_dig *dig_connector; 658 623 624 + /* dp bridges are always DP */ 625 + if (radeon_encoder_is_dp_bridge(encoder)) 626 + return ATOM_ENCODER_MODE_DP; 627 + 659 628 connector = radeon_get_connector_for_encoder(encoder); 660 629 if (!connector) { 661 630 switch (radeon_encoder->encoder_id) { ··· 707 668 return ATOM_ENCODER_MODE_LVDS; 708 669 break; 709 670 case DRM_MODE_CONNECTOR_DisplayPort: 710 - case DRM_MODE_CONNECTOR_eDP: 711 671 dig_connector = 
radeon_connector->con_priv; 712 672 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 713 673 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) ··· 720 682 } else 721 683 return ATOM_ENCODER_MODE_DVI; 722 684 break; 685 + case DRM_MODE_CONNECTOR_eDP: 686 + return ATOM_ENCODER_MODE_DP; 723 687 case DRM_MODE_CONNECTOR_DVIA: 724 688 case DRM_MODE_CONNECTOR_VGA: 725 689 return ATOM_ENCODER_MODE_CRT; ··· 787 747 }; 788 748 789 749 void 790 - atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 750 + atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) 791 751 { 792 752 struct drm_device *dev = encoder->dev; 793 753 struct radeon_device *rdev = dev->dev_private; ··· 800 760 int dp_clock = 0; 801 761 int dp_lane_count = 0; 802 762 int hpd_id = RADEON_HPD_NONE; 763 + int bpc = 8; 803 764 804 765 if (connector) { 805 766 struct radeon_connector *radeon_connector = to_radeon_connector(connector); ··· 810 769 dp_clock = dig_connector->dp_clock; 811 770 dp_lane_count = dig_connector->dp_lane_count; 812 771 hpd_id = radeon_connector->hpd.hpd; 772 + bpc = connector->display_info.bpc; 813 773 } 814 774 815 775 /* no dig encoder assigned */ ··· 833 791 834 792 args.v1.ucAction = action; 835 793 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 836 - args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 794 + if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) 795 + args.v3.ucPanelMode = panel_mode; 796 + else 797 + args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 837 798 838 799 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || 839 800 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) ··· 855 810 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; 856 811 } 857 812 args.v4.acConfig.ucDigSel = dig->dig_encoder; 858 - args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; 813 + switch (bpc) { 814 + case 0: 815 + args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; 816 + 
break; 817 + case 6: 818 + args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; 819 + break; 820 + case 8: 821 + default: 822 + args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; 823 + break; 824 + case 10: 825 + args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; 826 + break; 827 + case 12: 828 + args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; 829 + break; 830 + case 16: 831 + args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; 832 + break; 833 + } 859 834 if (hpd_id == RADEON_HPD_NONE) 860 835 args.v4.ucHPD_ID = 0; 861 836 else ··· 884 819 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) 885 820 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; 886 821 args.v3.acConfig.ucDigSel = dig->dig_encoder; 887 - args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; 822 + switch (bpc) { 823 + case 0: 824 + args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; 825 + break; 826 + case 6: 827 + args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; 828 + break; 829 + case 8: 830 + default: 831 + args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; 832 + break; 833 + case 10: 834 + args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; 835 + break; 836 + case 12: 837 + args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; 838 + break; 839 + case 16: 840 + args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; 841 + break; 842 + } 888 843 } else { 889 844 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) 890 845 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; ··· 944 859 struct radeon_device *rdev = dev->dev_private; 945 860 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 946 861 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 947 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 862 + struct drm_connector *connector; 948 863 union dig_transmitter_control args; 949 864 int index = 0; 950 865 uint8_t frev, crev; ··· 954 869 int dp_lane_count = 0; 955 870 int connector_object_id = 0; 956 871 int igp_lane_info = 0; 
872 + 873 + if (action == ATOM_TRANSMITTER_ACTION_INIT) 874 + connector = radeon_get_connector_for_encoder_init(encoder); 875 + else 876 + connector = radeon_get_connector_for_encoder(encoder); 957 877 958 878 if (connector) { 959 879 struct radeon_connector *radeon_connector = to_radeon_connector(connector); ··· 1021 931 else 1022 932 args.v3.ucLaneNum = 4; 1023 933 1024 - if (dig->linkb) { 934 + if (dig->linkb) 1025 935 args.v3.acConfig.ucLinkSel = 1; 936 + if (dig->dig_encoder & 1) 1026 937 args.v3.acConfig.ucEncoderSel = 1; 1027 - } 1028 938 1029 939 /* Select the PLL for the PHY 1030 940 * DP PHY should be clocked from external src if there is ··· 1036 946 } 1037 947 1038 948 if (ASIC_IS_DCE5(rdev)) { 1039 - if (is_dp && rdev->clock.dp_extclk) 1040 - args.v4.acConfig.ucRefClkSource = 3; /* external src */ 1041 - else 949 + /* On DCE5 DCPLL usually generates the DP ref clock */ 950 + if (is_dp) { 951 + if (rdev->clock.dp_extclk) 952 + args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; 953 + else 954 + args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; 955 + } else 1042 956 args.v4.acConfig.ucRefClkSource = pll_id; 1043 957 } else { 958 + /* On DCE4, if there is an external clock, it generates the DP ref clock */ 1044 959 if (is_dp && rdev->clock.dp_extclk) 1045 960 args.v3.acConfig.ucRefClkSource = 2; /* external src */ 1046 961 else ··· 1142 1047 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1143 1048 } 1144 1049 1145 - void 1050 + bool 1146 1051 atombios_set_edp_panel_power(struct drm_connector *connector, int action) 1147 1052 { 1148 1053 struct radeon_connector *radeon_connector = to_radeon_connector(connector); ··· 1153 1058 uint8_t frev, crev; 1154 1059 1155 1060 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) 1156 - return; 1061 + goto done; 1157 1062 1158 1063 if (!ASIC_IS_DCE4(rdev)) 1159 - return; 1064 + goto done; 1160 1065 1161 1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && 1162 
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1163 - return; 1068 + goto done; 1164 1069 1165 1070 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 1166 - return; 1071 + goto done; 1167 1072 1168 1073 memset(&args, 0, sizeof(args)); 1169 1074 1170 1075 args.v1.ucAction = action; 1171 1076 1172 1077 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1078 + 1079 + /* wait for the panel to power up */ 1080 + if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { 1081 + int i; 1082 + 1083 + for (i = 0; i < 300; i++) { 1084 + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 1085 + return true; 1086 + mdelay(1); 1087 + } 1088 + return false; 1089 + } 1090 + done: 1091 + return true; 1173 1092 } 1174 1093 1175 1094 union external_encoder_control { ··· 1201 1092 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1202 1093 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); 1203 1094 union external_encoder_control args; 1204 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1095 + struct drm_connector *connector; 1205 1096 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); 1206 1097 u8 frev, crev; 1207 1098 int dp_clock = 0; 1208 1099 int dp_lane_count = 0; 1209 1100 int connector_object_id = 0; 1210 1101 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 1102 + int bpc = 8; 1103 + 1104 + if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) 1105 + connector = radeon_get_connector_for_encoder_init(encoder); 1106 + else 1107 + connector = radeon_get_connector_for_encoder(encoder); 1211 1108 1212 1109 if (connector) { 1213 1110 struct radeon_connector *radeon_connector = to_radeon_connector(connector); ··· 1224 1109 dp_lane_count = dig_connector->dp_lane_count; 1225 1110 connector_object_id = 1226 1111 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 1112 + bpc = 
connector->display_info.bpc; 1227 1113 } 1228 1114 1229 1115 memset(&args, 0, sizeof(args)); ··· 1282 1166 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; 1283 1167 break; 1284 1168 } 1285 - args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; 1169 + switch (bpc) { 1170 + case 0: 1171 + args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE; 1172 + break; 1173 + case 6: 1174 + args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR; 1175 + break; 1176 + case 8: 1177 + default: 1178 + args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; 1179 + break; 1180 + case 10: 1181 + args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR; 1182 + break; 1183 + case 12: 1184 + args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR; 1185 + break; 1186 + case 16: 1187 + args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR; 1188 + break; 1189 + } 1286 1190 break; 1287 1191 default: 1288 1192 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); ··· 1443 1307 ATOM_TRANSMITTER_ACTION_POWER_ON); 1444 1308 radeon_dig_connector->edp_on = true; 1445 1309 } 1446 - dp_link_train(encoder, connector); 1447 1310 if (ASIC_IS_DCE4(rdev)) 1448 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1311 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); 1312 + radeon_dp_link_train(encoder, connector); 1313 + if (ASIC_IS_DCE4(rdev)) 1314 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1449 1315 } 1450 1316 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1451 1317 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); ··· 1460 1322 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1461 1323 1462 1324 if (ASIC_IS_DCE4(rdev)) 1463 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1325 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); 1464 1326 if (connector && 1465 1327 
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 1466 1328 struct radeon_connector *radeon_connector = to_radeon_connector(connector); ··· 1739 1601 /* DCE4/5 */ 1740 1602 if (ASIC_IS_DCE4(rdev)) { 1741 1603 dig = radeon_encoder->enc_priv; 1742 - if (ASIC_IS_DCE41(rdev)) { 1743 - if (dig->linkb) 1744 - return 1; 1745 - else 1746 - return 0; 1747 - } else { 1604 + if (ASIC_IS_DCE41(rdev)) 1605 + return radeon_crtc->crtc_id; 1606 + else { 1748 1607 switch (radeon_encoder->encoder_id) { 1749 1608 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1750 1609 if (dig->linkb) ··· 1797 1662 return 1; 1798 1663 } 1799 1664 1665 + /* This only needs to be called once at startup */ 1666 + void 1667 + radeon_atom_encoder_init(struct radeon_device *rdev) 1668 + { 1669 + struct drm_device *dev = rdev->ddev; 1670 + struct drm_encoder *encoder; 1671 + 1672 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1673 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1674 + struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); 1675 + 1676 + switch (radeon_encoder->encoder_id) { 1677 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1678 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1679 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1680 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1681 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1682 + break; 1683 + default: 1684 + break; 1685 + } 1686 + 1687 + if (ext_encoder && ASIC_IS_DCE41(rdev)) 1688 + atombios_external_encoder_setup(encoder, ext_encoder, 1689 + EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); 1690 + } 1691 + } 1692 + 1800 1693 static void 1801 1694 radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1802 1695 struct drm_display_mode *mode, ··· 1859 1696 /* disable the transmitter */ 1860 1697 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1861 1698 /* setup and enable the encoder */ 1862 - 
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); 1699 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); 1863 1700 1864 - /* init and enable the transmitter */ 1865 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1701 + /* enable the transmitter */ 1866 1702 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1867 1703 } else { 1868 1704 /* disable the encoder and transmitter */ 1869 1705 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1870 - atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1706 + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); 1871 1707 1872 1708 /* setup and enable the encoder and transmitter */ 1873 - atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1874 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1709 + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1875 1710 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1876 1711 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1877 1712 } ··· 1894 1733 } 1895 1734 1896 1735 if (ext_encoder) { 1897 - if (ASIC_IS_DCE41(rdev)) { 1898 - atombios_external_encoder_setup(encoder, ext_encoder, 1899 - EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); 1736 + if (ASIC_IS_DCE41(rdev)) 1900 1737 atombios_external_encoder_setup(encoder, ext_encoder, 1901 1738 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); 1902 - } else 1739 + else 1903 1740 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1904 1741 } 1905 1742 ··· 2004 1845 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2005 1846 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 2006 1847 2007 - if (radeon_encoder->active_device & 2008 - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1848 + if ((radeon_encoder->active_device & 1849 + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) 
|| 1850 + radeon_encoder_is_dp_bridge(encoder)) { 2009 1851 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 2010 1852 if (dig) 2011 1853 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); ··· 2015 1855 radeon_atom_output_lock(encoder, true); 2016 1856 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 2017 1857 2018 - /* select the clock/data port if it uses a router */ 2019 1858 if (connector) { 2020 1859 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1860 + 1861 + /* select the clock/data port if it uses a router */ 2021 1862 if (radeon_connector->router.cd_valid) 2022 1863 radeon_router_select_cd_port(radeon_connector); 1864 + 1865 + /* turn eDP panel on for mode set */ 1866 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 1867 + atombios_set_edp_panel_power(connector, 1868 + ATOM_TRANSMITTER_ACTION_POWER_ON); 2023 1869 } 2024 1870 2025 1871 /* this is needed for the pll/ss setup to work correctly in some cases */ ··· 2080 1914 else { 2081 1915 /* disable the encoder and transmitter */ 2082 1916 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 2083 - atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1917 + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); 2084 1918 } 2085 1919 break; 2086 1920 case ENCODER_OBJECT_ID_INTERNAL_DDI: ··· 2282 2116 } else { 2283 2117 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2284 2118 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2285 - if (ASIC_IS_AVIVO(rdev)) 2286 - radeon_encoder->underscan_type = UNDERSCAN_AUTO; 2287 2119 } 2288 2120 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2289 2121 break; ··· 2314 2150 } else { 2315 2151 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2316 2152 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2317 - if (ASIC_IS_AVIVO(rdev)) 2318 - 
radeon_encoder->underscan_type = UNDERSCAN_AUTO; 2319 2153 } 2320 2154 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2321 2155 break;
+2
drivers/gpu/drm/radeon/radeon_i2c.c
··· 888 888 889 889 i2c->rec = *rec; 890 890 i2c->adapter.owner = THIS_MODULE; 891 + i2c->adapter.class = I2C_CLASS_DDC; 891 892 i2c->dev = dev; 892 893 i2c_set_adapdata(&i2c->adapter, i2c); 893 894 if (rec->mm_i2c || ··· 948 947 949 948 i2c->rec = *rec; 950 949 i2c->adapter.owner = THIS_MODULE; 950 + i2c->adapter.class = I2C_CLASS_DDC; 951 951 i2c->dev = dev; 952 952 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 953 953 "Radeon aux bus %s", name);
+12 -7
drivers/gpu/drm/radeon/radeon_mode.h
··· 464 464 extern struct drm_connector * 465 465 radeon_get_connector_for_encoder(struct drm_encoder *encoder); 466 466 467 + extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); 468 + extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); 469 + extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); 470 + extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); 471 + 467 472 extern void radeon_connector_hotplug(struct drm_connector *connector); 468 - extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); 469 - extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, 473 + extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, 470 474 struct drm_display_mode *mode); 471 475 extern void radeon_dp_set_link_config(struct drm_connector *connector, 472 476 struct drm_display_mode *mode); 473 - extern void dp_link_train(struct drm_encoder *encoder, 474 - struct drm_connector *connector); 477 + extern void radeon_dp_link_train(struct drm_encoder *encoder, 478 + struct drm_connector *connector); 475 479 extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 476 480 extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 477 - extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); 481 + extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); 482 + extern void radeon_atom_encoder_init(struct radeon_device *rdev); 478 483 extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 479 484 int action, uint8_t lane_num, 480 485 uint8_t lane_set); 481 486 extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 482 - uint8_t write_byte, uint8_t *read_byte); 487 + u8 write_byte, u8 *read_byte); 483 488 484 489 extern void radeon_i2c_init(struct radeon_device *rdev); 485 490 extern void 
radeon_i2c_fini(struct radeon_device *rdev); ··· 550 545 extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); 551 546 extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 552 547 extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 553 - extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); 548 + extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action); 554 549 extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 555 550 556 551 extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
-6
drivers/gpu/vga/vga_switcheroo.c
··· 215 215 /* stage one happens before delay */ 216 216 static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) 217 217 { 218 - int ret; 219 218 int i; 220 219 struct vga_switcheroo_client *active = NULL; 221 220 ··· 226 227 } 227 228 if (!active) 228 229 return 0; 229 - 230 - /* power up the first device */ 231 - ret = pci_enable_device(new_client->pdev); 232 - if (ret) 233 - return ret; 234 230 235 231 if (new_client->pwr_state == VGA_SWITCHEROO_OFF) 236 232 vga_switchon(new_client);
+99 -14
drivers/gpu/vga/vgaarb.c
··· 61 61 unsigned int mem_lock_cnt; /* legacy MEM lock count */ 62 62 unsigned int io_norm_cnt; /* normal IO count */ 63 63 unsigned int mem_norm_cnt; /* normal MEM count */ 64 - 64 + bool bridge_has_one_vga; 65 65 /* allow IRQ enable/disable hook */ 66 66 void *cookie; 67 67 void (*irq_set_state)(void *cookie, bool enable); ··· 165 165 unsigned int wants, legacy_wants, match; 166 166 struct vga_device *conflict; 167 167 unsigned int pci_bits; 168 + u32 flags = 0; 169 + 168 170 /* Account for "normal" resources to lock. If we decode the legacy, 169 171 * counterpart, we need to request it as well 170 172 */ ··· 239 237 /* looks like he doesn't have a lock, we can steal 240 238 * them from him 241 239 */ 242 - vga_irq_set_state(conflict, false); 243 240 241 + flags = 0; 244 242 pci_bits = 0; 245 - if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 246 - pci_bits |= PCI_COMMAND_MEMORY; 247 - if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 248 - pci_bits |= PCI_COMMAND_IO; 249 243 250 - pci_set_vga_state(conflict->pdev, false, pci_bits, 251 - change_bridge); 244 + if (!conflict->bridge_has_one_vga) { 245 + vga_irq_set_state(conflict, false); 246 + flags |= PCI_VGA_STATE_CHANGE_DECODES; 247 + if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 248 + pci_bits |= PCI_COMMAND_MEMORY; 249 + if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 250 + pci_bits |= PCI_COMMAND_IO; 251 + } 252 + 253 + if (change_bridge) 254 + flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 255 + 256 + pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 252 257 conflict->owns &= ~lwants; 253 258 /* If he also owned non-legacy, that is no longer the case */ 254 259 if (lwants & VGA_RSRC_LEGACY_MEM) ··· 270 261 * also have in "decodes". We can lock resources we don't decode but 271 262 * not own them. 
272 263 */ 264 + flags = 0; 273 265 pci_bits = 0; 274 - if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 275 - pci_bits |= PCI_COMMAND_MEMORY; 276 - if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 277 - pci_bits |= PCI_COMMAND_IO; 278 - pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK)); 279 266 280 - vga_irq_set_state(vgadev, true); 267 + if (!vgadev->bridge_has_one_vga) { 268 + flags |= PCI_VGA_STATE_CHANGE_DECODES; 269 + if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 270 + pci_bits |= PCI_COMMAND_MEMORY; 271 + if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 272 + pci_bits |= PCI_COMMAND_IO; 273 + } 274 + if (!!(wants & VGA_RSRC_LEGACY_MASK)) 275 + flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 276 + 277 + pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); 278 + 279 + if (!vgadev->bridge_has_one_vga) { 280 + vga_irq_set_state(vgadev, true); 281 + } 281 282 vgadev->owns |= (wants & vgadev->decodes); 282 283 lock_them: 283 284 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); ··· 440 421 } 441 422 EXPORT_SYMBOL(vga_put); 442 423 424 + /* Rules for using a bridge to control a VGA descendant decoding: 425 + if a bridge has only one VGA descendant then it can be used 426 + to control the VGA routing for that device. 427 + It should always use the bridge closest to the device to control it. 428 + If a bridge has a direct VGA descendant, but also have a sub-bridge 429 + VGA descendant then we cannot use that bridge to control the direct VGA descendant. 430 + So for every device we register, we need to iterate all its parent bridges 431 + so we can invalidate any devices using them properly. 
432 + */ 433 + static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) 434 + { 435 + struct vga_device *same_bridge_vgadev; 436 + struct pci_bus *new_bus, *bus; 437 + struct pci_dev *new_bridge, *bridge; 438 + 439 + vgadev->bridge_has_one_vga = true; 440 + 441 + if (list_empty(&vga_list)) 442 + return; 443 + 444 + /* okay iterate the new devices bridge hierarachy */ 445 + new_bus = vgadev->pdev->bus; 446 + while (new_bus) { 447 + new_bridge = new_bus->self; 448 + 449 + if (new_bridge) { 450 + /* go through list of devices already registered */ 451 + list_for_each_entry(same_bridge_vgadev, &vga_list, list) { 452 + bus = same_bridge_vgadev->pdev->bus; 453 + bridge = bus->self; 454 + 455 + /* see if the share a bridge with this device */ 456 + if (new_bridge == bridge) { 457 + /* if their direct parent bridge is the same 458 + as any bridge of this device then it can't be used 459 + for that device */ 460 + same_bridge_vgadev->bridge_has_one_vga = false; 461 + } 462 + 463 + /* now iterate the previous devices bridge hierarchy */ 464 + /* if the new devices parent bridge is in the other devices 465 + hierarchy then we can't use it to control this device */ 466 + while (bus) { 467 + bridge = bus->self; 468 + if (bridge) { 469 + if (bridge == vgadev->pdev->bus->self) 470 + vgadev->bridge_has_one_vga = false; 471 + } 472 + bus = bus->parent; 473 + } 474 + } 475 + } 476 + new_bus = new_bus->parent; 477 + } 478 + } 479 + 443 480 /* 444 481 * Currently, we assume that the "initial" setup of the system is 445 482 * not sane, that is we come up with conflicting devices and let ··· 574 499 ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) 575 500 vga_default = pci_dev_get(pdev); 576 501 #endif 502 + 503 + vga_arbiter_check_bridge_sharing(vgadev); 577 504 578 505 /* Add to the list */ 579 506 list_add(&vgadev->list, &vga_list); ··· 1299 1222 { 1300 1223 int rc; 1301 1224 struct pci_dev *pdev; 1225 + struct vga_device *vgadev; 1302 1226 1303 1227 rc 
= misc_register(&vga_arb_device); 1304 1228 if (rc < 0) ··· 1316 1238 vga_arbiter_add_pci_device(pdev); 1317 1239 1318 1240 pr_info("vgaarb: loaded\n"); 1241 + 1242 + list_for_each_entry(vgadev, &vga_list, list) { 1243 + if (vgadev->bridge_has_one_vga) 1244 + pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev)); 1245 + else 1246 + pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev)); 1247 + } 1319 1248 return rc; 1320 1249 } 1321 1250 subsys_initcall(vga_arb_device_init);
+14 -11
drivers/pci/pci.c
··· 3284 3284 * @dev: the PCI device 3285 3285 * @decode: true = enable decoding, false = disable decoding 3286 3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3287 - * @change_bridge: traverse ancestors and change bridges 3287 + * @change_bridge_flags: traverse ancestors and change bridges 3288 + * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE 3288 3289 */ 3289 3290 int pci_set_vga_state(struct pci_dev *dev, bool decode, 3290 - unsigned int command_bits, bool change_bridge) 3291 + unsigned int command_bits, u32 flags) 3291 3292 { 3292 3293 struct pci_bus *bus; 3293 3294 struct pci_dev *bridge; 3294 3295 u16 cmd; 3295 3296 int rc; 3296 3297 3297 - WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); 3298 + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); 3298 3299 3299 3300 /* ARCH specific VGA enables */ 3300 - rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); 3301 + rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); 3301 3302 if (rc) 3302 3303 return rc; 3303 3304 3304 - pci_read_config_word(dev, PCI_COMMAND, &cmd); 3305 - if (decode == true) 3306 - cmd |= command_bits; 3307 - else 3308 - cmd &= ~command_bits; 3309 - pci_write_config_word(dev, PCI_COMMAND, cmd); 3305 + if (flags & PCI_VGA_STATE_CHANGE_DECODES) { 3306 + pci_read_config_word(dev, PCI_COMMAND, &cmd); 3307 + if (decode == true) 3308 + cmd |= command_bits; 3309 + else 3310 + cmd &= ~command_bits; 3311 + pci_write_config_word(dev, PCI_COMMAND, cmd); 3312 + } 3310 3313 3311 - if (change_bridge == false) 3314 + if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) 3312 3315 return 0; 3313 3316 3314 3317 bus = dev->bus;
+7
drivers/platform/x86/Kconfig
··· 753 753 To compile this driver as a module, choose M here: the module 754 754 will be called samsung-laptop. 755 755 756 + config MXM_WMI 757 + tristate "WMI support for MXM Laptop Graphics" 758 + depends on ACPI_WMI 759 + ---help--- 760 + MXM is a standard for laptop graphics cards, the WMI interface 761 + is required for switchable nvidia graphics machines 762 + 756 763 endif # X86_PLATFORM_DEVICES
+1
drivers/platform/x86/Makefile
··· 42 42 obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 43 43 obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o 44 44 obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o 45 + obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+111
drivers/platform/x86/mxm-wmi.c
··· 1 + /* 2 + * MXM WMI driver 3 + * 4 + * Copyright(C) 2010 Red Hat. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 + */ 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/init.h> 23 + #include <acpi/acpi_bus.h> 24 + #include <acpi/acpi_drivers.h> 25 + 26 + MODULE_AUTHOR("Dave Airlie"); 27 + MODULE_DESCRIPTION("MXM WMI Driver"); 28 + MODULE_LICENSE("GPL"); 29 + 30 + #define MXM_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" 31 + 32 + MODULE_ALIAS("wmi:"MXM_WMMX_GUID); 33 + 34 + #define MXM_WMMX_FUNC_MXDS 0x5344584D /* "MXDS" */ 35 + #define MXM_WMMX_FUNC_MXMX 0x53445344 /* "MXMX" */ 36 + 37 + struct mxds_args { 38 + u32 func; 39 + u32 args; 40 + u32 xarg; 41 + }; 42 + 43 + int mxm_wmi_call_mxds(int adapter) 44 + { 45 + struct mxds_args args = { 46 + .func = MXM_WMMX_FUNC_MXDS, 47 + .args = 0, 48 + .xarg = 1, 49 + }; 50 + struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; 51 + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 52 + acpi_status status; 53 + 54 + printk("calling mux switch %d\n", adapter); 55 + 56 + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input, 57 + &output); 58 + 59 + if (ACPI_FAILURE(status)) 60 + return status; 61 + 62 + printk("mux switched %d\n", status); 63 + 
return 0; 64 + 65 + } 66 + EXPORT_SYMBOL_GPL(mxm_wmi_call_mxds); 67 + 68 + int mxm_wmi_call_mxmx(int adapter) 69 + { 70 + struct mxds_args args = { 71 + .func = MXM_WMMX_FUNC_MXMX, 72 + .args = 0, 73 + .xarg = 1, 74 + }; 75 + struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; 76 + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 77 + acpi_status status; 78 + 79 + printk("calling mux switch %d\n", adapter); 80 + 81 + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input, 82 + &output); 83 + 84 + if (ACPI_FAILURE(status)) 85 + return status; 86 + 87 + printk("mux mutex set switched %d\n", status); 88 + return 0; 89 + 90 + } 91 + EXPORT_SYMBOL_GPL(mxm_wmi_call_mxmx); 92 + 93 + bool mxm_wmi_supported(void) 94 + { 95 + bool guid_valid; 96 + guid_valid = wmi_has_guid(MXM_WMMX_GUID); 97 + return guid_valid; 98 + } 99 + EXPORT_SYMBOL_GPL(mxm_wmi_supported); 100 + 101 + static int __init mxm_wmi_init(void) 102 + { 103 + return 0; 104 + } 105 + 106 + static void __exit mxm_wmi_exit(void) 107 + { 108 + } 109 + 110 + module_init(mxm_wmi_init); 111 + module_exit(mxm_wmi_exit);
+34 -15
include/drm/drmP.h
··· 122 122 * using the DRM_DEBUG_KMS and DRM_DEBUG. 123 123 */ 124 124 125 - extern void drm_ut_debug_printk(unsigned int request_level, 125 + extern __attribute__((format (printf, 4, 5))) 126 + void drm_ut_debug_printk(unsigned int request_level, 126 127 const char *prefix, 127 128 const char *function_name, 128 129 const char *format, ...); 130 + extern __attribute__((format (printf, 2, 3))) 131 + int drm_err(const char *func, const char *format, ...); 132 + 129 133 /***********************************************************************/ 130 134 /** \name DRM template customization defaults */ 131 135 /*@{*/ ··· 185 181 * \param fmt printf() like format string. 186 182 * \param arg arguments 187 183 */ 188 - #define DRM_ERROR(fmt, arg...) \ 189 - printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) 184 + #define DRM_ERROR(fmt, ...) \ 185 + drm_err(__func__, fmt, ##__VA_ARGS__) 190 186 191 - /** 192 - * Memory error output. 193 - * 194 - * \param area memory area where the error occurred. 195 - * \param fmt printf() like format string. 196 - * \param arg arguments 197 - */ 198 - #define DRM_MEM_ERROR(area, fmt, arg...) \ 199 - printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \ 200 - drm_mem_stats[area].name , ##arg) 201 - 202 - #define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) 187 + #define DRM_INFO(fmt, ...) \ 188 + printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) 203 189 204 190 /** 205 191 * Debug output. 
··· 994 1000 struct drm_mode_group mode_group; 995 1001 }; 996 1002 1003 + /* mode specified on the command line */ 1004 + struct drm_cmdline_mode { 1005 + bool specified; 1006 + bool refresh_specified; 1007 + bool bpp_specified; 1008 + int xres, yres; 1009 + int bpp; 1010 + int refresh; 1011 + bool rb; 1012 + bool interlace; 1013 + bool cvt; 1014 + bool margins; 1015 + enum drm_connector_force force; 1016 + }; 1017 + 1018 + 997 1019 struct drm_pending_vblank_event { 998 1020 struct drm_pending_event base; 999 1021 int pipe; ··· 1404 1394 unsigned flags, 1405 1395 struct drm_crtc *refcrtc); 1406 1396 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); 1397 + 1398 + extern bool 1399 + drm_mode_parse_command_line_for_connector(const char *mode_option, 1400 + struct drm_connector *connector, 1401 + struct drm_cmdline_mode *mode); 1402 + 1403 + extern struct drm_display_mode * 1404 + drm_mode_create_from_cmdline_mode(struct drm_device *dev, 1405 + struct drm_cmdline_mode *cmd); 1407 1406 1408 1407 /* Modesetting support */ 1409 1408 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+5 -1
include/drm/drm_crtc.h
··· 183 183 SubPixelNone, 184 184 }; 185 185 186 - 186 + #define DRM_COLOR_FORMAT_RGB444 (1<<0) 187 + #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) 188 + #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) 187 189 /* 188 190 * Describes a given display (e.g. CRT or flat panel) and its limitations. 189 191 */ ··· 200 198 unsigned int min_vfreq, max_vfreq; 201 199 unsigned int min_hfreq, max_hfreq; 202 200 unsigned int pixel_clock; 201 + unsigned int bpc; 203 202 204 203 enum subpixel_order subpixel_order; 204 + u32 color_formats; 205 205 206 206 char *raw_edid; /* if any */ 207 207 };
+5
include/drm/drm_dp_helper.h
··· 53 53 54 54 #define DP_MAX_LANE_COUNT 0x002 55 55 # define DP_MAX_LANE_COUNT_MASK 0x1f 56 + # define DP_TPS3_SUPPORTED (1 << 6) 56 57 # define DP_ENHANCED_FRAME_CAP (1 << 7) 57 58 58 59 #define DP_MAX_DOWNSPREAD 0x003 ··· 72 71 73 72 #define DP_MAIN_LINK_CHANNEL_CODING 0x006 74 73 74 + #define DP_TRAINING_AUX_RD_INTERVAL 0x00e 75 + 75 76 /* link configuration */ 76 77 #define DP_LINK_BW_SET 0x100 77 78 # define DP_LINK_BW_1_62 0x06 78 79 # define DP_LINK_BW_2_7 0x0a 80 + # define DP_LINK_BW_5_4 0x14 79 81 80 82 #define DP_LANE_COUNT_SET 0x101 81 83 # define DP_LANE_COUNT_MASK 0x0f ··· 88 84 # define DP_TRAINING_PATTERN_DISABLE 0 89 85 # define DP_TRAINING_PATTERN_1 1 90 86 # define DP_TRAINING_PATTERN_2 2 87 + # define DP_TRAINING_PATTERN_3 3 91 88 # define DP_TRAINING_PATTERN_MASK 0x3 92 89 93 90 # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+24 -1
include/drm/drm_edid.h
··· 155 155 #define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) 156 156 #define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) 157 157 #define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) 158 - #define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */ 158 + #define DRM_EDID_INPUT_DIGITAL (1 << 7) 159 + #define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) 160 + #define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) 161 + #define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) 162 + #define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) 163 + #define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) 164 + #define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) 165 + #define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) 166 + #define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) 167 + #define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) 168 + #define DRM_EDID_DIGITAL_TYPE_UNDEF (0) 169 + #define DRM_EDID_DIGITAL_TYPE_DVI (1) 170 + #define DRM_EDID_DIGITAL_TYPE_HDMI_A (2) 171 + #define DRM_EDID_DIGITAL_TYPE_HDMI_B (3) 172 + #define DRM_EDID_DIGITAL_TYPE_MDDI (4) 173 + #define DRM_EDID_DIGITAL_TYPE_DP (5) 159 174 160 175 #define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) 161 176 #define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) 162 177 #define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) 178 + /* If analog */ 163 179 #define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ 180 + /* If digital */ 181 + #define DRM_EDID_FEATURE_COLOR_MASK (3 << 3) 182 + #define DRM_EDID_FEATURE_RGB (0 << 3) 183 + #define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3) 184 + #define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3) 185 + #define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */ 186 + 164 187 #define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) 165 188 #define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) 166 189 #define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+1 -15
include/drm/drm_fb_helper.h
··· 40 40 struct drm_display_mode *desired_mode; 41 41 }; 42 42 43 - /* mode specified on the command line */ 44 - struct drm_fb_helper_cmdline_mode { 45 - bool specified; 46 - bool refresh_specified; 47 - bool bpp_specified; 48 - int xres, yres; 49 - int bpp; 50 - int refresh; 51 - bool rb; 52 - bool interlace; 53 - bool cvt; 54 - bool margins; 55 - }; 56 - 57 43 struct drm_fb_helper_surface_size { 58 44 u32 fb_width; 59 45 u32 fb_height; ··· 60 74 }; 61 75 62 76 struct drm_fb_helper_connector { 63 - struct drm_fb_helper_cmdline_mode cmdline_mode; 64 77 struct drm_connector *connector; 78 + struct drm_cmdline_mode cmdline_mode; 65 79 }; 66 80 67 81 struct drm_fb_helper {
+33
include/linux/mxm-wmi.h
··· 1 + /* 2 + * MXM WMI driver 3 + * 4 + * Copyright(C) 2010 Red Hat. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 + */ 20 + 21 + #ifndef MXM_WMI_H 22 + #define MXM_WMI_H 23 + 24 + /* discrete adapters */ 25 + #define MXM_MXDS_ADAPTER_0 0x0 26 + #define MXM_MXDS_ADAPTER_1 0x0 27 + /* integrated adapter */ 28 + #define MXM_MXDS_ADAPTER_IGD 0x10 29 + int mxm_wmi_call_mxds(int adapter); 30 + int mxm_wmi_call_mxmx(int adapter); 31 + bool mxm_wmi_supported(void); 32 + 33 + #endif
+5 -2
include/linux/pci.h
··· 941 941 int pci_cfg_space_size(struct pci_dev *dev); 942 942 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 943 943 944 + #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 945 + #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) 946 + 944 947 int pci_set_vga_state(struct pci_dev *pdev, bool decode, 945 - unsigned int command_bits, bool change_bridge); 948 + unsigned int command_bits, u32 flags); 946 949 /* kmem_cache style wrapper around pci_alloc_consistent() */ 947 950 948 951 #include <linux/pci-dma.h> ··· 1090 1087 1091 1088 /* some architectures require additional setup to direct VGA traffic */ 1092 1089 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1093 - unsigned int command_bits, bool change_bridge); 1090 + unsigned int command_bits, u32 flags); 1094 1091 extern void pci_register_set_vga_state(arch_set_vga_state_t func); 1095 1092 1096 1093 #else /* CONFIG_PCI is not enabled */