Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel into drm-next

- Fix the execbuf rebind performance regression due to topic/ppgtt (Chris).
- Fix up the connector cleanup ordering for sdvo i2c and dp aux devices (Imre).
- Try to preserve the firmware modeset config on driver load. And a bit of prep
work for smooth takeover of the fb contents (Jesse).
- Prep cleanup for larger gtt address spaces on bdw (Ben).
- Improve our vblank_wait code to make hsw modesets faster (Paulo).
- Display debugfs file (Jesse).
- DRRS prep work from Vandana Kannan.
- pipestat interrupt handler to fix a few races around vblank/pageflip handling
on byt (Imre).
- Improve display fuse handling for display-less SKUs (Damien).
- Drop locks while stalling for the gpu when serving pagefaults to improve
interactivity (Chris).
- And as usual piles of other improvements and small fixes all over.

* tag 'drm-intel-next-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel: (65 commits)
drm/i915: fix NULL deref in the load detect code
drm/i915: Only bind each object rather than for every execbuffer
drm/i915: Directly return the vma from bind_to_vm
drm/i915: Simplify i915_gem_object_ggtt_unpin
drm/i915: Allow blocking in the PDE alloc when running low on gtt space
drm/i915: Don't allocate context pages as mappable
drm/i915: Handle set_cache_level errors in the status page setup
drm/i915: Don't pin the status page as mappable
drm/i915: Don't set PIN_MAPPABLE for legacy ringbuffers
drm/i915: Handle set_cache_level errors in the pipe control scratch setup
drm/i915: split PIN_GLOBAL out from PIN_MAPPABLE
drm/i915: Consolidate binding parameters into flags
drm/i915: sdvo: add i2c sysfs symlink to the connector's directory
drm/i915: sdvo: fix error path in sdvo_connector_init
drm/i915: dp: fix order of dp aux i2c device cleanup
drm/i915: add unregister callback to connector
drm/i915: don't reference null pointer at i915_sink_crc
drm/i915/lvds: Remove dead code from failing case
drm/i915: don't preserve inherited configs with nothing on v2
drm/i915/bdw: Split up PPGTT cleanup
...

+1126 -524
+23
drivers/gpu/drm/drm_crtc.c
··· 215 215 { DRM_MODE_ENCODER_DSI, "DSI" }, 216 216 }; 217 217 218 + static const struct drm_prop_enum_list drm_subpixel_enum_list[] = 219 + { 220 + { SubPixelUnknown, "Unknown" }, 221 + { SubPixelHorizontalRGB, "Horizontal RGB" }, 222 + { SubPixelHorizontalBGR, "Horizontal BGR" }, 223 + { SubPixelVerticalRGB, "Vertical RGB" }, 224 + { SubPixelVerticalBGR, "Vertical BGR" }, 225 + { SubPixelNone, "None" }, 226 + }; 227 + 218 228 void drm_connector_ida_init(void) 219 229 { 220 230 int i; ··· 273 263 return "unknown"; 274 264 } 275 265 EXPORT_SYMBOL(drm_get_connector_status_name); 266 + 267 + /** 268 + * drm_get_subpixel_order_name - return a string for a given subpixel enum 269 + * @order: enum of subpixel_order 270 + * 271 + * Note you could abuse this and return something out of bounds, but that 272 + * would be a caller error. No unscrubbed user data should make it here. 273 + */ 274 + const char *drm_get_subpixel_order_name(enum subpixel_order order) 275 + { 276 + return drm_subpixel_enum_list[order].name; 277 + } 278 + EXPORT_SYMBOL(drm_get_subpixel_order_name); 276 279 277 280 static char printable_char(int c) 278 281 {
+4 -2
drivers/gpu/drm/drm_fb_helper.c
··· 1136 1136 return count; 1137 1137 } 1138 1138 1139 - static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) 1139 + struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) 1140 1140 { 1141 1141 struct drm_display_mode *mode; 1142 1142 ··· 1149 1149 } 1150 1150 return NULL; 1151 1151 } 1152 + EXPORT_SYMBOL(drm_has_preferred_mode); 1152 1153 1153 1154 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) 1154 1155 { ··· 1158 1157 return cmdline_mode->specified; 1159 1158 } 1160 1159 1161 - static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1160 + struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1162 1161 int width, int height) 1163 1162 { 1164 1163 struct drm_cmdline_mode *cmdline_mode; ··· 1198 1197 list_add(&mode->head, &fb_helper_conn->connector->modes); 1199 1198 return mode; 1200 1199 } 1200 + EXPORT_SYMBOL(drm_pick_cmdline_mode); 1201 1201 1202 1202 static bool drm_connector_enabled(struct drm_connector *connector, bool strict) 1203 1203 {
+162
drivers/gpu/drm/i915/i915_debugfs.c
··· 1937 1937 if (connector->base.dpms != DRM_MODE_DPMS_ON) 1938 1938 continue; 1939 1939 1940 + if (!connector->base.encoder) 1941 + continue; 1942 + 1940 1943 encoder = to_intel_encoder(connector->base.encoder); 1941 1944 if (encoder->type != INTEL_OUTPUT_EDP) 1942 1945 continue; ··· 2073 2070 } 2074 2071 2075 2072 mutex_unlock(&power_domains->lock); 2073 + 2074 + return 0; 2075 + } 2076 + 2077 + static void intel_seq_print_mode(struct seq_file *m, int tabs, 2078 + struct drm_display_mode *mode) 2079 + { 2080 + int i; 2081 + 2082 + for (i = 0; i < tabs; i++) 2083 + seq_putc(m, '\t'); 2084 + 2085 + seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2086 + mode->base.id, mode->name, 2087 + mode->vrefresh, mode->clock, 2088 + mode->hdisplay, mode->hsync_start, 2089 + mode->hsync_end, mode->htotal, 2090 + mode->vdisplay, mode->vsync_start, 2091 + mode->vsync_end, mode->vtotal, 2092 + mode->type, mode->flags); 2093 + } 2094 + 2095 + static void intel_encoder_info(struct seq_file *m, 2096 + struct intel_crtc *intel_crtc, 2097 + struct intel_encoder *intel_encoder) 2098 + { 2099 + struct drm_info_node *node = (struct drm_info_node *) m->private; 2100 + struct drm_device *dev = node->minor->dev; 2101 + struct drm_crtc *crtc = &intel_crtc->base; 2102 + struct intel_connector *intel_connector; 2103 + struct drm_encoder *encoder; 2104 + 2105 + encoder = &intel_encoder->base; 2106 + seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2107 + encoder->base.id, drm_get_encoder_name(encoder)); 2108 + for_each_connector_on_encoder(dev, encoder, intel_connector) { 2109 + struct drm_connector *connector = &intel_connector->base; 2110 + seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2111 + connector->base.id, 2112 + drm_get_connector_name(connector), 2113 + drm_get_connector_status_name(connector->status)); 2114 + if (connector->status == connector_status_connected) { 2115 + struct 
drm_display_mode *mode = &crtc->mode; 2116 + seq_printf(m, ", mode:\n"); 2117 + intel_seq_print_mode(m, 2, mode); 2118 + } else { 2119 + seq_putc(m, '\n'); 2120 + } 2121 + } 2122 + } 2123 + 2124 + static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2125 + { 2126 + struct drm_info_node *node = (struct drm_info_node *) m->private; 2127 + struct drm_device *dev = node->minor->dev; 2128 + struct drm_crtc *crtc = &intel_crtc->base; 2129 + struct intel_encoder *intel_encoder; 2130 + 2131 + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2132 + crtc->fb->base.id, crtc->x, crtc->y, 2133 + crtc->fb->width, crtc->fb->height); 2134 + for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2135 + intel_encoder_info(m, intel_crtc, intel_encoder); 2136 + } 2137 + 2138 + static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2139 + { 2140 + struct drm_display_mode *mode = panel->fixed_mode; 2141 + 2142 + seq_printf(m, "\tfixed mode:\n"); 2143 + intel_seq_print_mode(m, 2, mode); 2144 + } 2145 + 2146 + static void intel_dp_info(struct seq_file *m, 2147 + struct intel_connector *intel_connector) 2148 + { 2149 + struct intel_encoder *intel_encoder = intel_connector->encoder; 2150 + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2151 + 2152 + seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2153 + seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2154 + "no"); 2155 + if (intel_encoder->type == INTEL_OUTPUT_EDP) 2156 + intel_panel_info(m, &intel_connector->panel); 2157 + } 2158 + 2159 + static void intel_hdmi_info(struct seq_file *m, 2160 + struct intel_connector *intel_connector) 2161 + { 2162 + struct intel_encoder *intel_encoder = intel_connector->encoder; 2163 + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2164 + 2165 + seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? 
"yes" : 2166 + "no"); 2167 + } 2168 + 2169 + static void intel_lvds_info(struct seq_file *m, 2170 + struct intel_connector *intel_connector) 2171 + { 2172 + intel_panel_info(m, &intel_connector->panel); 2173 + } 2174 + 2175 + static void intel_connector_info(struct seq_file *m, 2176 + struct drm_connector *connector) 2177 + { 2178 + struct intel_connector *intel_connector = to_intel_connector(connector); 2179 + struct intel_encoder *intel_encoder = intel_connector->encoder; 2180 + 2181 + seq_printf(m, "connector %d: type %s, status: %s\n", 2182 + connector->base.id, drm_get_connector_name(connector), 2183 + drm_get_connector_status_name(connector->status)); 2184 + if (connector->status == connector_status_connected) { 2185 + seq_printf(m, "\tname: %s\n", connector->display_info.name); 2186 + seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2187 + connector->display_info.width_mm, 2188 + connector->display_info.height_mm); 2189 + seq_printf(m, "\tsubpixel order: %s\n", 2190 + drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2191 + seq_printf(m, "\tCEA rev: %d\n", 2192 + connector->display_info.cea_rev); 2193 + } 2194 + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2195 + intel_encoder->type == INTEL_OUTPUT_EDP) 2196 + intel_dp_info(m, intel_connector); 2197 + else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2198 + intel_hdmi_info(m, intel_connector); 2199 + else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2200 + intel_lvds_info(m, intel_connector); 2201 + 2202 + } 2203 + 2204 + static int i915_display_info(struct seq_file *m, void *unused) 2205 + { 2206 + struct drm_info_node *node = (struct drm_info_node *) m->private; 2207 + struct drm_device *dev = node->minor->dev; 2208 + struct drm_crtc *crtc; 2209 + struct drm_connector *connector; 2210 + 2211 + drm_modeset_lock_all(dev); 2212 + seq_printf(m, "CRTC info\n"); 2213 + seq_printf(m, "---------\n"); 2214 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2215 + 
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2216 + 2217 + seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", 2218 + crtc->base.id, pipe_name(intel_crtc->pipe), 2219 + intel_crtc->active ? "yes" : "no"); 2220 + if (intel_crtc->active) 2221 + intel_crtc_info(m, intel_crtc); 2222 + } 2223 + 2224 + seq_printf(m, "\n"); 2225 + seq_printf(m, "Connector info\n"); 2226 + seq_printf(m, "--------------\n"); 2227 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2228 + intel_connector_info(m, connector); 2229 + } 2230 + drm_modeset_unlock_all(dev); 2076 2231 2077 2232 return 0; 2078 2233 } ··· 3680 3519 {"i915_energy_uJ", i915_energy_uJ, 0}, 3681 3520 {"i915_pc8_status", i915_pc8_status, 0}, 3682 3521 {"i915_power_domain_info", i915_power_domain_info, 0}, 3522 + {"i915_display_info", i915_display_info, 0}, 3683 3523 }; 3684 3524 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3685 3525
+69 -12
drivers/gpu/drm/i915/i915_dma.c
··· 626 626 struct drm_file *file_priv) 627 627 { 628 628 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 629 - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 630 - drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 631 - master_priv->sarea_priv; 629 + struct drm_i915_master_private *master_priv; 630 + drm_i915_sarea_t *sarea_priv; 632 631 drm_i915_batchbuffer_t *batch = data; 633 632 int ret; 634 633 struct drm_clip_rect *cliprects = NULL; 635 634 636 635 if (drm_core_check_feature(dev, DRIVER_MODESET)) 637 636 return -ENODEV; 637 + 638 + master_priv = dev->primary->master->driver_priv; 639 + sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; 638 640 639 641 if (!dev_priv->dri1.allow_batchbuffer) { 640 642 DRM_ERROR("Batchbuffer ioctl disabled\n"); ··· 684 682 struct drm_file *file_priv) 685 683 { 686 684 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 687 - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 688 - drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 689 - master_priv->sarea_priv; 685 + struct drm_i915_master_private *master_priv; 686 + drm_i915_sarea_t *sarea_priv; 690 687 drm_i915_cmdbuffer_t *cmdbuf = data; 691 688 struct drm_clip_rect *cliprects = NULL; 692 689 void *batch_data; ··· 696 695 697 696 if (drm_core_check_feature(dev, DRIVER_MODESET)) 698 697 return -ENODEV; 698 + 699 + master_priv = dev->primary->master->driver_priv; 700 + sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; 699 701 700 702 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 701 703 ··· 1446 1442 1447 1443 static void i915_dump_device_info(struct drm_i915_private *dev_priv) 1448 1444 { 1449 - const struct intel_device_info *info = dev_priv->info; 1445 + const struct intel_device_info *info = &dev_priv->info; 1450 1446 1451 1447 #define PRINT_S(name) "%s" 1452 1448 #define SEP_EMPTY ··· 1463 1459 #undef SEP_COMMA 1464 1460 } 1465 1461 1462 + /* 1463 
+ * Determine various intel_device_info fields at runtime. 1464 + * 1465 + * Use it when either: 1466 + * - it's judged too laborious to fill n static structures with the limit 1467 + * when a simple if statement does the job, 1468 + * - run-time checks (eg read fuse/strap registers) are needed. 1469 + * 1470 + * This function needs to be called: 1471 + * - after the MMIO has been setup as we are reading registers, 1472 + * - after the PCH has been detected, 1473 + * - before the first usage of the fields it can tweak. 1474 + */ 1475 + static void intel_device_info_runtime_init(struct drm_device *dev) 1476 + { 1477 + struct drm_i915_private *dev_priv = dev->dev_private; 1478 + struct intel_device_info *info; 1479 + 1480 + info = (struct intel_device_info *)&dev_priv->info; 1481 + 1482 + info->num_sprites = 1; 1483 + if (IS_VALLEYVIEW(dev)) 1484 + info->num_sprites = 2; 1485 + 1486 + if (i915.disable_display) { 1487 + DRM_INFO("Display disabled (module parameter)\n"); 1488 + info->num_pipes = 0; 1489 + } else if (info->num_pipes > 0 && 1490 + (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && 1491 + !IS_VALLEYVIEW(dev)) { 1492 + u32 fuse_strap = I915_READ(FUSE_STRAP); 1493 + u32 sfuse_strap = I915_READ(SFUSE_STRAP); 1494 + 1495 + /* 1496 + * SFUSE_STRAP is supposed to have a bit signalling the display 1497 + * is fused off. Unfortunately it seems that, at least in 1498 + * certain cases, fused off display means that PCH display 1499 + * reads don't land anywhere. In that case, we read 0s. 1500 + * 1501 + * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK 1502 + * should be set when taking over after the firmware. 
1503 + */ 1504 + if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || 1505 + sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || 1506 + (dev_priv->pch_type == PCH_CPT && 1507 + !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { 1508 + DRM_INFO("Display fused off, disabling\n"); 1509 + info->num_pipes = 0; 1510 + } 1511 + } 1512 + } 1513 + 1466 1514 /** 1467 1515 * i915_driver_load - setup chip and create an initial config 1468 1516 * @dev: DRM device ··· 1529 1473 int i915_driver_load(struct drm_device *dev, unsigned long flags) 1530 1474 { 1531 1475 struct drm_i915_private *dev_priv; 1532 - struct intel_device_info *info; 1476 + struct intel_device_info *info, *device_info; 1533 1477 int ret = 0, mmio_bar, mmio_size; 1534 1478 uint32_t aperture_size; 1535 1479 ··· 1552 1496 1553 1497 dev->dev_private = (void *)dev_priv; 1554 1498 dev_priv->dev = dev; 1555 - dev_priv->info = info; 1499 + 1500 + /* copy initial configuration to dev_priv->info */ 1501 + device_info = (struct intel_device_info *)&dev_priv->info; 1502 + *device_info = *info; 1556 1503 1557 1504 spin_lock_init(&dev_priv->irq_lock); 1558 1505 spin_lock_init(&dev_priv->gpu_error.lock); ··· 1694 1635 if (!IS_I945G(dev) && !IS_I945GM(dev)) 1695 1636 pci_enable_msi(dev->pdev); 1696 1637 1697 - dev_priv->num_plane = 1; 1698 - if (IS_VALLEYVIEW(dev)) 1699 - dev_priv->num_plane = 2; 1638 + intel_device_info_runtime_init(dev); 1700 1639 1701 1640 if (INTEL_INFO(dev)->num_pipes) { 1702 1641 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+36 -22
drivers/gpu/drm/i915/i915_drv.h
··· 79 79 }; 80 80 #define plane_name(p) ((p) + 'A') 81 81 82 - #define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A') 82 + #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites + (s) + 'A') 83 83 84 84 enum port { 85 85 PORT_A = 0, ··· 163 163 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 164 164 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 165 165 if ((intel_encoder)->base.crtc == (__crtc)) 166 + 167 + #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ 168 + list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 169 + if ((intel_connector)->base.encoder == (__encoder)) 166 170 167 171 struct drm_i915_private; 168 172 ··· 534 530 struct intel_device_info { 535 531 u32 display_mmio_offset; 536 532 u8 num_pipes:3; 533 + u8 num_sprites:2; 537 534 u8 gen; 538 535 u8 ring_mask; /* Rings supported by the HW */ 539 536 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); ··· 1395 1390 struct drm_device *dev; 1396 1391 struct kmem_cache *slab; 1397 1392 1398 - const struct intel_device_info *info; 1393 + const struct intel_device_info info; 1399 1394 1400 1395 int relative_constants_mode; 1401 1396 ··· 1440 1435 }; 1441 1436 u32 gt_irq_mask; 1442 1437 u32 pm_irq_mask; 1438 + u32 pipestat_irq_mask[I915_MAX_PIPES]; 1443 1439 1444 1440 struct work_struct hotplug_work; 1445 1441 bool enable_hotplug_processing; ··· 1455 1449 } hpd_stats[HPD_NUM_PINS]; 1456 1450 u32 hpd_event_bits; 1457 1451 struct timer_list hotplug_reenable_timer; 1458 - 1459 - int num_plane; 1460 1452 1461 1453 struct i915_fbc fbc; 1462 1454 struct intel_opregion opregion; ··· 1505 1501 1506 1502 struct sdvo_device_mapping sdvo_mappings[2]; 1507 1503 1508 - struct drm_crtc *plane_to_crtc_mapping[3]; 1509 - struct drm_crtc *pipe_to_crtc_mapping[3]; 1504 + struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1505 + struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1510 1506 
wait_queue_head_t pending_flip_queue; 1511 1507 1512 1508 #ifdef CONFIG_DEBUG_FS ··· 1803 1799 atomic_t rps_wait_boost; 1804 1800 }; 1805 1801 1806 - #define INTEL_INFO(dev) (to_i915(dev)->info) 1802 + #define INTEL_INFO(dev) (&to_i915(dev)->info) 1807 1803 1808 1804 #define IS_I830(dev) ((dev)->pdev->device == 0x3577) 1809 1805 #define IS_845G(dev) ((dev)->pdev->device == 0x2562) ··· 1957 1953 int vbt_sdvo_panel_type; 1958 1954 int enable_rc6; 1959 1955 int enable_fbc; 1960 - bool enable_hangcheck; 1961 1956 int enable_ppgtt; 1962 1957 int enable_psr; 1963 1958 unsigned int preliminary_hw_support; 1964 1959 int disable_power_well; 1965 1960 int enable_ips; 1966 - bool fastboot; 1967 1961 int enable_pc8; 1968 1962 int pc8_timeout; 1963 + int invert_brightness; 1964 + /* leave bools at the end to not create holes */ 1965 + bool enable_hangcheck; 1966 + bool fastboot; 1969 1967 bool prefault_disable; 1970 1968 bool reset; 1971 - int invert_brightness; 1969 + bool disable_display; 1972 1970 }; 1973 1971 extern struct i915_params i915 __read_mostly; 1974 1972 ··· 2018 2012 extern void intel_uncore_fini(struct drm_device *dev); 2019 2013 2020 2014 void 2021 - i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); 2015 + i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, 2016 + u32 status_mask); 2022 2017 2023 2018 void 2024 - i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); 2019 + i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, 2020 + u32 status_mask); 2025 2021 2026 2022 /* i915_gem.c */ 2027 2023 int i915_gem_init_ioctl(struct drm_device *dev, void *data, ··· 2084 2076 void i915_gem_free_object(struct drm_gem_object *obj); 2085 2077 void i915_gem_vma_destroy(struct i915_vma *vma); 2086 2078 2079 + #define PIN_MAPPABLE 0x1 2080 + #define PIN_NONBLOCK 0x2 2081 + #define PIN_GLOBAL 0x4 2087 2082 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2088 2083 struct 
i915_address_space *vm, 2089 2084 uint32_t alignment, 2090 - bool map_and_fenceable, 2091 - bool nonblocking); 2092 - void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); 2085 + unsigned flags); 2093 2086 int __must_check i915_vma_unbind(struct i915_vma *vma); 2094 - int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); 2095 2087 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2096 2088 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2097 2089 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); ··· 2291 2283 static inline int __must_check 2292 2284 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 2293 2285 uint32_t alignment, 2294 - bool map_and_fenceable, 2295 - bool nonblocking) 2286 + unsigned flags) 2296 2287 { 2297 - return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, 2298 - map_and_fenceable, nonblocking); 2288 + return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); 2299 2289 } 2290 + 2291 + static inline int 2292 + i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 2293 + { 2294 + return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 2295 + } 2296 + 2297 + void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); 2300 2298 2301 2299 /* i915_gem_context.c */ 2302 2300 #define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base) ··· 2345 2331 int min_size, 2346 2332 unsigned alignment, 2347 2333 unsigned cache_level, 2348 - bool mappable, 2349 - bool nonblock); 2334 + unsigned flags); 2350 2335 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2351 2336 int i915_gem_evict_everything(struct drm_device *dev); 2352 2337 ··· 2560 2547 extern void intel_modeset_init(struct drm_device *dev); 2561 2548 extern void intel_modeset_gem_init(struct drm_device *dev); 2562 2549 extern void intel_modeset_cleanup(struct drm_device *dev); 2550 + extern void intel_connector_unregister(struct 
intel_connector *); 2563 2551 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 2564 2552 extern void intel_modeset_setup_hw_state(struct drm_device *dev, 2565 2553 bool force_restore);
+69 -86
drivers/gpu/drm/i915/i915_gem.c
··· 43 43 static __must_check int 44 44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 45 45 bool readonly); 46 - static __must_check int 47 - i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 48 - struct i915_address_space *vm, 49 - unsigned alignment, 50 - bool map_and_fenceable, 51 - bool nonblocking); 52 46 static int i915_gem_phys_pwrite(struct drm_device *dev, 53 47 struct drm_i915_gem_object *obj, 54 48 struct drm_i915_gem_pwrite *args, ··· 599 605 char __user *user_data; 600 606 int page_offset, page_length, ret; 601 607 602 - ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); 608 + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); 603 609 if (ret) 604 610 goto out; 605 611 ··· 1008 1014 struct timespec *timeout, 1009 1015 struct drm_i915_file_private *file_priv) 1010 1016 { 1011 - drm_i915_private_t *dev_priv = ring->dev->dev_private; 1017 + struct drm_device *dev = ring->dev; 1018 + drm_i915_private_t *dev_priv = dev->dev_private; 1012 1019 const bool irq_test_in_progress = 1013 1020 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1014 1021 struct timespec before, now; ··· 1024 1029 1025 1030 timeout_expire = timeout ? 
jiffies + timespec_to_jiffies_timeout(timeout) : 0; 1026 1031 1027 - if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) { 1032 + if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { 1028 1033 gen6_rps_boost(dev_priv); 1029 1034 if (file_priv) 1030 1035 mod_delayed_work(dev_priv->wq, ··· 1179 1184 */ 1180 1185 static __must_check int 1181 1186 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, 1182 - struct drm_file *file, 1187 + struct drm_i915_file_private *file_priv, 1183 1188 bool readonly) 1184 1189 { 1185 1190 struct drm_device *dev = obj->base.dev; ··· 1206 1211 1207 1212 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 1208 1213 mutex_unlock(&dev->struct_mutex); 1209 - ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv); 1214 + ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv); 1210 1215 mutex_lock(&dev->struct_mutex); 1211 1216 if (ret) 1212 1217 return ret; ··· 1255 1260 * We will repeat the flush holding the lock in the normal manner 1256 1261 * to catch cases where we are gazumped. 1257 1262 */ 1258 - ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain); 1263 + ret = i915_gem_object_wait_rendering__nonblocking(obj, 1264 + file->driver_priv, 1265 + !write_domain); 1259 1266 if (ret) 1260 1267 goto unref; 1261 1268 ··· 1389 1392 1390 1393 trace_i915_gem_object_fault(obj, page_offset, true, write); 1391 1394 1395 + /* Try to flush the object off the GPU first without holding the lock. 1396 + * Upon reacquiring the lock, we will perform our sanity checks and then 1397 + * repeat the flush holding the lock in the normal manner to catch cases 1398 + * where we are gazumped. 1399 + */ 1400 + ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write); 1401 + if (ret) 1402 + goto unlock; 1403 + 1392 1404 /* Access to snoopable pages through the GTT is incoherent. 
*/ 1393 1405 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { 1394 1406 ret = -EINVAL; ··· 1405 1399 } 1406 1400 1407 1401 /* Now bind it into the GTT if needed */ 1408 - ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); 1402 + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); 1409 1403 if (ret) 1410 1404 goto unlock; 1411 1405 ··· 1624 1618 } 1625 1619 1626 1620 if (obj->madv != I915_MADV_WILLNEED) { 1627 - DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1621 + DRM_DEBUG("Attempting to mmap a purgeable buffer\n"); 1628 1622 ret = -EFAULT; 1629 1623 goto out; 1630 1624 } ··· 1978 1972 return 0; 1979 1973 1980 1974 if (obj->madv != I915_MADV_WILLNEED) { 1981 - DRM_ERROR("Attempting to obtain a purgeable object\n"); 1975 + DRM_DEBUG("Attempting to obtain a purgeable object\n"); 1982 1976 return -EFAULT; 1983 1977 } 1984 1978 ··· 2715 2709 2716 2710 if (!drm_mm_node_allocated(&vma->node)) { 2717 2711 i915_gem_vma_destroy(vma); 2718 - 2719 2712 return 0; 2720 2713 } 2721 2714 ··· 2764 2759 i915_gem_object_unpin_pages(obj); 2765 2760 2766 2761 return 0; 2767 - } 2768 - 2769 - /** 2770 - * Unbinds an object from the global GTT aperture. 2771 - */ 2772 - int 2773 - i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 2774 - { 2775 - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2776 - struct i915_address_space *ggtt = &dev_priv->gtt.base; 2777 - 2778 - if (!i915_gem_obj_ggtt_bound(obj)) 2779 - return 0; 2780 - 2781 - if (i915_gem_obj_to_ggtt(obj)->pin_count) 2782 - return -EBUSY; 2783 - 2784 - BUG_ON(obj->pages == NULL); 2785 - 2786 - return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); 2787 2762 } 2788 2763 2789 2764 int i915_gpu_idle(struct drm_device *dev) ··· 3188 3203 /** 3189 3204 * Finds free space in the GTT aperture and binds the object there. 
3190 3205 */ 3191 - static int 3206 + static struct i915_vma * 3192 3207 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3193 3208 struct i915_address_space *vm, 3194 3209 unsigned alignment, 3195 - bool map_and_fenceable, 3196 - bool nonblocking) 3210 + unsigned flags) 3197 3211 { 3198 3212 struct drm_device *dev = obj->base.dev; 3199 3213 drm_i915_private_t *dev_priv = dev->dev_private; 3200 3214 u32 size, fence_size, fence_alignment, unfenced_alignment; 3201 3215 size_t gtt_max = 3202 - map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; 3216 + flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3203 3217 struct i915_vma *vma; 3204 3218 int ret; 3205 3219 ··· 3210 3226 obj->tiling_mode, true); 3211 3227 unfenced_alignment = 3212 3228 i915_gem_get_gtt_alignment(dev, 3213 - obj->base.size, 3214 - obj->tiling_mode, false); 3229 + obj->base.size, 3230 + obj->tiling_mode, false); 3215 3231 3216 3232 if (alignment == 0) 3217 - alignment = map_and_fenceable ? fence_alignment : 3233 + alignment = flags & PIN_MAPPABLE ? fence_alignment : 3218 3234 unfenced_alignment; 3219 - if (map_and_fenceable && alignment & (fence_alignment - 1)) { 3220 - DRM_ERROR("Invalid object alignment requested %u\n", alignment); 3221 - return -EINVAL; 3235 + if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { 3236 + DRM_DEBUG("Invalid object alignment requested %u\n", alignment); 3237 + return ERR_PTR(-EINVAL); 3222 3238 } 3223 3239 3224 - size = map_and_fenceable ? fence_size : obj->base.size; 3240 + size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3225 3241 3226 3242 /* If the object is bigger than the entire aperture, reject it early 3227 3243 * before evicting everything in a vain attempt to find space. 
3228 3244 */ 3229 3245 if (obj->base.size > gtt_max) { 3230 - DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3246 + DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3231 3247 obj->base.size, 3232 - map_and_fenceable ? "mappable" : "total", 3248 + flags & PIN_MAPPABLE ? "mappable" : "total", 3233 3249 gtt_max); 3234 - return -E2BIG; 3250 + return ERR_PTR(-E2BIG); 3235 3251 } 3236 3252 3237 3253 ret = i915_gem_object_get_pages(obj); 3238 3254 if (ret) 3239 - return ret; 3255 + return ERR_PTR(ret); 3240 3256 3241 3257 i915_gem_object_pin_pages(obj); 3242 3258 3243 3259 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 3244 - if (IS_ERR(vma)) { 3245 - ret = PTR_ERR(vma); 3260 + if (IS_ERR(vma)) 3246 3261 goto err_unpin; 3247 - } 3248 3262 3249 3263 search_free: 3250 3264 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, ··· 3251 3269 DRM_MM_SEARCH_DEFAULT); 3252 3270 if (ret) { 3253 3271 ret = i915_gem_evict_something(dev, vm, size, alignment, 3254 - obj->cache_level, 3255 - map_and_fenceable, 3256 - nonblocking); 3272 + obj->cache_level, flags); 3257 3273 if (ret == 0) 3258 3274 goto search_free; 3259 3275 ··· 3282 3302 obj->map_and_fenceable = mappable && fenceable; 3283 3303 } 3284 3304 3285 - WARN_ON(map_and_fenceable && !obj->map_and_fenceable); 3305 + WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); 3286 3306 3287 - trace_i915_vma_bind(vma, map_and_fenceable); 3307 + trace_i915_vma_bind(vma, flags); 3308 + vma->bind_vma(vma, obj->cache_level, 3309 + flags & (PIN_MAPPABLE | PIN_GLOBAL) ? 
GLOBAL_BIND : 0); 3310 + 3288 3311 i915_gem_verify_gtt(dev); 3289 - return 0; 3312 + return vma; 3290 3313 3291 3314 err_remove_node: 3292 3315 drm_mm_remove_node(&vma->node); 3293 3316 err_free_vma: 3294 3317 i915_gem_vma_destroy(vma); 3318 + vma = ERR_PTR(ret); 3295 3319 err_unpin: 3296 3320 i915_gem_object_unpin_pages(obj); 3297 - return ret; 3321 + return vma; 3298 3322 } 3299 3323 3300 3324 bool ··· 3490 3506 } 3491 3507 3492 3508 list_for_each_entry(vma, &obj->vma_list, vma_link) 3493 - vma->bind_vma(vma, cache_level, 0); 3509 + if (drm_mm_node_allocated(&vma->node)) 3510 + vma->bind_vma(vma, cache_level, 3511 + obj->has_global_gtt_mapping ? GLOBAL_BIND : 0); 3494 3512 } 3495 3513 3496 3514 list_for_each_entry(vma, &obj->vma_list, vma_link) ··· 3661 3675 * (e.g. libkms for the bootup splash), we have to ensure that we 3662 3676 * always use map_and_fenceable for all scanout buffers. 3663 3677 */ 3664 - ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); 3678 + ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE); 3665 3679 if (ret) 3666 3680 goto err_unpin_display; 3667 3681 ··· 3817 3831 i915_gem_object_pin(struct drm_i915_gem_object *obj, 3818 3832 struct i915_address_space *vm, 3819 3833 uint32_t alignment, 3820 - bool map_and_fenceable, 3821 - bool nonblocking) 3834 + unsigned flags) 3822 3835 { 3823 - const u32 flags = map_and_fenceable ? 
GLOBAL_BIND : 0; 3824 3836 struct i915_vma *vma; 3825 3837 int ret; 3826 3838 3827 - WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); 3839 + if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) 3840 + return -EINVAL; 3828 3841 3829 3842 vma = i915_gem_obj_to_vma(obj, vm); 3830 - 3831 3843 if (vma) { 3832 3844 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3833 3845 return -EBUSY; 3834 3846 3835 3847 if ((alignment && 3836 3848 vma->node.start & (alignment - 1)) || 3837 - (map_and_fenceable && !obj->map_and_fenceable)) { 3849 + (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { 3838 3850 WARN(vma->pin_count, 3839 3851 "bo is already pinned with incorrect alignment:" 3840 3852 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 3841 3853 " obj->map_and_fenceable=%d\n", 3842 3854 i915_gem_obj_offset(obj, vm), alignment, 3843 - map_and_fenceable, 3855 + flags & PIN_MAPPABLE, 3844 3856 obj->map_and_fenceable); 3845 3857 ret = i915_vma_unbind(vma); 3846 3858 if (ret) 3847 3859 return ret; 3860 + 3861 + vma = NULL; 3848 3862 } 3849 3863 } 3850 3864 3851 - if (!i915_gem_obj_bound(obj, vm)) { 3852 - ret = i915_gem_object_bind_to_vm(obj, vm, alignment, 3853 - map_and_fenceable, 3854 - nonblocking); 3855 - if (ret) 3856 - return ret; 3857 - 3865 + if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { 3866 + vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); 3867 + if (IS_ERR(vma)) 3868 + return PTR_ERR(vma); 3858 3869 } 3859 3870 3860 - vma = i915_gem_obj_to_vma(obj, vm); 3871 + if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping) 3872 + vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); 3861 3873 3862 - vma->bind_vma(vma, obj->cache_level, flags); 3863 - 3864 - i915_gem_obj_to_vma(obj, vm)->pin_count++; 3865 - obj->pin_mappable |= map_and_fenceable; 3874 + vma->pin_count++; 3875 + if (flags & PIN_MAPPABLE) 3876 + obj->pin_mappable |= true; 3866 3877 3867 3878 return 0; 3868 3879 } ··· 3899 3916 } 3900 3917 3901 
3918 if (obj->madv != I915_MADV_WILLNEED) { 3902 - DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3919 + DRM_DEBUG("Attempting to pin a purgeable buffer\n"); 3903 3920 ret = -EFAULT; 3904 3921 goto out; 3905 3922 } 3906 3923 3907 3924 if (obj->pin_filp != NULL && obj->pin_filp != file) { 3908 - DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3925 + DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n", 3909 3926 args->handle); 3910 3927 ret = -EINVAL; 3911 3928 goto out; ··· 3917 3934 } 3918 3935 3919 3936 if (obj->user_pin_count == 0) { 3920 - ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); 3937 + ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE); 3921 3938 if (ret) 3922 3939 goto out; 3923 3940 } ··· 3952 3969 } 3953 3970 3954 3971 if (obj->pin_filp != file) { 3955 - DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 3972 + DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 3956 3973 args->handle); 3957 3974 ret = -EINVAL; 3958 3975 goto out;
+3 -6
drivers/gpu/drm/i915/i915_gem_context.c
··· 258 258 * context. 259 259 */ 260 260 ret = i915_gem_obj_ggtt_pin(ctx->obj, 261 - get_context_alignment(dev), 262 - false, false); 261 + get_context_alignment(dev), 0); 263 262 if (ret) { 264 263 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 265 264 goto err_destroy; ··· 334 335 335 336 if (i == RCS) { 336 337 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, 337 - get_context_alignment(dev), 338 - false, false)); 338 + get_context_alignment(dev), 0)); 339 339 /* Fake a finish/inactive */ 340 340 dctx->obj->base.write_domain = 0; 341 341 dctx->obj->active = 0; ··· 610 612 /* Trying to pin first makes error handling easier. */ 611 613 if (ring == &dev_priv->ring[RCS]) { 612 614 ret = i915_gem_obj_ggtt_pin(to->obj, 613 - get_context_alignment(ring->dev), 614 - false, false); 615 + get_context_alignment(ring->dev), 0); 615 616 if (ret) 616 617 return ret; 617 618 }
+5 -5
drivers/gpu/drm/i915/i915_gem_evict.c
··· 68 68 int 69 69 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, 70 70 int min_size, unsigned alignment, unsigned cache_level, 71 - bool mappable, bool nonblocking) 71 + unsigned flags) 72 72 { 73 73 drm_i915_private_t *dev_priv = dev->dev_private; 74 74 struct list_head eviction_list, unwind_list; ··· 76 76 int ret = 0; 77 77 int pass = 0; 78 78 79 - trace_i915_gem_evict(dev, min_size, alignment, mappable); 79 + trace_i915_gem_evict(dev, min_size, alignment, flags); 80 80 81 81 /* 82 82 * The goal is to evict objects and amalgamate space in LRU order. ··· 102 102 */ 103 103 104 104 INIT_LIST_HEAD(&unwind_list); 105 - if (mappable) { 105 + if (flags & PIN_MAPPABLE) { 106 106 BUG_ON(!i915_is_ggtt(vm)); 107 107 drm_mm_init_scan_with_range(&vm->mm, min_size, 108 108 alignment, cache_level, 0, ··· 117 117 goto found; 118 118 } 119 119 120 - if (nonblocking) 120 + if (flags & PIN_NONBLOCK) 121 121 goto none; 122 122 123 123 /* Now merge in the soon-to-be-expired objects... */ ··· 141 141 /* Can we unpin some objects such as idle hw contents, 142 142 * or pending flips? 143 143 */ 144 - if (nonblocking) 144 + if (flags & PIN_NONBLOCK) 145 145 return -ENOSPC; 146 146 147 147 /* Only idle the GPU and repeat the search once */
+10 -8
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 544 544 struct drm_i915_gem_object *obj = vma->obj; 545 545 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 546 546 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 547 - bool need_fence, need_mappable; 548 - u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) && 549 - !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0; 547 + bool need_fence; 548 + unsigned flags; 550 549 int ret; 550 + 551 + flags = 0; 551 552 552 553 need_fence = 553 554 has_fenced_gpu_access && 554 555 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 555 556 obj->tiling_mode != I915_TILING_NONE; 556 - need_mappable = need_fence || need_reloc_mappable(vma); 557 + if (need_fence || need_reloc_mappable(vma)) 558 + flags |= PIN_MAPPABLE; 557 559 558 - ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable, 559 - false); 560 + if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 561 + flags |= PIN_GLOBAL; 562 + 563 + ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); 560 564 if (ret) 561 565 return ret; 562 566 ··· 588 584 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; 589 585 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; 590 586 } 591 - 592 - vma->bind_vma(vma, obj->cache_level, flags); 593 587 594 588 return 0; 595 589 }
+40 -23
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 319 319 kunmap_atomic(pt_vaddr); 320 320 } 321 321 322 + static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt) 323 + { 324 + int i; 325 + 326 + for (i = 0; i < ppgtt->num_pd_pages ; i++) 327 + kfree(ppgtt->gen8_pt_dma_addr[i]); 328 + 329 + __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); 330 + __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); 331 + } 332 + 333 + static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) 334 + { 335 + int i, j; 336 + 337 + for (i = 0; i < ppgtt->num_pd_pages; i++) { 338 + /* TODO: In the future we'll support sparse mappings, so this 339 + * will have to change. */ 340 + if (!ppgtt->pd_dma_addr[i]) 341 + continue; 342 + 343 + pci_unmap_page(ppgtt->base.dev->pdev, 344 + ppgtt->pd_dma_addr[i], 345 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 346 + 347 + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 348 + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; 349 + if (addr) 350 + pci_unmap_page(ppgtt->base.dev->pdev, 351 + addr, 352 + PAGE_SIZE, 353 + PCI_DMA_BIDIRECTIONAL); 354 + 355 + } 356 + } 357 + } 358 + 322 359 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 323 360 { 324 361 struct i915_hw_ppgtt *ppgtt = 325 362 container_of(vm, struct i915_hw_ppgtt, base); 326 - int i, j; 327 363 328 364 list_del(&vm->global_link); 329 365 drm_mm_takedown(&vm->mm); 330 366 331 - for (i = 0; i < ppgtt->num_pd_pages ; i++) { 332 - if (ppgtt->pd_dma_addr[i]) { 333 - pci_unmap_page(ppgtt->base.dev->pdev, 334 - ppgtt->pd_dma_addr[i], 335 - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 336 - 337 - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 338 - dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; 339 - if (addr) 340 - pci_unmap_page(ppgtt->base.dev->pdev, 341 - addr, 342 - PAGE_SIZE, 343 - PCI_DMA_BIDIRECTIONAL); 344 - 345 - } 346 - } 347 - kfree(ppgtt->gen8_pt_dma_addr[i]); 348 - } 349 - 350 - __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); 351 - 
__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); 367 + gen8_ppgtt_unmap_pages(ppgtt); 368 + gen8_ppgtt_free(ppgtt); 352 369 } 353 370 354 371 /** ··· 885 868 if (ret == -ENOSPC && !retried) { 886 869 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 887 870 GEN6_PD_SIZE, GEN6_PD_ALIGN, 888 - I915_CACHE_NONE, false, true); 871 + I915_CACHE_NONE, 0); 889 872 if (ret) 890 873 return ret; 891 874
+136 -32
drivers/gpu/drm/i915/i915_irq.c
··· 419 419 return ret; 420 420 } 421 421 422 + static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, 423 + enum pipe pipe) 424 + { 425 + struct drm_i915_private *dev_priv = dev->dev_private; 426 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 427 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 428 + 429 + return !intel_crtc->cpu_fifo_underrun_disabled; 430 + } 431 + 422 432 /** 423 433 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 424 434 * @dev: drm device ··· 483 473 484 474 485 475 void 486 - i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 476 + __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 477 + u32 enable_mask, u32 status_mask) 487 478 { 488 479 u32 reg = PIPESTAT(pipe); 489 - u32 pipestat = I915_READ(reg) & 0x7fff0000; 480 + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 490 481 491 482 assert_spin_locked(&dev_priv->irq_lock); 492 483 493 - if ((pipestat & mask) == mask) 484 + if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 485 + status_mask & ~PIPESTAT_INT_STATUS_MASK)) 494 486 return; 495 487 488 + if ((pipestat & enable_mask) == enable_mask) 489 + return; 490 + 491 + dev_priv->pipestat_irq_mask[pipe] |= status_mask; 492 + 496 493 /* Enable the interrupt, clear any pending status */ 497 - pipestat |= mask | (mask >> 16); 494 + pipestat |= enable_mask | status_mask; 498 495 I915_WRITE(reg, pipestat); 499 496 POSTING_READ(reg); 500 497 } 501 498 502 499 void 503 - i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 500 + __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 501 + u32 enable_mask, u32 status_mask) 504 502 { 505 503 u32 reg = PIPESTAT(pipe); 506 - u32 pipestat = I915_READ(reg) & 0x7fff0000; 504 + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 507 505 508 506 assert_spin_locked(&dev_priv->irq_lock); 509 507 510 - if ((pipestat & 
mask) == 0) 508 + if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 509 + status_mask & ~PIPESTAT_INT_STATUS_MASK)) 511 510 return; 512 511 513 - pipestat &= ~mask; 512 + if ((pipestat & enable_mask) == 0) 513 + return; 514 + 515 + dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 516 + 517 + pipestat &= ~enable_mask; 514 518 I915_WRITE(reg, pipestat); 515 519 POSTING_READ(reg); 520 + } 521 + 522 + static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 523 + { 524 + u32 enable_mask = status_mask << 16; 525 + 526 + /* 527 + * On pipe A we don't support the PSR interrupt yet, on pipe B the 528 + * same bit MBZ. 529 + */ 530 + if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 531 + return 0; 532 + 533 + enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 534 + SPRITE0_FLIP_DONE_INT_EN_VLV | 535 + SPRITE1_FLIP_DONE_INT_EN_VLV); 536 + if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 537 + enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 538 + if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 539 + enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 540 + 541 + return enable_mask; 542 + } 543 + 544 + void 545 + i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 546 + u32 status_mask) 547 + { 548 + u32 enable_mask; 549 + 550 + if (IS_VALLEYVIEW(dev_priv->dev)) 551 + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 552 + status_mask); 553 + else 554 + enable_mask = status_mask << 16; 555 + __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 556 + } 557 + 558 + void 559 + i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 560 + u32 status_mask) 561 + { 562 + u32 enable_mask; 563 + 564 + if (IS_VALLEYVIEW(dev_priv->dev)) 565 + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 566 + status_mask); 567 + else 568 + enable_mask = status_mask << 16; 569 + __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 516 570 } 517 571 518 572 /** ··· 592 518 593 519 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 594 520 595 - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); 521 + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 596 522 if (INTEL_INFO(dev)->gen >= 4) 597 523 i915_enable_pipestat(dev_priv, PIPE_A, 598 - PIPE_LEGACY_BLC_EVENT_ENABLE); 524 + PIPE_LEGACY_BLC_EVENT_STATUS); 599 525 600 526 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 601 527 } ··· 1553 1479 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1554 1480 { 1555 1481 struct drm_i915_private *dev_priv = dev->dev_private; 1556 - u32 pipe_stats[I915_MAX_PIPES]; 1482 + u32 pipe_stats[I915_MAX_PIPES] = { }; 1557 1483 int pipe; 1558 1484 1559 1485 spin_lock(&dev_priv->irq_lock); 1560 1486 for_each_pipe(pipe) { 1561 - int reg = PIPESTAT(pipe); 1562 - pipe_stats[pipe] = I915_READ(reg); 1487 + int reg; 1488 + u32 mask, iir_bit = 0; 1489 + 1490 + /* 1491 + * PIPESTAT bits get signalled even when the interrupt is 1492 + * disabled with the mask bits, and some of the status bits do 1493 + * not generate interrupts at all (like the underrun bit). Hence 1494 + * we need to be careful that we only handle what we want to 1495 + * handle. 
1496 + */ 1497 + mask = 0; 1498 + if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) 1499 + mask |= PIPE_FIFO_UNDERRUN_STATUS; 1500 + 1501 + switch (pipe) { 1502 + case PIPE_A: 1503 + iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1504 + break; 1505 + case PIPE_B: 1506 + iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1507 + break; 1508 + } 1509 + if (iir & iir_bit) 1510 + mask |= dev_priv->pipestat_irq_mask[pipe]; 1511 + 1512 + if (!mask) 1513 + continue; 1514 + 1515 + reg = PIPESTAT(pipe); 1516 + mask |= PIPESTAT_INT_ENABLE_MASK; 1517 + pipe_stats[pipe] = I915_READ(reg) & mask; 1563 1518 1564 1519 /* 1565 1520 * Clear the PIPE*STAT regs before the IIR 1566 1521 */ 1567 - if (pipe_stats[pipe] & 0x8000ffff) 1522 + if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1523 + PIPESTAT_INT_STATUS_MASK)) 1568 1524 I915_WRITE(reg, pipe_stats[pipe]); 1569 1525 } 1570 1526 spin_unlock(&dev_priv->irq_lock); ··· 2373 2269 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2374 2270 if (INTEL_INFO(dev)->gen >= 4) 2375 2271 i915_enable_pipestat(dev_priv, pipe, 2376 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 2272 + PIPE_START_VBLANK_INTERRUPT_STATUS); 2377 2273 else 2378 2274 i915_enable_pipestat(dev_priv, pipe, 2379 - PIPE_VBLANK_INTERRUPT_ENABLE); 2275 + PIPE_VBLANK_INTERRUPT_STATUS); 2380 2276 2381 2277 /* maintain vblank delivery even in deep C-states */ 2382 - if (dev_priv->info->gen == 3) 2278 + if (INTEL_INFO(dev)->gen == 3) 2383 2279 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2384 2280 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2385 2281 ··· 2413 2309 2414 2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2415 2311 i915_enable_pipestat(dev_priv, pipe, 2416 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 2312 + PIPE_START_VBLANK_INTERRUPT_STATUS); 2417 2313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2418 2314 2419 2315 return 0; ··· 2444 2340 unsigned long irqflags; 2445 2341 2446 2342 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 
2447 - if (dev_priv->info->gen == 3) 2343 + if (INTEL_INFO(dev)->gen == 3) 2448 2344 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2449 2345 2450 2346 i915_disable_pipestat(dev_priv, pipe, 2451 - PIPE_VBLANK_INTERRUPT_ENABLE | 2452 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 2347 + PIPE_VBLANK_INTERRUPT_STATUS | 2348 + PIPE_START_VBLANK_INTERRUPT_STATUS); 2453 2349 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2454 2350 } 2455 2351 ··· 2472 2368 2473 2369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2474 2370 i915_disable_pipestat(dev_priv, pipe, 2475 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 2371 + PIPE_START_VBLANK_INTERRUPT_STATUS); 2476 2372 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2477 2373 } 2478 2374 ··· 3020 2916 { 3021 2917 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3022 2918 u32 enable_mask; 3023 - u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 3024 - PIPE_CRC_DONE_ENABLE; 2919 + u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 2920 + PIPE_CRC_DONE_INTERRUPT_STATUS; 3025 2921 unsigned long irqflags; 3026 2922 3027 2923 enable_mask = I915_DISPLAY_PORT_INTERRUPT; ··· 3052 2948 * just to make the assert_spin_locked check happy. */ 3053 2949 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3054 2950 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 3055 - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2951 + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3056 2952 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 3057 2953 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3058 2954 ··· 3275 3171 /* Interrupt setup is already guaranteed to be single-threaded, this is 3276 3172 * just to make the assert_spin_locked check happy. 
*/ 3277 3173 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3278 - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3279 - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3174 + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3175 + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3280 3176 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3281 3177 3282 3178 return 0; ··· 3458 3354 /* Interrupt setup is already guaranteed to be single-threaded, this is 3459 3355 * just to make the assert_spin_locked check happy. */ 3460 3356 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3461 - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3462 - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3357 + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3358 + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3463 3359 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3464 3360 3465 3361 return 0; ··· 3668 3564 /* Interrupt setup is already guaranteed to be single-threaded, this is 3669 3565 * just to make the assert_spin_locked check happy. */ 3670 3566 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3671 - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3672 - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3673 - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3567 + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3568 + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3569 + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3674 3570 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3675 3571 3676 3572 /*
+4
drivers/gpu/drm/i915/i915_params.c
··· 47 47 .prefault_disable = 0, 48 48 .reset = true, 49 49 .invert_brightness = 0, 50 + .disable_display = 0, 50 51 }; 51 52 52 53 module_param_named(modeset, i915.modeset, int, 0400); ··· 154 153 "report PCI device ID, subsystem vendor and subsystem device ID " 155 154 "to dri-devel@lists.freedesktop.org, if your machine needs it. " 156 155 "It will then be included in an upcoming module version."); 156 + 157 + module_param_named(disable_display, i915.disable_display, bool, 0600); 158 + MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+93 -88
drivers/gpu/drm/i915/i915_reg.h
··· 789 789 #define _3D_CHICKEN3 0x02090 790 790 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 791 791 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 792 - #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) 792 + #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ 793 + #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ 793 794 794 795 #define MI_MODE 0x0209c 795 796 # define VS_TIMER_DISPATCH (1 << 6) ··· 1205 1204 */ 1206 1205 #define DPLL_A_OFFSET 0x6014 1207 1206 #define DPLL_B_OFFSET 0x6018 1208 - #define DPLL(pipe) (dev_priv->info->dpll_offsets[pipe] + \ 1209 - dev_priv->info->display_mmio_offset) 1207 + #define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ 1208 + dev_priv->info.display_mmio_offset) 1210 1209 1211 1210 #define VGA0 0x6000 1212 1211 #define VGA1 0x6004 ··· 1283 1282 1284 1283 #define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ 1285 1284 #define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ 1286 - #define DPLL_MD(pipe) (dev_priv->info->dpll_md_offsets[pipe] + \ 1287 - dev_priv->info->display_mmio_offset) 1285 + #define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ 1286 + dev_priv->info.display_mmio_offset) 1288 1287 1289 1288 /* 1290 1289 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 
··· 1353 1352 #define DSTATE_PLL_D3_OFF (1<<3) 1354 1353 #define DSTATE_GFX_CLOCK_GATING (1<<1) 1355 1354 #define DSTATE_DOT_CLOCK_GATING (1<<0) 1356 - #define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200) 1355 + #define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) 1357 1356 # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ 1358 1357 # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ 1359 1358 # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ ··· 1479 1478 */ 1480 1479 #define PALETTE_A_OFFSET 0xa000 1481 1480 #define PALETTE_B_OFFSET 0xa800 1482 - #define PALETTE(pipe) (dev_priv->info->palette_offsets[pipe] + \ 1483 - dev_priv->info->display_mmio_offset) 1481 + #define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ 1482 + dev_priv->info.display_mmio_offset) 1484 1483 1485 1484 /* MCH MMIO space */ 1486 1485 ··· 1970 1969 #define TRANSCODER_C_OFFSET 0x62000 1971 1970 #define TRANSCODER_EDP_OFFSET 0x6f000 1972 1971 1973 - #define _TRANSCODER2(pipe, reg) (dev_priv->info->trans_offsets[(pipe)] - \ 1974 - dev_priv->info->trans_offsets[TRANSCODER_A] + (reg) + \ 1975 - dev_priv->info->display_mmio_offset) 1972 + #define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ 1973 + dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ 1974 + dev_priv->info.display_mmio_offset) 1976 1975 1977 1976 #define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) 1978 1977 #define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) ··· 2099 2098 2100 2099 2101 2100 /* Hotplug control (945+ only) */ 2102 - #define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) 2101 + #define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) 2103 2102 #define PORTB_HOTPLUG_INT_EN (1 << 29) 2104 2103 #define PORTC_HOTPLUG_INT_EN (1 << 28) 2105 2104 #define PORTD_HOTPLUG_INT_EN (1 << 27) ··· 2129 2128 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 2130 2129 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 2131 
2130 2132 - #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) 2131 + #define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) 2133 2132 /* 2134 2133 * HDMI/DP bits are gen4+ 2135 2134 * ··· 2347 2346 #define VIDEO_DIP_CTL 0x61170 2348 2347 /* Pre HSW: */ 2349 2348 #define VIDEO_DIP_ENABLE (1 << 31) 2350 - #define VIDEO_DIP_PORT_B (1 << 29) 2351 - #define VIDEO_DIP_PORT_C (2 << 29) 2352 - #define VIDEO_DIP_PORT_D (3 << 29) 2349 + #define VIDEO_DIP_PORT(port) ((port) << 29) 2353 2350 #define VIDEO_DIP_PORT_MASK (3 << 29) 2354 2351 #define VIDEO_DIP_ENABLE_GCP (1 << 25) 2355 2352 #define VIDEO_DIP_ENABLE_AVI (1 << 21) ··· 2404 2405 #define PP_DIVISOR 0x61210 2405 2406 2406 2407 /* Panel fitting */ 2407 - #define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) 2408 + #define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) 2408 2409 #define PFIT_ENABLE (1 << 31) 2409 2410 #define PFIT_PIPE_MASK (3 << 29) 2410 2411 #define PFIT_PIPE_SHIFT 29 ··· 2422 2423 #define PFIT_SCALING_PROGRAMMED (1 << 26) 2423 2424 #define PFIT_SCALING_PILLAR (2 << 26) 2424 2425 #define PFIT_SCALING_LETTER (3 << 26) 2425 - #define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) 2426 + #define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) 2426 2427 /* Pre-965 */ 2427 2428 #define PFIT_VERT_SCALE_SHIFT 20 2428 2429 #define PFIT_VERT_SCALE_MASK 0xfff00000 ··· 2434 2435 #define PFIT_HORIZ_SCALE_SHIFT_965 0 2435 2436 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff 2436 2437 2437 - #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 2438 + #define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) 2438 2439 2439 - #define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250) 2440 - #define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350) 2440 + #define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) 2441 + #define 
_VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) 2441 2442 #define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ 2442 2443 _VLV_BLC_PWM_CTL2_B) 2443 2444 2444 - #define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254) 2445 - #define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354) 2445 + #define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) 2446 + #define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) 2446 2447 #define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ 2447 2448 _VLV_BLC_PWM_CTL_B) 2448 2449 2449 - #define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260) 2450 - #define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360) 2450 + #define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) 2451 + #define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) 2451 2452 #define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ 2452 2453 _VLV_BLC_HIST_CTL_B) 2453 2454 2454 2455 /* Backlight control */ 2455 - #define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 2456 + #define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ 2456 2457 #define BLM_PWM_ENABLE (1 << 31) 2457 2458 #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ 2458 2459 #define BLM_PIPE_SELECT (1 << 29) ··· 2475 2476 #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) 2476 2477 #define BLM_PHASE_IN_INCR_SHIFT (0) 2477 2478 #define BLM_PHASE_IN_INCR_MASK (0xff << 0) 2478 - #define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254) 2479 + #define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) 2479 2480 /* 2480 2481 * This is the most significant 15 bits of the number of backlight cycles in a 2481 2482 * complete cycle of the modulated backlight control. 
··· 2497 2498 #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) 2498 2499 #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ 2499 2500 2500 - #define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260) 2501 + #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) 2501 2502 2502 2503 /* New registers for PCH-split platforms. Safe where new bits show up, the 2503 2504 * register layout machtes with gen4 BLC_PWM_CTL[12]. */ ··· 3252 3253 #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 3253 3254 #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 3254 3255 #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 3256 + #define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) 3255 3257 #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 3256 3258 #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 3257 3259 #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) ··· 3269 3269 #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 3270 3270 #define PIPE_DPST_EVENT_STATUS (1UL<<7) 3271 3271 #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 3272 + #define PIPE_A_PSR_STATUS_VLV (1UL<<6) 3272 3273 #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 3273 3274 #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 3275 + #define PIPE_B_PSR_STATUS_VLV (1UL<<3) 3274 3276 #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ 3275 3277 #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 3276 3278 #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 3277 3279 #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 3280 + 3281 + #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 3282 + #define PIPESTAT_INT_STATUS_MASK 0x0000ffff 3278 3283 3279 3284 #define PIPE_A_OFFSET 0x70000 3280 3285 #define PIPE_B_OFFSET 0x71000 ··· 3292 3287 */ 3293 3288 #define PIPE_EDP_OFFSET 0x7f000 3294 3289 3295 - #define _PIPE2(pipe, reg) (dev_priv->info->pipe_offsets[pipe] - \ 3296 - dev_priv->info->pipe_offsets[PIPE_A] + (reg) + \ 3297 - dev_priv->info->display_mmio_offset) 3290 + #define 
_PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ 3291 + dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ 3292 + dev_priv->info.display_mmio_offset) 3298 3293 3299 3294 #define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) 3300 3295 #define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) ··· 3356 3351 #define DSPARB_BEND_SHIFT 9 /* on 855 */ 3357 3352 #define DSPARB_AEND_SHIFT 0 3358 3353 3359 - #define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) 3354 + #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) 3360 3355 #define DSPFW_SR_SHIFT 23 3361 3356 #define DSPFW_SR_MASK (0x1ff<<23) 3362 3357 #define DSPFW_CURSORB_SHIFT 16 ··· 3364 3359 #define DSPFW_PLANEB_SHIFT 8 3365 3360 #define DSPFW_PLANEB_MASK (0x7f<<8) 3366 3361 #define DSPFW_PLANEA_MASK (0x7f) 3367 - #define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) 3362 + #define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) 3368 3363 #define DSPFW_CURSORA_MASK 0x00003f00 3369 3364 #define DSPFW_CURSORA_SHIFT 8 3370 3365 #define DSPFW_PLANEC_MASK (0x7f) 3371 - #define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) 3366 + #define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) 3372 3367 #define DSPFW_HPLL_SR_EN (1<<31) 3373 3368 #define DSPFW_CURSOR_SR_SHIFT 24 3374 3369 #define PINEVIEW_SELF_REFRESH_EN (1<<30) ··· 3376 3371 #define DSPFW_HPLL_CURSOR_SHIFT 16 3377 3372 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 3378 3373 #define DSPFW_HPLL_SR_MASK (0x1ff) 3379 - #define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070) 3380 - #define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c) 3374 + #define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) 3375 + #define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) 3381 3376 3382 3377 /* drain latency register values*/ 3383 3378 #define DRAIN_LATENCY_PRECISION_32 32 ··· 3501 3496 #define PIPE_PIXEL_MASK 0x00ffffff 3502 3497 #define PIPE_PIXEL_SHIFT 0 3503 3498 /* GM45+ just has to be different */ 3504 - #define 
_PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040) 3505 - #define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044) 3499 + #define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040) 3500 + #define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044) 3506 3501 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 3507 3502 3508 3503 /* Cursor A & B regs */ 3509 - #define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) 3504 + #define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080) 3510 3505 /* Old style CUR*CNTR flags (desktop 8xx) */ 3511 3506 #define CURSOR_ENABLE 0x80000000 3512 3507 #define CURSOR_GAMMA_ENABLE 0x40000000 ··· 3529 3524 #define MCURSOR_PIPE_B (1 << 28) 3530 3525 #define MCURSOR_GAMMA_ENABLE (1 << 26) 3531 3526 #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 3532 - #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3533 - #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3527 + #define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084) 3528 + #define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088) 3534 3529 #define CURSOR_POS_MASK 0x007FF 3535 3530 #define CURSOR_POS_SIGN 0x8000 3536 3531 #define CURSOR_X_SHIFT 0 3537 3532 #define CURSOR_Y_SHIFT 16 3538 3533 #define CURSIZE 0x700a0 3539 - #define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) 3540 - #define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) 3541 - #define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) 3534 + #define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0) 3535 + #define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4) 3536 + #define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8) 3542 3537 3543 3538 #define _CURBCNTR_IVB 0x71080 3544 3539 #define _CURBBASE_IVB 0x71084 ··· 3613 3608 #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) 3614 3609 3615 3610 
/* VBIOS flags */ 3616 - #define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) 3617 - #define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) 3618 - #define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) 3619 - #define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) 3620 - #define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) 3621 - #define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) 3622 - #define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) 3623 - #define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) 3624 - #define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) 3625 - #define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) 3626 - #define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) 3627 - #define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) 3628 - #define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) 3611 + #define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) 3612 + #define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) 3613 + #define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) 3614 + #define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) 3615 + #define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) 3616 + #define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) 3617 + #define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) 3618 + #define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) 3619 + #define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) 3620 + #define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) 3621 + #define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) 3622 + #define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) 3623 + #define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) 3629 3624 3630 3625 /* Pipe B */ 3631 - #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) 3632 - #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) 3633 - #define 
_PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) 3626 + #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) 3627 + #define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008) 3628 + #define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) 3634 3629 #define _PIPEBFRAMEHIGH 0x71040 3635 3630 #define _PIPEBFRAMEPIXEL 0x71044 3636 - #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040) 3637 - #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044) 3631 + #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) 3632 + #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) 3638 3633 3639 3634 3640 3635 /* Display B control */ 3641 - #define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) 3636 + #define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) 3642 3637 #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 3643 3638 #define DISPPLANE_ALPHA_TRANS_DISABLE 0 3644 3639 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 3645 3640 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 3646 - #define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) 3647 - #define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) 3648 - #define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) 3649 - #define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) 3650 - #define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) 3651 - #define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) 3652 - #define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) 3653 - #define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) 3641 + #define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184) 3642 + #define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188) 3643 + #define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C) 3644 + #define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190) 3645 + 
#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C) 3646 + #define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4) 3647 + #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) 3648 + #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) 3654 3649 3655 3650 /* Sprite A control */ 3656 3651 #define _DVSACNTR 0x72180 ··· 4114 4109 #define ILK_ELPIN_409_SELECT (1 << 25) 4115 4110 #define ILK_DPARB_GATE (1<<22) 4116 4111 #define ILK_VSDPFD_FULL (1<<21) 4117 - #define ILK_DISPLAY_CHICKEN_FUSES 0x42014 4118 - #define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) 4119 - #define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) 4120 - #define ILK_DISPLAY_DEBUG_DISABLE (1<<29) 4121 - #define ILK_HDCP_DISABLE (1<<25) 4122 - #define ILK_eDP_A_DISABLE (1<<24) 4123 - #define ILK_DESKTOP (1<<23) 4112 + #define FUSE_STRAP 0x42014 4113 + #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) 4114 + #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) 4115 + #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) 4116 + #define ILK_HDCP_DISABLE (1 << 25) 4117 + #define ILK_eDP_A_DISABLE (1 << 24) 4118 + #define HSW_CDCLK_LIMIT (1 << 24) 4119 + #define ILK_DESKTOP (1 << 23) 4124 4120 4125 4121 #define ILK_DSPCLK_GATE_D 0x42020 4126 4122 #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) ··· 4183 4177 4184 4178 #define HSW_SCRATCH1 0xb038 4185 4179 #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) 4186 - 4187 - #define HSW_FUSE_STRAP 0x42014 4188 - #define HSW_CDCLK_LIMIT (1 << 24) 4189 4180 4190 4181 /* PCH */ 4191 4182 ··· 5054 5051 #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 5055 5052 #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 5056 5053 5057 - #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 5054 + #define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) 5058 5055 #define INTEL_AUDIO_DEVCL 0x808629FB 5059 5056 #define INTEL_AUDIO_DEVBLC 0x80862801 5060 5057 #define INTEL_AUDIO_DEVCTG 0x80862802 ··· 5438 5435 5439 5436 /* SFUSE_STRAP */ 
5440 5437 #define SFUSE_STRAP 0xc2014 5438 + #define SFUSE_STRAP_FUSE_LOCK (1<<13) 5439 + #define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) 5441 5440 #define SFUSE_STRAP_DDIB_DETECTED (1<<2) 5442 5441 #define SFUSE_STRAP_DDIC_DETECTED (1<<1) 5443 5442 #define SFUSE_STRAP_DDID_DETECTED (1<<0) ··· 5909 5904 #define READ_DATA_VALID(n) (1 << (n)) 5910 5905 5911 5906 /* For UMS only (deprecated): */ 5912 - #define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 5913 - #define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 5914 - #define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) 5915 - #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 5916 - #define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) 5917 - #define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) 5907 + #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) 5908 + #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) 5909 + #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) 5910 + #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) 5911 + #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) 5912 + #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) 5918 5913 5919 5914 #endif /* _I915_REG_H_ */
+10 -10
drivers/gpu/drm/i915/i915_trace.h
··· 34 34 ); 35 35 36 36 TRACE_EVENT(i915_vma_bind, 37 - TP_PROTO(struct i915_vma *vma, bool mappable), 38 - TP_ARGS(vma, mappable), 37 + TP_PROTO(struct i915_vma *vma, unsigned flags), 38 + TP_ARGS(vma, flags), 39 39 40 40 TP_STRUCT__entry( 41 41 __field(struct drm_i915_gem_object *, obj) 42 42 __field(struct i915_address_space *, vm) 43 43 __field(u32, offset) 44 44 __field(u32, size) 45 - __field(bool, mappable) 45 + __field(unsigned, flags) 46 46 ), 47 47 48 48 TP_fast_assign( ··· 50 50 __entry->vm = vma->vm; 51 51 __entry->offset = vma->node.start; 52 52 __entry->size = vma->node.size; 53 - __entry->mappable = mappable; 53 + __entry->flags = flags; 54 54 ), 55 55 56 56 TP_printk("obj=%p, offset=%08x size=%x%s vm=%p", 57 57 __entry->obj, __entry->offset, __entry->size, 58 - __entry->mappable ? ", mappable" : "", 58 + __entry->flags & PIN_MAPPABLE ? ", mappable" : "", 59 59 __entry->vm) 60 60 ); 61 61 ··· 196 196 ); 197 197 198 198 TRACE_EVENT(i915_gem_evict, 199 - TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), 200 - TP_ARGS(dev, size, align, mappable), 199 + TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags), 200 + TP_ARGS(dev, size, align, flags), 201 201 202 202 TP_STRUCT__entry( 203 203 __field(u32, dev) 204 204 __field(u32, size) 205 205 __field(u32, align) 206 - __field(bool, mappable) 206 + __field(unsigned, flags) 207 207 ), 208 208 209 209 TP_fast_assign( 210 210 __entry->dev = dev->primary->index; 211 211 __entry->size = size; 212 212 __entry->align = align; 213 - __entry->mappable = mappable; 213 + __entry->flags = flags; 214 214 ), 215 215 216 216 TP_printk("dev=%d, size=%d, align=%d %s", 217 217 __entry->dev, __entry->size, __entry->align, 218 - __entry->mappable ? ", mappable" : "") 218 + __entry->flags & PIN_MAPPABLE ? ", mappable" : "") 219 219 ); 220 220 221 221 TRACE_EVENT(i915_gem_evict_everything,
+1
drivers/gpu/drm/i915/intel_crt.c
··· 833 833 crt->base.get_hw_state = intel_crt_get_hw_state; 834 834 } 835 835 intel_connector->get_hw_state = intel_connector_get_hw_state; 836 + intel_connector->unregister = intel_connector_unregister; 836 837 837 838 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 838 839
+1 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 1415 1415 1416 1416 if (lcpll & LCPLL_CD_SOURCE_FCLK) { 1417 1417 return 800000; 1418 - } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) { 1418 + } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) { 1419 1419 return 450000; 1420 1420 } else if (freq == LCPLL_CLK_FREQ_450) { 1421 1421 return 450000;
+131 -126
drivers/gpu/drm/i915/intel_display.c
··· 51 51 52 52 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 53 53 int x, int y, struct drm_framebuffer *old_fb); 54 - 54 + static int intel_framebuffer_init(struct drm_device *dev, 55 + struct intel_framebuffer *ifb, 56 + struct drm_mode_fb_cmd2 *mode_cmd, 57 + struct drm_i915_gem_object *obj); 55 58 56 59 typedef struct { 57 60 int min, max; ··· 1033 1030 u32 val; 1034 1031 1035 1032 /* ILK FDI PLL is always enabled */ 1036 - if (dev_priv->info->gen == 5) 1033 + if (INTEL_INFO(dev_priv->dev)->gen == 5) 1037 1034 return; 1038 1035 1039 1036 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ ··· 1192 1189 u32 val; 1193 1190 1194 1191 if (IS_VALLEYVIEW(dev)) { 1195 - for (i = 0; i < dev_priv->num_plane; i++) { 1192 + for (i = 0; i < INTEL_INFO(dev)->num_sprites; i++) { 1196 1193 reg = SPCNTR(pipe, i); 1197 1194 val = I915_READ(reg); 1198 1195 WARN((val & SP_ENABLE), ··· 1446 1443 assert_pipe_disabled(dev_priv, crtc->pipe); 1447 1444 1448 1445 /* No really, not for ILK+ */ 1449 - BUG_ON(dev_priv->info->gen >= 5); 1446 + BUG_ON(INTEL_INFO(dev)->gen >= 5); 1450 1447 1451 1448 /* PLL is protected by panel, make sure we can write it */ 1452 1449 if (IS_MOBILE(dev) && !IS_I830(dev)) ··· 1552 1549 */ 1553 1550 static void ironlake_enable_shared_dpll(struct intel_crtc *crtc) 1554 1551 { 1555 - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1552 + struct drm_device *dev = crtc->base.dev; 1553 + struct drm_i915_private *dev_priv = dev->dev_private; 1556 1554 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1557 1555 1558 1556 /* PCH PLLs only available on ILK, SNB and IVB */ 1559 - BUG_ON(dev_priv->info->gen < 5); 1557 + BUG_ON(INTEL_INFO(dev)->gen < 5); 1560 1558 if (WARN_ON(pll == NULL)) 1561 1559 return; 1562 1560 ··· 1582 1578 1583 1579 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1584 1580 { 1585 - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1581 + 
struct drm_device *dev = crtc->base.dev; 1582 + struct drm_i915_private *dev_priv = dev->dev_private; 1586 1583 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1587 1584 1588 1585 /* PCH only available on ILK+ */ 1589 - BUG_ON(dev_priv->info->gen < 5); 1586 + BUG_ON(INTEL_INFO(dev)->gen < 5); 1590 1587 if (WARN_ON(pll == NULL)) 1591 1588 return; 1592 1589 ··· 1622 1617 uint32_t reg, val, pipeconf_val; 1623 1618 1624 1619 /* PCH only available on ILK+ */ 1625 - BUG_ON(dev_priv->info->gen < 5); 1620 + BUG_ON(INTEL_INFO(dev)->gen < 5); 1626 1621 1627 1622 /* Make sure PCH DPLL is enabled */ 1628 1623 assert_shared_dpll_enabled(dev_priv, ··· 1675 1670 u32 val, pipeconf_val; 1676 1671 1677 1672 /* PCH only available on ILK+ */ 1678 - BUG_ON(dev_priv->info->gen < 5); 1673 + BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); 1679 1674 1680 1675 /* FDI must be feeding us bits for PCH ports */ 1681 1676 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); ··· 1749 1744 1750 1745 /** 1751 1746 * intel_enable_pipe - enable a pipe, asserting requirements 1752 - * @dev_priv: i915 private structure 1753 - * @pipe: pipe to enable 1754 - * @pch_port: on ILK+, is this pipe driving a PCH port or not 1747 + * @crtc: crtc responsible for the pipe 1755 1748 * 1756 - * Enable @pipe, making sure that various hardware specific requirements 1749 + * Enable @crtc's pipe, making sure that various hardware specific requirements 1757 1750 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1758 - * 1759 - * @pipe should be %PIPE_A or %PIPE_B. 1760 - * 1761 - * Will wait until the pipe is actually running (i.e. first vblank) before 1762 - * returning. 
1763 1751 */ 1764 - static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1765 - bool pch_port, bool dsi) 1752 + static void intel_enable_pipe(struct intel_crtc *crtc) 1766 1753 { 1754 + struct drm_device *dev = crtc->base.dev; 1755 + struct drm_i915_private *dev_priv = dev->dev_private; 1756 + enum pipe pipe = crtc->pipe; 1767 1757 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1768 1758 pipe); 1769 1759 enum pipe pch_transcoder; ··· 1780 1780 * need the check. 1781 1781 */ 1782 1782 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1783 - if (dsi) 1783 + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) 1784 1784 assert_dsi_pll_enabled(dev_priv); 1785 1785 else 1786 1786 assert_pll_enabled(dev_priv, pipe); 1787 1787 else { 1788 - if (pch_port) { 1788 + if (crtc->config.has_pch_encoder) { 1789 1789 /* if driving the PCH, we need FDI enabled */ 1790 1790 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1791 1791 assert_fdi_tx_pll_enabled(dev_priv, ··· 1796 1796 1797 1797 reg = PIPECONF(cpu_transcoder); 1798 1798 val = I915_READ(reg); 1799 - if (val & PIPECONF_ENABLE) 1799 + if (val & PIPECONF_ENABLE) { 1800 + WARN_ON(!(pipe == PIPE_A && 1801 + dev_priv->quirks & QUIRK_PIPEA_FORCE)); 1800 1802 return; 1803 + } 1801 1804 1802 1805 I915_WRITE(reg, val | PIPECONF_ENABLE); 1803 - intel_wait_for_vblank(dev_priv->dev, pipe); 1806 + POSTING_READ(reg); 1807 + 1808 + /* 1809 + * There's no guarantee the pipe will really start running now. It 1810 + * depends on the Gen, the output type and the relative order between 1811 + * pipe and plane enabling. Avoid waiting on HSW+ since it's not 1812 + * necessary. 1813 + * TODO: audit the previous gens. 
1814 + */ 1815 + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 1816 + intel_wait_for_vblank(dev_priv->dev, pipe); 1804 1817 } 1805 1818 1806 1819 /** ··· 1864 1851 void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 1865 1852 enum plane plane) 1866 1853 { 1867 - u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); 1854 + struct drm_device *dev = dev_priv->dev; 1855 + u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); 1868 1856 1869 1857 I915_WRITE(reg, I915_READ(reg)); 1870 1858 POSTING_READ(reg); ··· 1941 1927 return true; 1942 1928 #endif 1943 1929 return false; 1930 + } 1931 + 1932 + static int intel_align_height(struct drm_device *dev, int height, bool tiled) 1933 + { 1934 + int tile_height; 1935 + 1936 + tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1; 1937 + return ALIGN(height, tile_height); 1944 1938 } 1945 1939 1946 1940 int ··· 2321 2299 return ret; 2322 2300 } 2323 2301 2324 - static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) 2325 - { 2326 - struct drm_device *dev = crtc->dev; 2327 - struct drm_i915_master_private *master_priv; 2328 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2329 - 2330 - if (!dev->primary->master) 2331 - return; 2332 - 2333 - master_priv = dev->primary->master->driver_priv; 2334 - if (!master_priv->sarea_priv) 2335 - return; 2336 - 2337 - switch (intel_crtc->pipe) { 2338 - case 0: 2339 - master_priv->sarea_priv->pipeA_x = x; 2340 - master_priv->sarea_priv->pipeA_y = y; 2341 - break; 2342 - case 1: 2343 - master_priv->sarea_priv->pipeB_x = x; 2344 - master_priv->sarea_priv->pipeB_y = y; 2345 - break; 2346 - default: 2347 - break; 2348 - } 2349 - } 2350 - 2351 2302 static int 2352 2303 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2353 2304 struct drm_framebuffer *fb) ··· 2407 2412 intel_update_fbc(dev); 2408 2413 intel_edp_psr_update(dev); 2409 2414 mutex_unlock(&dev->struct_mutex); 2410 - 2411 - intel_crtc_update_sarea_pos(crtc, 
x, y); 2412 2415 2413 2416 return 0; 2414 2417 } ··· 3580 3587 intel_crtc_load_lut(crtc); 3581 3588 3582 3589 intel_update_watermarks(crtc); 3583 - intel_enable_pipe(dev_priv, pipe, 3584 - intel_crtc->config.has_pch_encoder, false); 3590 + intel_enable_pipe(intel_crtc); 3585 3591 intel_enable_primary_plane(dev_priv, plane, pipe); 3586 3592 intel_enable_planes(crtc); 3587 3593 intel_crtc_update_cursor(crtc, true); ··· 3725 3733 intel_ddi_enable_transcoder_func(crtc); 3726 3734 3727 3735 intel_update_watermarks(crtc); 3728 - intel_enable_pipe(dev_priv, pipe, 3729 - intel_crtc->config.has_pch_encoder, false); 3736 + intel_enable_pipe(intel_crtc); 3730 3737 3731 3738 if (intel_crtc->config.has_pch_encoder) 3732 3739 lpt_pch_enable(crtc); ··· 3739 3748 * to change the workaround. */ 3740 3749 haswell_mode_set_planes_workaround(intel_crtc); 3741 3750 haswell_crtc_enable_planes(crtc); 3742 - 3743 - /* 3744 - * There seems to be a race in PCH platform hw (at least on some 3745 - * outputs) where an enabled pipe still completes any pageflip right 3746 - * away (as if the pipe is off) instead of waiting for vblank. As soon 3747 - * as the first vblank happend, everything works as expected. Hence just 3748 - * wait for one vblank before returning to avoid strange things 3749 - * happening. 
3750 - */ 3751 - intel_wait_for_vblank(dev, intel_crtc->pipe); 3752 3751 } 3753 3752 3754 3753 static void ironlake_pfit_disable(struct intel_crtc *crtc) ··· 4150 4169 intel_crtc_load_lut(crtc); 4151 4170 4152 4171 intel_update_watermarks(crtc); 4153 - intel_enable_pipe(dev_priv, pipe, false, is_dsi); 4172 + intel_enable_pipe(intel_crtc); 4154 4173 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4155 4174 intel_enable_primary_plane(dev_priv, plane, pipe); 4156 4175 intel_enable_planes(crtc); ··· 4189 4208 intel_crtc_load_lut(crtc); 4190 4209 4191 4210 intel_update_watermarks(crtc); 4192 - intel_enable_pipe(dev_priv, pipe, false, false); 4211 + intel_enable_pipe(intel_crtc); 4193 4212 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4194 4213 intel_enable_primary_plane(dev_priv, plane, pipe); 4195 4214 intel_enable_planes(crtc); ··· 5237 5256 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; 5238 5257 } 5239 5258 5240 - static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, 5241 - struct intel_crtc_config *pipe_config) 5259 + void intel_mode_from_pipe_config(struct drm_display_mode *mode, 5260 + struct intel_crtc_config *pipe_config) 5242 5261 { 5243 - struct drm_crtc *crtc = &intel_crtc->base; 5262 + mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; 5263 + mode->htotal = pipe_config->adjusted_mode.crtc_htotal; 5264 + mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; 5265 + mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; 5244 5266 5245 - crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; 5246 - crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal; 5247 - crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; 5248 - crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; 5267 + mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; 5268 + mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; 5269 + mode->vsync_start = 
pipe_config->adjusted_mode.crtc_vsync_start; 5270 + mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; 5249 5271 5250 - crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; 5251 - crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal; 5252 - crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; 5253 - crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; 5272 + mode->flags = pipe_config->adjusted_mode.flags; 5254 5273 5255 - crtc->mode.flags = pipe_config->adjusted_mode.flags; 5256 - 5257 - crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock; 5258 - crtc->mode.flags |= pipe_config->adjusted_mode.flags; 5274 + mode->clock = pipe_config->adjusted_mode.crtc_clock; 5275 + mode->flags |= pipe_config->adjusted_mode.flags; 5259 5276 } 5260 5277 5261 5278 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) ··· 7556 7577 7557 7578 /* we only need to pin inside GTT if cursor is non-phy */ 7558 7579 mutex_lock(&dev->struct_mutex); 7559 - if (!dev_priv->info->cursor_needs_physical) { 7580 + if (!INTEL_INFO(dev)->cursor_needs_physical) { 7560 7581 unsigned alignment; 7561 7582 7562 7583 if (obj->tiling_mode) { ··· 7604 7625 7605 7626 finish: 7606 7627 if (intel_crtc->cursor_bo) { 7607 - if (dev_priv->info->cursor_needs_physical) { 7628 + if (INTEL_INFO(dev)->cursor_needs_physical) { 7608 7629 if (intel_crtc->cursor_bo != obj) 7609 7630 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 7610 7631 } else ··· 7666 7687 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 7667 7688 }; 7668 7689 7669 - static struct drm_framebuffer * 7670 - intel_framebuffer_create(struct drm_device *dev, 7671 - struct drm_mode_fb_cmd2 *mode_cmd, 7672 - struct drm_i915_gem_object *obj) 7690 + struct drm_framebuffer * 7691 + __intel_framebuffer_create(struct drm_device *dev, 7692 + struct drm_mode_fb_cmd2 *mode_cmd, 7693 + struct drm_i915_gem_object *obj) 7673 7694 { 7674 7695 struct 
intel_framebuffer *intel_fb; 7675 7696 int ret; ··· 7680 7701 return ERR_PTR(-ENOMEM); 7681 7702 } 7682 7703 7683 - ret = i915_mutex_lock_interruptible(dev); 7684 - if (ret) 7685 - goto err; 7686 - 7687 7704 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 7688 - mutex_unlock(&dev->struct_mutex); 7689 7705 if (ret) 7690 7706 goto err; 7691 7707 ··· 7690 7716 kfree(intel_fb); 7691 7717 7692 7718 return ERR_PTR(ret); 7719 + } 7720 + 7721 + struct drm_framebuffer * 7722 + intel_framebuffer_create(struct drm_device *dev, 7723 + struct drm_mode_fb_cmd2 *mode_cmd, 7724 + struct drm_i915_gem_object *obj) 7725 + { 7726 + struct drm_framebuffer *fb; 7727 + int ret; 7728 + 7729 + ret = i915_mutex_lock_interruptible(dev); 7730 + if (ret) 7731 + return ERR_PTR(ret); 7732 + fb = __intel_framebuffer_create(dev, mode_cmd, obj); 7733 + mutex_unlock(&dev->struct_mutex); 7734 + 7735 + return fb; 7693 7736 } 7694 7737 7695 7738 static u32 ··· 7754 7763 struct drm_i915_gem_object *obj; 7755 7764 struct drm_framebuffer *fb; 7756 7765 7757 - if (dev_priv->fbdev == NULL) 7766 + if (!dev_priv->fbdev) 7758 7767 return NULL; 7759 7768 7760 - obj = dev_priv->fbdev->ifb.obj; 7761 - if (obj == NULL) 7769 + if (!dev_priv->fbdev->fb) 7762 7770 return NULL; 7763 7771 7764 - fb = &dev_priv->fbdev->ifb.base; 7772 + obj = dev_priv->fbdev->fb->obj; 7773 + BUG_ON(!obj); 7774 + 7775 + fb = &dev_priv->fbdev->fb->base; 7765 7776 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 7766 7777 fb->bits_per_pixel)) 7767 7778 return NULL; ··· 8213 8220 intel_decrease_pllclock(crtc); 8214 8221 } 8215 8222 8216 - if (dev_priv->info->gen >= 6) 8223 + if (INTEL_INFO(dev)->gen >= 6) 8217 8224 gen6_rps_idle(dev->dev_private); 8218 8225 } 8219 8226 ··· 10377 10384 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 10378 10385 return false; 10379 10386 10380 - if (IS_GEN5(dev) && 10381 - (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 10387 + if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) 
& ILK_eDP_A_DISABLE)) 10382 10388 return false; 10383 10389 10384 10390 return true; ··· 10530 10538 drm_helper_move_panel_connectors_to_head(dev); 10531 10539 } 10532 10540 10533 - void intel_framebuffer_fini(struct intel_framebuffer *fb) 10534 - { 10535 - drm_framebuffer_cleanup(&fb->base); 10536 - WARN_ON(!fb->obj->framebuffer_references--); 10537 - drm_gem_object_unreference_unlocked(&fb->obj->base); 10538 - } 10539 - 10540 10541 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 10541 10542 { 10542 10543 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 10543 10544 10544 - intel_framebuffer_fini(intel_fb); 10545 + drm_framebuffer_cleanup(fb); 10546 + WARN_ON(!intel_fb->obj->framebuffer_references--); 10547 + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 10545 10548 kfree(intel_fb); 10546 10549 } 10547 10550 ··· 10560 10573 struct drm_mode_fb_cmd2 *mode_cmd, 10561 10574 struct drm_i915_gem_object *obj) 10562 10575 { 10563 - int aligned_height, tile_height; 10576 + int aligned_height; 10564 10577 int pitch_limit; 10565 10578 int ret; 10566 10579 ··· 10654 10667 if (mode_cmd->offsets[0] != 0) 10655 10668 return -EINVAL; 10656 10669 10657 - tile_height = IS_GEN2(dev) ? 16 : 8; 10658 - aligned_height = ALIGN(mode_cmd->height, 10659 - obj->tiling_mode ? tile_height : 1); 10670 + aligned_height = intel_align_height(dev, mode_cmd->height, 10671 + obj->tiling_mode); 10660 10672 /* FIXME drm helper for size checks (especially planar formats)? 
*/ 10661 10673 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 10662 10674 return -EINVAL; ··· 11035 11049 11036 11050 for_each_pipe(i) { 11037 11051 intel_crtc_init(dev, i); 11038 - for (j = 0; j < dev_priv->num_plane; j++) { 11052 + for (j = 0; j < INTEL_INFO(dev)->num_sprites; j++) { 11039 11053 ret = intel_plane_init(dev, i, j); 11040 11054 if (ret) 11041 11055 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", ··· 11055 11069 11056 11070 /* Just in case the BIOS is doing something questionable. */ 11057 11071 intel_disable_fbc(dev); 11072 + 11073 + intel_modeset_setup_hw_state(dev, false); 11058 11074 } 11059 11075 11060 11076 static void ··· 11363 11375 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11364 11376 base.head) { 11365 11377 if (crtc->active && i915.fastboot) { 11366 - intel_crtc_mode_from_pipe_config(crtc, &crtc->config); 11367 - 11378 + intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); 11368 11379 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 11369 11380 crtc->base.base.id); 11370 11381 drm_mode_debug_printmodeline(&crtc->base.mode); ··· 11423 11436 intel_modeset_init_hw(dev); 11424 11437 11425 11438 intel_setup_overlay(dev); 11439 + } 11426 11440 11427 - mutex_lock(&dev->mode_config.mutex); 11428 - intel_modeset_setup_hw_state(dev, false); 11429 - mutex_unlock(&dev->mode_config.mutex); 11441 + void intel_connector_unregister(struct intel_connector *intel_connector) 11442 + { 11443 + struct drm_connector *connector = &intel_connector->base; 11444 + 11445 + intel_panel_destroy_backlight(connector); 11446 + drm_sysfs_connector_remove(connector); 11430 11447 } 11431 11448 11432 11449 void intel_modeset_cleanup(struct drm_device *dev) ··· 11477 11486 11478 11487 /* destroy the backlight and sysfs files before encoders/connectors */ 11479 11488 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 11480 - intel_panel_destroy_backlight(connector); 11481 - drm_sysfs_connector_remove(connector); 11489 
+ struct intel_connector *intel_connector; 11490 + 11491 + intel_connector = to_intel_connector(connector); 11492 + intel_connector->unregister(intel_connector); 11482 11493 } 11483 11494 11484 11495 drm_mode_config_cleanup(dev); ··· 11513 11520 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 11514 11521 u16 gmch_ctrl; 11515 11522 11516 - pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); 11523 + if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 11524 + DRM_ERROR("failed to read control word\n"); 11525 + return -EIO; 11526 + } 11527 + 11528 + if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 11529 + return 0; 11530 + 11517 11531 if (state) 11518 11532 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11519 11533 else 11520 11534 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11521 - pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); 11535 + 11536 + if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 11537 + DRM_ERROR("failed to write control word\n"); 11538 + return -EIO; 11539 + } 11540 + 11522 11541 return 0; 11523 11542 } 11524 11543
+23 -2
drivers/gpu/drm/i915/intel_dp.c
··· 784 784 return ret; 785 785 } 786 786 787 + static void 788 + intel_dp_connector_unregister(struct intel_connector *intel_connector) 789 + { 790 + struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); 791 + 792 + sysfs_remove_link(&intel_connector->base.kdev->kobj, 793 + intel_dp->adapter.dev.kobj.name); 794 + intel_connector_unregister(intel_connector); 795 + } 796 + 787 797 static int 788 798 intel_dp_i2c_init(struct intel_dp *intel_dp, 789 799 struct intel_connector *intel_connector, const char *name) ··· 811 801 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 812 802 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 813 803 intel_dp->adapter.algo_data = &intel_dp->algo; 814 - intel_dp->adapter.dev.parent = intel_connector->base.kdev; 804 + intel_dp->adapter.dev.parent = intel_connector->base.dev->dev; 815 805 816 806 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 807 + if (ret < 0) 808 + return ret; 809 + 810 + ret = sysfs_create_link(&intel_connector->base.kdev->kobj, 811 + &intel_dp->adapter.dev.kobj, 812 + intel_dp->adapter.dev.kobj.name); 813 + 814 + if (ret < 0) 815 + i2c_del_adapter(&intel_dp->adapter); 816 + 817 817 return ret; 818 818 } 819 819 ··· 3759 3739 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3760 3740 } 3761 3741 3762 - intel_panel_init(&intel_connector->panel, fixed_mode); 3742 + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 3763 3743 intel_panel_setup_backlight(connector); 3764 3744 3765 3745 return true; ··· 3828 3808 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 3829 3809 else 3830 3810 intel_connector->get_hw_state = intel_connector_get_hw_state; 3811 + intel_connector->unregister = intel_dp_connector_unregister; 3831 3812 3832 3813 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 3833 3814 if (HAS_DDI(dev)) {
+15 -5
drivers/gpu/drm/i915/intel_drv.h
··· 110 110 111 111 struct intel_fbdev { 112 112 struct drm_fb_helper helper; 113 - struct intel_framebuffer ifb; 113 + struct intel_framebuffer *fb; 114 114 struct list_head fbdev_list; 115 115 struct drm_display_mode *our_mode; 116 116 }; ··· 186 186 /* Reads out the current hw, returning true if the connector is enabled 187 187 * and active (i.e. dpms ON state). */ 188 188 bool (*get_hw_state)(struct intel_connector *); 189 + 190 + /* 191 + * Removes all interfaces through which the connector is accessible 192 + * - like sysfs, debugfs entries -, so that no new operations can be 193 + * started on the connector. Also makes sure all currently pending 194 + * operations finish before returing. 195 + */ 196 + void (*unregister)(struct intel_connector *); 189 197 190 198 /* Panel info for eDP and LVDS */ 191 199 struct intel_panel panel; ··· 689 681 struct drm_i915_gem_object *obj, 690 682 struct intel_ring_buffer *pipelined); 691 683 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); 692 - int intel_framebuffer_init(struct drm_device *dev, 693 - struct intel_framebuffer *ifb, 684 + struct drm_framebuffer * 685 + __intel_framebuffer_create(struct drm_device *dev, 694 686 struct drm_mode_fb_cmd2 *mode_cmd, 695 687 struct drm_i915_gem_object *obj); 696 - void intel_framebuffer_fini(struct intel_framebuffer *fb); 697 688 void intel_prepare_page_flip(struct drm_device *dev, int plane); 698 689 void intel_finish_page_flip(struct drm_device *dev, int pipe); 699 690 void intel_finish_page_flip_plane(struct drm_device *dev, int plane); ··· 734 727 void hsw_disable_ips(struct intel_crtc *crtc); 735 728 void intel_display_set_init_power(struct drm_device *dev, bool enable); 736 729 int valleyview_get_vco(struct drm_i915_private *dev_priv); 730 + void intel_mode_from_pipe_config(struct drm_display_mode *mode, 731 + struct intel_crtc_config *pipe_config); 737 732 738 733 /* intel_dp.c */ 739 734 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 
··· 833 824 834 825 /* intel_panel.c */ 835 826 int intel_panel_init(struct intel_panel *panel, 836 - struct drm_display_mode *fixed_mode); 827 + struct drm_display_mode *fixed_mode, 828 + struct drm_display_mode *downclock_mode); 837 829 void intel_panel_fini(struct intel_panel *panel); 838 830 void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 839 831 struct drm_display_mode *adjusted_mode);
+2 -1
drivers/gpu/drm/i915/intel_dsi.c
··· 586 586 intel_encoder->get_config = intel_dsi_get_config; 587 587 588 588 intel_connector->get_hw_state = intel_connector_get_hw_state; 589 + intel_connector->unregister = intel_connector_unregister; 589 590 590 591 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) { 591 592 dsi = &intel_dsi_devices[i]; ··· 625 624 } 626 625 627 626 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 628 - intel_panel_init(&intel_connector->panel, fixed_mode); 627 + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 629 628 630 629 return true; 631 630
+1
drivers/gpu/drm/i915/intel_dvo.c
··· 477 477 intel_encoder->compute_config = intel_dvo_compute_config; 478 478 intel_encoder->mode_set = intel_dvo_mode_set; 479 479 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 480 + intel_connector->unregister = intel_connector_unregister; 480 481 481 482 /* Now, try to find a controller */ 482 483 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+161 -10
drivers/gpu/drm/i915/intel_fbdev.c
··· 62 62 { 63 63 struct intel_fbdev *ifbdev = 64 64 container_of(helper, struct intel_fbdev, helper); 65 + struct drm_framebuffer *fb; 65 66 struct drm_device *dev = helper->dev; 66 67 struct drm_mode_fb_cmd2 mode_cmd = {}; 67 68 struct drm_i915_gem_object *obj; ··· 94 93 /* Flush everything out, we'll be doing GTT only from now on */ 95 94 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 96 95 if (ret) { 97 - DRM_ERROR("failed to pin fb: %d\n", ret); 96 + DRM_ERROR("failed to pin obj: %d\n", ret); 98 97 goto out_unref; 99 98 } 100 99 101 - ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); 102 - if (ret) 100 + fb = __intel_framebuffer_create(dev, &mode_cmd, obj); 101 + if (IS_ERR(fb)) { 102 + ret = PTR_ERR(fb); 103 103 goto out_unpin; 104 + } 105 + 106 + ifbdev->fb = to_intel_framebuffer(fb); 104 107 105 108 return 0; 106 109 ··· 121 116 { 122 117 struct intel_fbdev *ifbdev = 123 118 container_of(helper, struct intel_fbdev, helper); 124 - struct intel_framebuffer *intel_fb = &ifbdev->ifb; 119 + struct intel_framebuffer *intel_fb = ifbdev->fb; 125 120 struct drm_device *dev = helper->dev; 126 121 struct drm_i915_private *dev_priv = dev->dev_private; 127 122 struct fb_info *info; ··· 131 126 132 127 mutex_lock(&dev->struct_mutex); 133 128 134 - if (!intel_fb->obj) { 129 + if (!intel_fb || WARN_ON(!intel_fb->obj)) { 135 130 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 136 131 ret = intelfb_alloc(helper, sizes); 137 132 if (ret) 138 133 goto out_unlock; 134 + intel_fb = ifbdev->fb; 139 135 } else { 140 136 DRM_DEBUG_KMS("re-using BIOS fb\n"); 141 137 sizes->fb_width = intel_fb->base.width; ··· 154 148 155 149 info->par = helper; 156 150 157 - fb = &ifbdev->ifb.base; 151 + fb = &ifbdev->fb->base; 158 152 159 153 ifbdev->helper.fb = fb; 160 154 ifbdev->helper.fbdev = info; ··· 200 194 * If the object is stolen however, it will be full of whatever 201 195 * garbage was left in there. 
202 196 */ 203 - if (ifbdev->ifb.obj->stolen) 197 + if (ifbdev->fb->obj->stolen) 204 198 memset_io(info->screen_base, 0, info->screen_size); 205 199 206 200 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ ··· 242 236 *blue = intel_crtc->lut_b[regno] << 8; 243 237 } 244 238 239 + static struct drm_fb_helper_crtc * 240 + intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) 241 + { 242 + int i; 243 + 244 + for (i = 0; i < fb_helper->crtc_count; i++) 245 + if (fb_helper->crtc_info[i].mode_set.crtc == crtc) 246 + return &fb_helper->crtc_info[i]; 247 + 248 + return NULL; 249 + } 250 + 251 + /* 252 + * Try to read the BIOS display configuration and use it for the initial 253 + * fb configuration. 254 + * 255 + * The BIOS or boot loader will generally create an initial display 256 + * configuration for us that includes some set of active pipes and displays. 257 + * This routine tries to figure out which pipes and connectors are active 258 + * and stuffs them into the crtcs and modes array given to us by the 259 + * drm_fb_helper code. 260 + * 261 + * The overall sequence is: 262 + * intel_fbdev_init - from driver load 263 + * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data 264 + * drm_fb_helper_init - build fb helper structs 265 + * drm_fb_helper_single_add_all_connectors - more fb helper structs 266 + * intel_fbdev_initial_config - apply the config 267 + * drm_fb_helper_initial_config - call ->probe then register_framebuffer() 268 + * drm_setup_crtcs - build crtc config for fbdev 269 + * intel_fb_initial_config - find active connectors etc 270 + * drm_fb_helper_single_fb_probe - set up fbdev 271 + * intelfb_create - re-use or alloc fb, build out fbdev structs 272 + * 273 + * Note that we don't make special consideration whether we could actually 274 + * switch to the selected modes without a full modeset. E.g. 
when the display 275 + * is in VGA mode we need to recalculate watermarks and set a new high-res 276 + * framebuffer anyway. 277 + */ 278 + static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, 279 + struct drm_fb_helper_crtc **crtcs, 280 + struct drm_display_mode **modes, 281 + bool *enabled, int width, int height) 282 + { 283 + struct drm_device *dev = fb_helper->dev; 284 + int i, j; 285 + bool *save_enabled; 286 + bool any_enabled = false; 287 + 288 + save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 289 + GFP_KERNEL); 290 + if (!save_enabled) 291 + return false; 292 + 293 + memcpy(save_enabled, enabled, dev->mode_config.num_connector); 294 + 295 + for (i = 0; i < fb_helper->connector_count; i++) { 296 + struct drm_fb_helper_connector *fb_conn; 297 + struct drm_connector *connector; 298 + struct drm_encoder *encoder; 299 + struct drm_fb_helper_crtc *new_crtc; 300 + 301 + fb_conn = fb_helper->connector_info[i]; 302 + connector = fb_conn->connector; 303 + if (!enabled[i]) { 304 + DRM_DEBUG_KMS("connector %d not enabled, skipping\n", 305 + connector->base.id); 306 + continue; 307 + } 308 + 309 + encoder = connector->encoder; 310 + if (!encoder || WARN_ON(!encoder->crtc)) { 311 + DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n", 312 + connector->base.id); 313 + enabled[i] = false; 314 + continue; 315 + } 316 + 317 + new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); 318 + 319 + /* 320 + * Make sure we're not trying to drive multiple connectors 321 + * with a single CRTC, since our cloning support may not 322 + * match the BIOS. 
323 + */ 324 + for (j = 0; j < fb_helper->connector_count; j++) { 325 + if (crtcs[j] == new_crtc) { 326 + any_enabled = false; 327 + goto out; 328 + } 329 + } 330 + 331 + DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", 332 + fb_conn->connector->base.id); 333 + 334 + /* go for command line mode first */ 335 + modes[i] = drm_pick_cmdline_mode(fb_conn, width, height); 336 + 337 + /* try for preferred next */ 338 + if (!modes[i]) { 339 + DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 340 + fb_conn->connector->base.id); 341 + modes[i] = drm_has_preferred_mode(fb_conn, width, 342 + height); 343 + } 344 + 345 + /* last resort: use current mode */ 346 + if (!modes[i]) { 347 + /* 348 + * IMPORTANT: We want to use the adjusted mode (i.e. 349 + * after the panel fitter upscaling) as the initial 350 + * config, not the input mode, which is what crtc->mode 351 + * usually contains. But since our current fastboot 352 + * code puts a mode derived from the post-pfit timings 353 + * into crtc->mode this works out correctly. We don't 354 + * use hwmode anywhere right now, so use it for this 355 + * since the fb helper layer wants a pointer to 356 + * something we own. 
357 + */ 358 + intel_mode_from_pipe_config(&encoder->crtc->hwmode, 359 + &to_intel_crtc(encoder->crtc)->config); 360 + modes[i] = &encoder->crtc->hwmode; 361 + } 362 + crtcs[i] = new_crtc; 363 + 364 + DRM_DEBUG_KMS("connector %s on crtc %d: %s\n", 365 + drm_get_connector_name(connector), 366 + encoder->crtc->base.id, 367 + modes[i]->name); 368 + 369 + any_enabled = true; 370 + } 371 + 372 + out: 373 + if (!any_enabled) { 374 + memcpy(enabled, save_enabled, dev->mode_config.num_connector); 375 + kfree(save_enabled); 376 + return false; 377 + } 378 + 379 + kfree(save_enabled); 380 + return true; 381 + } 382 + 245 383 static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 384 + .initial_config = intel_fb_initial_config, 246 385 .gamma_set = intel_crtc_fb_gamma_set, 247 386 .gamma_get = intel_crtc_fb_gamma_get, 248 387 .fb_probe = intelfb_create, ··· 409 258 410 259 drm_fb_helper_fini(&ifbdev->helper); 411 260 412 - drm_framebuffer_unregister_private(&ifbdev->ifb.base); 413 - intel_framebuffer_fini(&ifbdev->ifb); 261 + drm_framebuffer_unregister_private(&ifbdev->fb->base); 262 + drm_framebuffer_remove(&ifbdev->fb->base); 414 263 } 415 264 416 265 int intel_fbdev_init(struct drm_device *dev) ··· 473 322 * been restored from swap. If the object is stolen however, it will be 474 323 * full of whatever garbage was left in there. 475 324 */ 476 - if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen) 325 + if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) 477 326 memset_io(info->screen_base, 0, info->screen_size); 478 327 479 328 fb_set_suspend(info, state);
+3 -29
drivers/gpu/drm/i915/intel_hdmi.c
··· 425 425 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 426 426 u32 reg = VIDEO_DIP_CTL; 427 427 u32 val = I915_READ(reg); 428 - u32 port; 428 + u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 429 429 430 430 assert_hdmi_port_disabled(intel_hdmi); 431 431 ··· 446 446 val &= ~VIDEO_DIP_ENABLE; 447 447 I915_WRITE(reg, val); 448 448 POSTING_READ(reg); 449 - return; 450 - } 451 - 452 - switch (intel_dig_port->port) { 453 - case PORT_B: 454 - port = VIDEO_DIP_PORT_B; 455 - break; 456 - case PORT_C: 457 - port = VIDEO_DIP_PORT_C; 458 - break; 459 - default: 460 - BUG(); 461 449 return; 462 450 } 463 451 ··· 479 491 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 480 492 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 481 493 u32 val = I915_READ(reg); 482 - u32 port; 494 + u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 483 495 484 496 assert_hdmi_port_disabled(intel_hdmi); 485 497 ··· 492 504 val &= ~VIDEO_DIP_ENABLE; 493 505 I915_WRITE(reg, val); 494 506 POSTING_READ(reg); 495 - return; 496 - } 497 - 498 - switch (intel_dig_port->port) { 499 - case PORT_B: 500 - port = VIDEO_DIP_PORT_B; 501 - break; 502 - case PORT_C: 503 - port = VIDEO_DIP_PORT_C; 504 - break; 505 - case PORT_D: 506 - port = VIDEO_DIP_PORT_D; 507 - break; 508 - default: 509 - BUG(); 510 507 return; 511 508 } 512 509 ··· 1236 1263 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1237 1264 else 1238 1265 intel_connector->get_hw_state = intel_connector_get_hw_state; 1266 + intel_connector->unregister = intel_connector_unregister; 1239 1267 1240 1268 intel_hdmi_add_properties(intel_hdmi, connector); 1241 1269
+6 -7
drivers/gpu/drm/i915/intel_lvds.c
··· 899 899 struct drm_encoder *encoder; 900 900 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 901 901 struct drm_display_mode *fixed_mode = NULL; 902 + struct drm_display_mode *downclock_mode = NULL; 902 903 struct edid *edid; 903 904 struct drm_crtc *crtc; 904 905 u32 lvds; ··· 958 957 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 959 958 intel_encoder->get_config = intel_lvds_get_config; 960 959 intel_connector->get_hw_state = intel_connector_get_hw_state; 960 + intel_connector->unregister = intel_connector_unregister; 961 961 962 962 intel_connector_attach_encoder(intel_connector, intel_encoder); 963 963 intel_encoder->type = INTEL_OUTPUT_LVDS; ··· 1034 1032 1035 1033 fixed_mode = drm_mode_duplicate(dev, scan); 1036 1034 if (fixed_mode) { 1037 - intel_connector->panel.downclock_mode = 1035 + downclock_mode = 1038 1036 intel_find_panel_downclock(dev, 1039 1037 fixed_mode, connector); 1040 - if (intel_connector->panel.downclock_mode != 1041 - NULL && i915.lvds_downclock) { 1038 + if (downclock_mode != NULL && 1039 + i915.lvds_downclock) { 1042 1040 /* We found the downclock for LVDS. */ 1043 1041 dev_priv->lvds_downclock_avail = true; 1044 1042 dev_priv->lvds_downclock = 1045 - intel_connector->panel. 1046 1043 downclock_mode->clock; 1047 1044 DRM_DEBUG_KMS("LVDS downclock is found" 1048 1045 " in EDID. Normal clock %dKhz, " ··· 1117 1116 } 1118 1117 drm_sysfs_connector_add(connector); 1119 1118 1120 - intel_panel_init(&intel_connector->panel, fixed_mode); 1119 + intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 1121 1120 intel_panel_setup_backlight(connector); 1122 1121 1123 1122 return; ··· 1126 1125 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1127 1126 drm_connector_cleanup(connector); 1128 1127 drm_encoder_cleanup(encoder); 1129 - if (fixed_mode) 1130 - drm_mode_destroy(dev, fixed_mode); 1131 1128 kfree(lvds_encoder); 1132 1129 kfree(lvds_connector); 1133 1130 return;
+1 -1
drivers/gpu/drm/i915/intel_overlay.c
··· 1349 1349 } 1350 1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1351 1351 } else { 1352 - ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false); 1352 + ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); 1353 1353 if (ret) { 1354 1354 DRM_ERROR("failed to pin overlay register bo\n"); 1355 1355 goto out_free_bo;
+3 -1
drivers/gpu/drm/i915/intel_panel.c
··· 1190 1190 } 1191 1191 1192 1192 int intel_panel_init(struct intel_panel *panel, 1193 - struct drm_display_mode *fixed_mode) 1193 + struct drm_display_mode *fixed_mode, 1194 + struct drm_display_mode *downclock_mode) 1194 1195 { 1195 1196 panel->fixed_mode = fixed_mode; 1197 + panel->downclock_mode = downclock_mode; 1196 1198 1197 1199 return 0; 1198 1200 }
+27 -24
drivers/gpu/drm/i915/intel_pm.c
··· 2741 2741 return NULL; 2742 2742 } 2743 2743 2744 - ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false); 2744 + ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0); 2745 2745 if (ret) { 2746 2746 DRM_ERROR("failed to pin power context: %d\n", ret); 2747 2747 goto err_unref; ··· 3196 3196 3197 3197 static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 3198 3198 { 3199 - if (IS_GEN6(dev)) 3200 - DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); 3201 - 3202 - if (IS_HASWELL(dev)) 3203 - DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); 3204 - 3205 3199 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3206 - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3207 - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3208 - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3200 + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3201 + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3202 + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3209 3203 } 3210 3204 3211 3205 int intel_enable_rc6(const struct drm_device *dev) ··· 3216 3222 if (INTEL_INFO(dev)->gen == 5) 3217 3223 return 0; 3218 3224 3219 - if (IS_HASWELL(dev)) 3220 - return INTEL_RC6_ENABLE; 3225 + if (IS_IVYBRIDGE(dev)) 3226 + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3221 3227 3222 - /* snb/ivb have more than one rc6 state. */ 3223 - if (INTEL_INFO(dev)->gen == 6) 3224 - return INTEL_RC6_ENABLE; 3225 - 3226 - return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3228 + return INTEL_RC6_ENABLE; 3227 3229 } 3228 3230 3229 3231 static void gen6_enable_rps_interrupts(struct drm_device *dev) ··· 3276 3286 /* 3: Enable RC6 */ 3277 3287 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3278 3288 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 3279 - DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
"on" : "off"); 3289 + intel_print_rc6_info(dev, rc6_mask); 3280 3290 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3281 - GEN6_RC_CTL_EI_MODE(1) | 3282 - rc6_mask); 3291 + GEN6_RC_CTL_EI_MODE(1) | 3292 + rc6_mask); 3283 3293 3284 3294 /* 4 Program defaults and thresholds for RPS*/ 3285 3295 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */ ··· 3893 3903 3894 3904 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 3895 3905 { 3906 + struct drm_device *dev = dev_priv->dev; 3896 3907 unsigned long val; 3897 3908 3898 - if (dev_priv->info->gen != 5) 3909 + if (INTEL_INFO(dev)->gen != 5) 3899 3910 return 0; 3900 3911 3901 3912 spin_lock_irq(&mchdev_lock); ··· 3925 3934 3926 3935 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 3927 3936 { 3937 + struct drm_device *dev = dev_priv->dev; 3928 3938 static const struct v_table { 3929 3939 u16 vd; /* in .1 mil */ 3930 3940 u16 vm; /* in .1 mil */ ··· 4059 4067 { 16000, 14875, }, 4060 4068 { 16125, 15000, }, 4061 4069 }; 4062 - if (dev_priv->info->is_mobile) 4070 + if (INTEL_INFO(dev)->is_mobile) 4063 4071 return v_table[pxvid].vm; 4064 4072 else 4065 4073 return v_table[pxvid].vd; ··· 4102 4110 4103 4111 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 4104 4112 { 4105 - if (dev_priv->info->gen != 5) 4113 + struct drm_device *dev = dev_priv->dev; 4114 + 4115 + if (INTEL_INFO(dev)->gen != 5) 4106 4116 return; 4107 4117 4108 4118 spin_lock_irq(&mchdev_lock); ··· 4153 4159 4154 4160 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 4155 4161 { 4162 + struct drm_device *dev = dev_priv->dev; 4156 4163 unsigned long val; 4157 4164 4158 - if (dev_priv->info->gen != 5) 4165 + if (INTEL_INFO(dev)->gen != 5) 4159 4166 return 0; 4160 4167 4161 4168 spin_lock_irq(&mchdev_lock); ··· 4691 4696 /* Bspec says we need to always set all mask bits. 
*/ 4692 4697 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | 4693 4698 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); 4699 + 4700 + /* 4701 + * Bspec says: 4702 + * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 4703 + * 3DSTATE_SF number of SF output attributes is more than 16." 4704 + */ 4705 + I915_WRITE(_3D_CHICKEN3, 4706 + _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 4694 4707 4695 4708 /* 4696 4709 * According to the spec the following bits should be
+14 -10
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 531 531 goto err; 532 532 } 533 533 534 - i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 534 + ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 535 + if (ret) 536 + goto err_unref; 535 537 536 - ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); 538 + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); 537 539 if (ret) 538 540 goto err_unref; 539 541 ··· 1273 1271 goto err; 1274 1272 } 1275 1273 1276 - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1277 - 1278 - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 1279 - if (ret != 0) { 1274 + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1275 + if (ret) 1280 1276 goto err_unref; 1281 - } 1277 + 1278 + ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1279 + if (ret) 1280 + goto err_unref; 1282 1281 1283 1282 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1284 1283 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); ··· 1359 1356 1360 1357 ring->obj = obj; 1361 1358 1362 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); 1359 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1363 1360 if (ret) 1364 1361 goto err_unref; 1365 1362 ··· 1516 1513 return 0; 1517 1514 } 1518 1515 1519 - if (dev->primary->master) { 1516 + if (!drm_core_check_feature(dev, DRIVER_MODESET) && 1517 + dev->primary->master) { 1520 1518 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1521 1519 if (master_priv->sarea_priv) 1522 1520 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ··· 1943 1939 return -ENOMEM; 1944 1940 } 1945 1941 1946 - ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); 1942 + ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 1947 1943 if (ret != 0) { 1948 1944 drm_gem_object_unreference(&obj->base); 1949 1945 DRM_ERROR("Failed to ping batch bo\n");
+61 -9
drivers/gpu/drm/i915/intel_sdvo.c
··· 2382 2382 } 2383 2383 2384 2384 static void 2385 + intel_sdvo_connector_unregister(struct intel_connector *intel_connector) 2386 + { 2387 + struct drm_connector *drm_connector; 2388 + struct intel_sdvo *sdvo_encoder; 2389 + 2390 + drm_connector = &intel_connector->base; 2391 + sdvo_encoder = intel_attached_sdvo(&intel_connector->base); 2392 + 2393 + sysfs_remove_link(&drm_connector->kdev->kobj, 2394 + sdvo_encoder->ddc.dev.kobj.name); 2395 + intel_connector_unregister(intel_connector); 2396 + } 2397 + 2398 + static int 2385 2399 intel_sdvo_connector_init(struct intel_sdvo_connector *connector, 2386 2400 struct intel_sdvo *encoder) 2387 2401 { 2388 - drm_connector_init(encoder->base.base.dev, 2389 - &connector->base.base, 2402 + struct drm_connector *drm_connector; 2403 + int ret; 2404 + 2405 + drm_connector = &connector->base.base; 2406 + ret = drm_connector_init(encoder->base.base.dev, 2407 + drm_connector, 2390 2408 &intel_sdvo_connector_funcs, 2391 2409 connector->base.base.connector_type); 2410 + if (ret < 0) 2411 + return ret; 2392 2412 2393 - drm_connector_helper_add(&connector->base.base, 2413 + drm_connector_helper_add(drm_connector, 2394 2414 &intel_sdvo_connector_helper_funcs); 2395 2415 2396 2416 connector->base.base.interlace_allowed = 1; 2397 2417 connector->base.base.doublescan_allowed = 0; 2398 2418 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2399 2419 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; 2420 + connector->base.unregister = intel_sdvo_connector_unregister; 2400 2421 2401 2422 intel_connector_attach_encoder(&connector->base, &encoder->base); 2402 - drm_sysfs_connector_add(&connector->base.base); 2423 + ret = drm_sysfs_connector_add(drm_connector); 2424 + if (ret < 0) 2425 + goto err1; 2426 + 2427 + ret = sysfs_create_link(&encoder->ddc.dev.kobj, 2428 + &drm_connector->kdev->kobj, 2429 + encoder->ddc.dev.kobj.name); 2430 + if (ret < 0) 2431 + goto err2; 2432 + 2433 + return 0; 2434 + 2435 + 
err2: 2436 + drm_sysfs_connector_remove(drm_connector); 2437 + err1: 2438 + drm_connector_cleanup(drm_connector); 2439 + 2440 + return ret; 2403 2441 } 2404 2442 2405 2443 static void ··· 2497 2459 intel_sdvo->is_hdmi = true; 2498 2460 } 2499 2461 2500 - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2462 + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2463 + kfree(intel_sdvo_connector); 2464 + return false; 2465 + } 2466 + 2501 2467 if (intel_sdvo->is_hdmi) 2502 2468 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); 2503 2469 ··· 2532 2490 2533 2491 intel_sdvo->is_tv = true; 2534 2492 2535 - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2493 + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2494 + kfree(intel_sdvo_connector); 2495 + return false; 2496 + } 2536 2497 2537 2498 if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) 2538 2499 goto err; ··· 2579 2534 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2580 2535 } 2581 2536 2582 - intel_sdvo_connector_init(intel_sdvo_connector, 2583 - intel_sdvo); 2537 + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2538 + kfree(intel_sdvo_connector); 2539 + return false; 2540 + } 2541 + 2584 2542 return true; 2585 2543 } 2586 2544 ··· 2614 2566 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2615 2567 } 2616 2568 2617 - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2569 + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2570 + kfree(intel_sdvo_connector); 2571 + return false; 2572 + } 2573 + 2618 2574 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2619 2575 goto err; 2620 2576
+5 -4
drivers/gpu/drm/i915/intel_tv.c
··· 1189 1189 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1190 1190 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1191 1191 i915_disable_pipestat(dev_priv, 0, 1192 - PIPE_HOTPLUG_INTERRUPT_ENABLE | 1193 - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1192 + PIPE_HOTPLUG_INTERRUPT_STATUS | 1193 + PIPE_HOTPLUG_TV_INTERRUPT_STATUS); 1194 1194 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1195 1195 } 1196 1196 ··· 1266 1266 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1267 1267 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1268 1268 i915_enable_pipestat(dev_priv, 0, 1269 - PIPE_HOTPLUG_INTERRUPT_ENABLE | 1270 - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1269 + PIPE_HOTPLUG_INTERRUPT_STATUS | 1270 + PIPE_HOTPLUG_TV_INTERRUPT_STATUS); 1271 1271 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1272 1272 } 1273 1273 ··· 1634 1634 intel_encoder->disable = intel_disable_tv; 1635 1635 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1636 1636 intel_connector->get_hw_state = intel_connector_get_hw_state; 1637 + intel_connector->unregister = intel_connector_unregister; 1637 1638 1638 1639 intel_connector_attach_encoder(intel_connector, intel_encoder); 1639 1640 intel_encoder->type = INTEL_OUTPUT_TVOUT;
+1
include/drm/drm_crtc.h
··· 994 994 995 995 extern const char *drm_get_connector_name(const struct drm_connector *connector); 996 996 extern const char *drm_get_connector_status_name(enum drm_connector_status status); 997 + extern const char *drm_get_subpixel_order_name(enum subpixel_order order); 997 998 extern const char *drm_get_dpms_name(int val); 998 999 extern const char *drm_get_dvi_i_subconnector_name(int val); 999 1000 extern const char *drm_get_dvi_i_select_name(int val);
+6
include/drm/drm_fb_helper.h
··· 121 121 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); 122 122 int drm_fb_helper_debug_enter(struct fb_info *info); 123 123 int drm_fb_helper_debug_leave(struct fb_info *info); 124 + struct drm_display_mode * 125 + drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, 126 + int width, int height); 127 + struct drm_display_mode * 128 + drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 129 + int width, int height); 124 130 125 131 #endif