Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (57 commits)
drm/i915: Handle ERESTARTSYS during page fault
drm/i915: Warn before mmaping a purgeable buffer.
drm/i915: Track purged state.
drm/i915: Remove eviction debug spam
drm/i915: Immediately discard any backing storage for uneeded objects
drm/i915: Do not mis-classify clean objects as purgeable
drm/i915: Whitespace correction for madv
drm/i915: BUG_ON page refleak during unbind
drm/i915: Search harder for a reusable object
drm/i915: Clean up evict from list.
drm/i915: Add tracepoints
drm/i915: framebuffer compression for GM45+
drm/i915: split display functions by chip type
drm/i915: Skip the sanity checks if the current relocation is valid
drm/i915: Check that the relocation points to within the target
drm/i915: correct FBC update when pipe base update occurs
drm/i915: blacklist Acer AspireOne lid status
ACPI: make ACPI button funcs no-ops if not built in
drm/i915: prevent FIFO calculation overflows on 32 bits with high dotclocks
drm/i915: intel_display.c handle latency variable efficiently
...

Fix up trivial conflicts in drivers/gpu/drm/i915/{i915_dma.c|i915_drv.h}

+2825 -474
+1
arch/x86/mm/pageattr.c
···
 
         mb();
 }
+EXPORT_SYMBOL_GPL(clflush_cache_range);
 
 static void __cpa_flush_all(void *arg)
 {
+43 -2
drivers/acpi/button.c
···
         .release = single_release,
 };
 
+static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+static struct acpi_device *lid_device;
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
···
 /* --------------------------------------------------------------------------
                               Driver Interface
    -------------------------------------------------------------------------- */
+int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+        return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_register);
+
+int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+        return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_unregister);
+
+int acpi_lid_open(void)
+{
+        acpi_status status;
+        unsigned long long state;
+
+        status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
+                                       &state);
+        if (ACPI_FAILURE(status))
+                return -ENODEV;
+
+        return !!state;
+}
+EXPORT_SYMBOL(acpi_lid_open);
+
 static int acpi_lid_send_state(struct acpi_device *device)
 {
         struct acpi_button *button = acpi_driver_data(device);
         unsigned long long state;
         acpi_status status;
+        int ret;
 
         status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
         if (ACPI_FAILURE(status))
···
         /* input layer checks if event is redundant */
         input_report_switch(button->input, SW_LID, !state);
         input_sync(button->input);
-        return 0;
+
+        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+        if (ret == NOTIFY_DONE)
+                ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+                                                   device);
+        return ret;
 }
 
 static void acpi_button_notify(struct acpi_device *device, u32 event)
···
         error = input_register_device(input);
         if (error)
                 goto err_remove_fs;
-        if (button->type == ACPI_BUTTON_TYPE_LID)
+        if (button->type == ACPI_BUTTON_TYPE_LID) {
                 acpi_lid_send_state(device);
+                /*
+                 * This assumes there's only one lid device, or if there are
+                 * more we only care about the last one...
+                 */
+                lid_device = device;
+        }
 
         if (device->wakeup.flags.valid) {
                 /* Button's GPE is run-wake GPE */
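For reference, the three exports added above (acpi_lid_notifier_register, acpi_lid_notifier_unregister, acpi_lid_open) are what the "drm/i915: blacklist Acer AspireOne lid status" change in this series consumes. Below is a minimal, illustrative consumer only, not code from this merge; the module and function names are made up, and the <acpi/button.h> include is assumed to be where the new declarations live.

/* Hypothetical consumer of the new lid notifier API (illustration only). */
#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/button.h>        /* assumed header for the new declarations */

static int example_lid_notify(struct notifier_block *nb,
                              unsigned long state, void *data)
{
        /* 'state' is the evaluated _LID value: 0 = closed, non-zero = open */
        pr_info("example: lid %s\n", state ? "opened" : "closed");
        return NOTIFY_OK;
}

static struct notifier_block example_lid_nb = {
        .notifier_call = example_lid_notify,
};

static int __init example_init(void)
{
        int ret;

        ret = acpi_lid_notifier_register(&example_lid_nb);
        if (ret)
                return ret;

        ret = acpi_lid_open();  /* 1 = open, 0 = closed, negative = no lid */
        pr_info("example: lid currently %s\n",
                ret == 1 ? "open" : ret == 0 ? "closed" : "unknown");
        return 0;
}

static void __exit example_exit(void)
{
        acpi_lid_notifier_unregister(&example_lid_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");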
+30 -7
drivers/char/agp/intel-agp.c
···
 #define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
 #define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
 #define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
 #define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
···
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+        agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
···
         if (!intel_private.i8xx_page)
                 return;
 
-        /* make page uncached */
-        map_page_into_agp(intel_private.i8xx_page);
-
         intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
         if (!intel_private.i8xx_flush_page)
                 intel_i830_fini_flush();
 }
 
+static void
+do_wbinvd(void *null)
+{
+        wbinvd();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 {
         unsigned int *pg = intel_private.i8xx_flush_page;
-        int i;
 
-        for (i = 0; i < 256; i += 2)
-                *(pg + i) = i;
+        memset(pg, 0, 1024);
 
-        wmb();
+        if (cpu_has_clflush) {
+                clflush_cache_range(pg, 1024);
+        } else {
+                if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+                        printk(KERN_ERR "Timed out waiting for cache flush.\n");
+        }
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
···
         case PCI_DEVICE_ID_INTEL_Q45_HB:
         case PCI_DEVICE_ID_INTEL_G45_HB:
         case PCI_DEVICE_ID_INTEL_G41_HB:
+        case PCI_DEVICE_ID_INTEL_B43_HB:
         case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
         case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
         case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
···
                 "Q45/Q43", NULL, &intel_i965_driver },
         { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
                 "G45/G43", NULL, &intel_i965_driver },
+        { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+                "B43", NULL, &intel_i965_driver },
         { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
                 "G41", NULL, &intel_i965_driver },
         { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
···
         ID(PCI_DEVICE_ID_INTEL_Q45_HB),
         ID(PCI_DEVICE_ID_INTEL_G45_HB),
         ID(PCI_DEVICE_ID_INTEL_G41_HB),
+        ID(PCI_DEVICE_ID_INTEL_B43_HB),
         ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
         ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
         ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
+1
drivers/gpu/drm/Kconfig
···
         select BACKLIGHT_CLASS_DEVICE if ACPI
         select INPUT if ACPI
         select ACPI_VIDEO if ACPI
+        select ACPI_BUTTON if ACPI
         help
           Choose this option if you have a system that has Intel 830M, 845G,
           852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
+13
drivers/gpu/drm/drm_gem.c
···
         if (IS_ERR(obj->filp))
                 goto free;
 
+        /* Basically we want to disable the OOM killer and handle ENOMEM
+         * ourselves by sacrificing pages from cached buffers.
+         * XXX shmem_file_[gs]et_gfp_mask()
+         */
+        mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+                             GFP_HIGHUSER |
+                             __GFP_COLD |
+                             __GFP_FS |
+                             __GFP_RECLAIMABLE |
+                             __GFP_NORETRY |
+                             __GFP_NOWARN |
+                             __GFP_NOMEMALLOC);
+
         kref_init(&obj->refcount);
         kref_init(&obj->handlecount);
         obj->size = size;
+1
drivers/gpu/drm/i915/Makefile
···
           i915_gem.o \
           i915_gem_debug.o \
           i915_gem_tiling.o \
+          i915_trace_points.o \
           intel_display.o \
           intel_crt.o \
           intel_lvds.o \
+4 -2
drivers/gpu/drm/i915/i915_debugfs.c
···
 {
         struct drm_gem_object *obj = obj_priv->obj;
 
-        seq_printf(m, " %p: %s %08x %08x %d",
+        seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
                    obj,
                    get_pin_flag(obj_priv),
+                   obj->size,
                    obj->read_domains, obj->write_domain,
-                   obj_priv->last_rendering_seqno);
+                   obj_priv->last_rendering_seqno,
+                   obj_priv->dirty ? "dirty" : "");
 
         if (obj->name)
                 seq_printf(m, " (name: %d)", obj->name);
+189 -5
drivers/gpu/drm/i915/i915_dma.c
··· 33 33 #include "intel_drv.h" 34 34 #include "i915_drm.h" 35 35 #include "i915_drv.h" 36 + #include "i915_trace.h" 36 37 #include <linux/vgaarb.h> 37 38 38 39 /* Really want an OS-independent resettable timer. Would like to have ··· 51 50 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 52 51 int i; 53 52 53 + trace_i915_ring_wait_begin (dev); 54 + 54 55 for (i = 0; i < 100000; i++) { 55 56 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 56 57 acthd = I915_READ(acthd_reg); 57 58 ring->space = ring->head - (ring->tail + 8); 58 59 if (ring->space < 0) 59 60 ring->space += ring->Size; 60 - if (ring->space >= n) 61 + if (ring->space >= n) { 62 + trace_i915_ring_wait_end (dev); 61 63 return 0; 64 + } 62 65 63 66 if (dev->primary->master) { 64 67 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; ··· 82 77 83 78 } 84 79 80 + trace_i915_ring_wait_end (dev); 85 81 return -EBUSY; 86 82 } 87 83 ··· 928 922 * how much was set aside so we can use it for our own purposes. 929 923 */ 930 924 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, 931 - uint32_t *preallocated_size) 925 + uint32_t *preallocated_size, 926 + uint32_t *start) 932 927 { 933 928 struct drm_i915_private *dev_priv = dev->dev_private; 934 929 u16 tmp = 0; ··· 1016 1009 return -1; 1017 1010 } 1018 1011 *preallocated_size = stolen - overhead; 1012 + *start = overhead; 1019 1013 1020 1014 return 0; 1015 + } 1016 + 1017 + #define PTE_ADDRESS_MASK 0xfffff000 1018 + #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ 1019 + #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) 1020 + #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ 1021 + #define PTE_MAPPING_TYPE_CACHED (3 << 1) 1022 + #define PTE_MAPPING_TYPE_MASK (3 << 1) 1023 + #define PTE_VALID (1 << 0) 1024 + 1025 + /** 1026 + * i915_gtt_to_phys - take a GTT address and turn it into a physical one 1027 + * @dev: drm device 1028 + * @gtt_addr: address to translate 1029 + * 1030 + * Some chip functions require allocations from stolen space but need the 1031 + * physical address of the memory in question. We use this routine 1032 + * to get a physical address suitable for register programming from a given 1033 + * GTT address. 1034 + */ 1035 + static unsigned long i915_gtt_to_phys(struct drm_device *dev, 1036 + unsigned long gtt_addr) 1037 + { 1038 + unsigned long *gtt; 1039 + unsigned long entry, phys; 1040 + int gtt_bar = IS_I9XX(dev) ? 0 : 1; 1041 + int gtt_offset, gtt_size; 1042 + 1043 + if (IS_I965G(dev)) { 1044 + if (IS_G4X(dev) || IS_IGDNG(dev)) { 1045 + gtt_offset = 2*1024*1024; 1046 + gtt_size = 2*1024*1024; 1047 + } else { 1048 + gtt_offset = 512*1024; 1049 + gtt_size = 512*1024; 1050 + } 1051 + } else { 1052 + gtt_bar = 3; 1053 + gtt_offset = 0; 1054 + gtt_size = pci_resource_len(dev->pdev, gtt_bar); 1055 + } 1056 + 1057 + gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, 1058 + gtt_size); 1059 + if (!gtt) { 1060 + DRM_ERROR("ioremap of GTT failed\n"); 1061 + return 0; 1062 + } 1063 + 1064 + entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); 1065 + 1066 + DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); 1067 + 1068 + /* Mask out these reserved bits on this hardware. */ 1069 + if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || 1070 + IS_I945G(dev) || IS_I945GM(dev)) { 1071 + entry &= ~PTE_ADDRESS_MASK_HIGH; 1072 + } 1073 + 1074 + /* If it's not a mapping type we know, then bail. 
*/ 1075 + if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && 1076 + (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { 1077 + iounmap(gtt); 1078 + return 0; 1079 + } 1080 + 1081 + if (!(entry & PTE_VALID)) { 1082 + DRM_ERROR("bad GTT entry in stolen space\n"); 1083 + iounmap(gtt); 1084 + return 0; 1085 + } 1086 + 1087 + iounmap(gtt); 1088 + 1089 + phys =(entry & PTE_ADDRESS_MASK) | 1090 + ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); 1091 + 1092 + DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); 1093 + 1094 + return phys; 1095 + } 1096 + 1097 + static void i915_warn_stolen(struct drm_device *dev) 1098 + { 1099 + DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); 1100 + DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); 1101 + } 1102 + 1103 + static void i915_setup_compression(struct drm_device *dev, int size) 1104 + { 1105 + struct drm_i915_private *dev_priv = dev->dev_private; 1106 + struct drm_mm_node *compressed_fb, *compressed_llb; 1107 + unsigned long cfb_base, ll_base; 1108 + 1109 + /* Leave 1M for line length buffer & misc. */ 1110 + compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1111 + if (!compressed_fb) { 1112 + i915_warn_stolen(dev); 1113 + return; 1114 + } 1115 + 1116 + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1117 + if (!compressed_fb) { 1118 + i915_warn_stolen(dev); 1119 + return; 1120 + } 1121 + 1122 + cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); 1123 + if (!cfb_base) { 1124 + DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1125 + drm_mm_put_block(compressed_fb); 1126 + } 1127 + 1128 + if (!IS_GM45(dev)) { 1129 + compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 1130 + 4096, 0); 1131 + if (!compressed_llb) { 1132 + i915_warn_stolen(dev); 1133 + return; 1134 + } 1135 + 1136 + compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); 1137 + if (!compressed_llb) { 1138 + i915_warn_stolen(dev); 1139 + return; 1140 + } 1141 + 1142 + ll_base = i915_gtt_to_phys(dev, compressed_llb->start); 1143 + if (!ll_base) { 1144 + DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1145 + drm_mm_put_block(compressed_fb); 1146 + drm_mm_put_block(compressed_llb); 1147 + } 1148 + } 1149 + 1150 + dev_priv->cfb_size = size; 1151 + 1152 + if (IS_GM45(dev)) { 1153 + g4x_disable_fbc(dev); 1154 + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1155 + } else { 1156 + i8xx_disable_fbc(dev); 1157 + I915_WRITE(FBC_CFB_BASE, cfb_base); 1158 + I915_WRITE(FBC_LL_BASE, ll_base); 1159 + } 1160 + 1161 + DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1162 + ll_base, size >> 20); 1021 1163 } 1022 1164 1023 1165 /* true = enable decode, false = disable decoder */ ··· 1183 1027 } 1184 1028 1185 1029 static int i915_load_modeset_init(struct drm_device *dev, 1030 + unsigned long prealloc_start, 1186 1031 unsigned long prealloc_size, 1187 1032 unsigned long agp_size) 1188 1033 { ··· 1204 1047 1205 1048 /* Basic memrange allocator for stolen space (aka vram) */ 1206 1049 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1050 + DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); 1051 + 1052 + /* We're off and running w/KMS */ 1053 + dev_priv->mm.suspended = 0; 1207 1054 1208 1055 /* Let GEM Manage from end of prealloc space to end of aperture. 
1209 1056 * ··· 1220 1059 */ 1221 1060 i915_gem_do_init(dev, prealloc_size, agp_size - 4096); 1222 1061 1062 + mutex_lock(&dev->struct_mutex); 1223 1063 ret = i915_gem_init_ringbuffer(dev); 1064 + mutex_unlock(&dev->struct_mutex); 1224 1065 if (ret) 1225 1066 goto out; 1067 + 1068 + /* Try to set up FBC with a reasonable compressed buffer size */ 1069 + if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) && 1070 + i915_powersave) { 1071 + int cfb_size; 1072 + 1073 + /* Try to get an 8M buffer... */ 1074 + if (prealloc_size > (9*1024*1024)) 1075 + cfb_size = 8*1024*1024; 1076 + else /* fall back to 7/8 of the stolen space */ 1077 + cfb_size = prealloc_size * 7 / 8; 1078 + i915_setup_compression(dev, cfb_size); 1079 + } 1226 1080 1227 1081 /* Allow hardware batchbuffers unless told otherwise. 1228 1082 */ ··· 1356 1180 struct drm_i915_private *dev_priv = dev->dev_private; 1357 1181 resource_size_t base, size; 1358 1182 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; 1359 - uint32_t agp_size, prealloc_size; 1183 + uint32_t agp_size, prealloc_size, prealloc_start; 1360 1184 1361 1185 /* i915 has 4 more counters */ 1362 1186 dev->counters += 4; ··· 1410 1234 "performance may suffer.\n"); 1411 1235 } 1412 1236 1413 - ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 1237 + ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); 1414 1238 if (ret) 1415 1239 goto out_iomapfree; 1416 1240 ··· 1476 1300 return ret; 1477 1301 } 1478 1302 1303 + /* Start out suspended */ 1304 + dev_priv->mm.suspended = 1; 1305 + 1479 1306 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1480 - ret = i915_load_modeset_init(dev, prealloc_size, agp_size); 1307 + ret = i915_load_modeset_init(dev, prealloc_start, 1308 + prealloc_size, agp_size); 1481 1309 if (ret < 0) { 1482 1310 DRM_ERROR("failed to init modeset\n"); 1483 1311 goto out_workqueue_free; ··· 1493 1313 if (!IS_IGDNG(dev)) 1494 1314 intel_opregion_init(dev, 0); 1495 1315 1316 + setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 1317 + (unsigned long) dev); 1496 1318 return 0; 1497 1319 1498 1320 out_workqueue_free: ··· 1515 1333 struct drm_i915_private *dev_priv = dev->dev_private; 1516 1334 1517 1335 destroy_workqueue(dev_priv->wq); 1336 + del_timer_sync(&dev_priv->hangcheck_timer); 1518 1337 1519 1338 io_mapping_free(dev_priv->mm.gtt_mapping); 1520 1339 if (dev_priv->mm.gtt_mtrr >= 0) { ··· 1655 1472 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), 1656 1473 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), 1657 1474 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 1475 + DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), 1658 1476 }; 1659 1477 1660 1478 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+131 -2
drivers/gpu/drm/i915/i915_drv.c
··· 89 89 pci_set_power_state(dev->pdev, PCI_D3hot); 90 90 } 91 91 92 + dev_priv->suspended = 1; 93 + 92 94 return 0; 93 95 } 94 96 ··· 99 97 struct drm_i915_private *dev_priv = dev->dev_private; 100 98 int ret = 0; 101 99 102 - pci_set_power_state(dev->pdev, PCI_D0); 103 - pci_restore_state(dev->pdev); 104 100 if (pci_enable_device(dev->pdev)) 105 101 return -1; 106 102 pci_set_master(dev->pdev); ··· 124 124 drm_helper_resume_force_mode(dev); 125 125 } 126 126 127 + dev_priv->suspended = 0; 128 + 127 129 return ret; 128 130 } 131 + 132 + /** 133 + * i965_reset - reset chip after a hang 134 + * @dev: drm device to reset 135 + * @flags: reset domains 136 + * 137 + * Reset the chip. Useful if a hang is detected. Returns zero on successful 138 + * reset or otherwise an error code. 139 + * 140 + * Procedure is fairly simple: 141 + * - reset the chip using the reset reg 142 + * - re-init context state 143 + * - re-init hardware status page 144 + * - re-init ring buffer 145 + * - re-init interrupt state 146 + * - re-init display 147 + */ 148 + int i965_reset(struct drm_device *dev, u8 flags) 149 + { 150 + drm_i915_private_t *dev_priv = dev->dev_private; 151 + unsigned long timeout; 152 + u8 gdrst; 153 + /* 154 + * We really should only reset the display subsystem if we actually 155 + * need to 156 + */ 157 + bool need_display = true; 158 + 159 + mutex_lock(&dev->struct_mutex); 160 + 161 + /* 162 + * Clear request list 163 + */ 164 + i915_gem_retire_requests(dev); 165 + 166 + if (need_display) 167 + i915_save_display(dev); 168 + 169 + if (IS_I965G(dev) || IS_G4X(dev)) { 170 + /* 171 + * Set the domains we want to reset, then the reset bit (bit 0). 172 + * Clear the reset bit after a while and wait for hardware status 173 + * bit (bit 1) to be set 174 + */ 175 + pci_read_config_byte(dev->pdev, GDRST, &gdrst); 176 + pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); 177 + udelay(50); 178 + pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); 179 + 180 + /* ...we don't want to loop forever though, 500ms should be plenty */ 181 + timeout = jiffies + msecs_to_jiffies(500); 182 + do { 183 + udelay(100); 184 + pci_read_config_byte(dev->pdev, GDRST, &gdrst); 185 + } while ((gdrst & 0x1) && time_after(timeout, jiffies)); 186 + 187 + if (gdrst & 0x1) { 188 + WARN(true, "i915: Failed to reset chip\n"); 189 + mutex_unlock(&dev->struct_mutex); 190 + return -EIO; 191 + } 192 + } else { 193 + DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); 194 + return -ENODEV; 195 + } 196 + 197 + /* Ok, now get things going again... */ 198 + 199 + /* 200 + * Everything depends on having the GTT running, so we need to start 201 + * there. Fortunately we don't need to do this unless we reset the 202 + * chip at a PCI level. 203 + * 204 + * Next we need to restore the context, but we don't use those 205 + * yet either... 206 + * 207 + * Ring buffer needs to be re-initialized in the KMS case, or if X 208 + * was running at the time of the reset (i.e. we weren't VT 209 + * switched away). 210 + */ 211 + if (drm_core_check_feature(dev, DRIVER_MODESET) || 212 + !dev_priv->mm.suspended) { 213 + drm_i915_ring_buffer_t *ring = &dev_priv->ring; 214 + struct drm_gem_object *obj = ring->ring_obj; 215 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 216 + dev_priv->mm.suspended = 0; 217 + 218 + /* Stop the ring if it's running. */ 219 + I915_WRITE(PRB0_CTL, 0); 220 + I915_WRITE(PRB0_TAIL, 0); 221 + I915_WRITE(PRB0_HEAD, 0); 222 + 223 + /* Initialize the ring. 
*/ 224 + I915_WRITE(PRB0_START, obj_priv->gtt_offset); 225 + I915_WRITE(PRB0_CTL, 226 + ((obj->size - 4096) & RING_NR_PAGES) | 227 + RING_NO_REPORT | 228 + RING_VALID); 229 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 230 + i915_kernel_lost_context(dev); 231 + else { 232 + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 233 + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 234 + ring->space = ring->head - (ring->tail + 8); 235 + if (ring->space < 0) 236 + ring->space += ring->Size; 237 + } 238 + 239 + mutex_unlock(&dev->struct_mutex); 240 + drm_irq_uninstall(dev); 241 + drm_irq_install(dev); 242 + mutex_lock(&dev->struct_mutex); 243 + } 244 + 245 + /* 246 + * Display needs restore too... 247 + */ 248 + if (need_display) 249 + i915_restore_display(dev); 250 + 251 + mutex_unlock(&dev->struct_mutex); 252 + return 0; 253 + } 254 + 129 255 130 256 static int __devinit 131 257 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ··· 360 234 { 361 235 driver.num_ioctls = i915_max_ioctl; 362 236 237 + i915_gem_shrinker_init(); 238 + 363 239 /* 364 240 * If CONFIG_DRM_I915_KMS is set, default to KMS unless 365 241 * explicitly disabled with the module pararmeter. ··· 388 260 389 261 static void __exit i915_exit(void) 390 262 { 263 + i915_gem_shrinker_exit(); 391 264 drm_exit(&driver); 392 265 } 393 266
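The i965_reset() helper added above is exported through i915_drv.h (see the declaration in the next file). A rough, purely illustrative sketch of how a hang-recovery path might call it follows; the wrapper function here is hypothetical, and GDRST_FULL is the full reset-domain flag referenced in the code above.

/* Illustration only: not part of this merge. */
static void example_recover_from_hang(struct drm_device *dev)
{
        /* Attempt a full chip reset; i965_reset() returns 0 on success. */
        if (i965_reset(dev, GDRST_FULL))
                DRM_ERROR("example: chip reset failed, GPU left wedged\n");
}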
+71 -6
drivers/gpu/drm/i915/i915_drv.h
··· 48 48 PIPE_B, 49 49 }; 50 50 51 + enum plane { 52 + PLANE_A = 0, 53 + PLANE_B, 54 + }; 55 + 51 56 #define I915_NUM_PIPE 2 52 57 53 58 /* Interface history: ··· 153 148 struct timeval time; 154 149 }; 155 150 151 + struct drm_i915_display_funcs { 152 + void (*dpms)(struct drm_crtc *crtc, int mode); 153 + bool (*fbc_enabled)(struct drm_crtc *crtc); 154 + void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 155 + void (*disable_fbc)(struct drm_device *dev); 156 + int (*get_display_clock_speed)(struct drm_device *dev); 157 + int (*get_fifo_size)(struct drm_device *dev, int plane); 158 + void (*update_wm)(struct drm_device *dev, int planea_clock, 159 + int planeb_clock, int sr_hdisplay, int pixel_size); 160 + /* clock updates for mode set */ 161 + /* cursor updates */ 162 + /* render clock increase/decrease */ 163 + /* display clock increase/decrease */ 164 + /* pll clock increase/decrease */ 165 + /* clock gating init */ 166 + }; 167 + 156 168 typedef struct drm_i915_private { 157 169 struct drm_device *dev; 158 170 ··· 220 198 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 221 199 int vblank_pipe; 222 200 201 + /* For hangcheck timer */ 202 + #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ 203 + struct timer_list hangcheck_timer; 204 + int hangcheck_count; 205 + uint32_t last_acthd; 206 + 223 207 bool cursor_needs_physical; 224 208 225 209 struct drm_mm vram; 210 + 211 + unsigned long cfb_size; 212 + unsigned long cfb_pitch; 213 + int cfb_fence; 214 + int cfb_plane; 226 215 227 216 int irq_enabled; 228 217 ··· 255 222 unsigned int edp_support:1; 256 223 int lvds_ssc_freq; 257 224 225 + struct notifier_block lid_notifier; 226 + 258 227 int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ 259 228 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 260 229 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ ··· 269 234 struct work_struct error_work; 270 235 struct workqueue_struct *wq; 271 236 237 + /* Display functions */ 238 + struct drm_i915_display_funcs display; 239 + 272 240 /* Register state */ 241 + bool suspended; 273 242 u8 saveLBB; 274 243 u32 saveDSPACNTR; 275 244 u32 saveDSPBCNTR; ··· 389 350 int gtt_mtrr; 390 351 391 352 /** 353 + * Membership on list of all loaded devices, used to evict 354 + * inactive buffers under memory pressure. 355 + * 356 + * Modifications should only be done whilst holding the 357 + * shrink_list_lock spinlock. 358 + */ 359 + struct list_head shrink_list; 360 + 361 + /** 392 362 * List of objects currently involved in rendering from the 393 363 * ringbuffer. 394 364 * ··· 480 432 * It prevents command submission from occuring and makes 481 433 * every pending request fail 482 434 */ 483 - int wedged; 435 + atomic_t wedged; 484 436 485 437 /** Bit 6 swizzling required for X tiling */ 486 438 uint32_t bit_6_swizzle_x; ··· 539 491 * This is the same as gtt_space->start 540 492 */ 541 493 uint32_t gtt_offset; 542 - /** 543 - * Required alignment for the object 544 - */ 545 - uint32_t gtt_alignment; 494 + 546 495 /** 547 496 * Fake offset for use by mmap(2) 548 497 */ ··· 586 541 * in an execbuffer object list. 587 542 */ 588 543 int in_execbuffer; 544 + 545 + /** 546 + * Advice: are the backing pages purgeable? 
547 + */ 548 + int madv; 589 549 }; 590 550 591 551 /** ··· 635 585 extern unsigned int i915_fbpercrtc; 636 586 extern unsigned int i915_powersave; 637 587 588 + extern void i915_save_display(struct drm_device *dev); 589 + extern void i915_restore_display(struct drm_device *dev); 638 590 extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 639 591 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 640 592 ··· 656 604 extern int i915_emit_box(struct drm_device *dev, 657 605 struct drm_clip_rect *boxes, 658 606 int i, int DR1, int DR4); 607 + extern int i965_reset(struct drm_device *dev, u8 flags); 659 608 660 609 /* i915_irq.c */ 610 + void i915_hangcheck_elapsed(unsigned long data); 661 611 extern int i915_irq_emit(struct drm_device *dev, void *data, 662 612 struct drm_file *file_priv); 663 613 extern int i915_irq_wait(struct drm_device *dev, void *data, ··· 730 676 struct drm_file *file_priv); 731 677 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 732 678 struct drm_file *file_priv); 679 + int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 680 + struct drm_file *file_priv); 733 681 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 734 682 struct drm_file *file_priv); 735 683 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, ··· 751 695 void i915_gem_release_mmap(struct drm_gem_object *obj); 752 696 void i915_gem_lastclose(struct drm_device *dev); 753 697 uint32_t i915_get_gem_seqno(struct drm_device *dev); 698 + bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 754 699 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 755 700 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 756 701 void i915_gem_retire_requests(struct drm_device *dev); ··· 776 719 int i915_gem_object_get_pages(struct drm_gem_object *obj); 777 720 void i915_gem_object_put_pages(struct drm_gem_object *obj); 778 721 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 722 + 723 + void i915_gem_shrinker_init(void); 724 + void i915_gem_shrinker_exit(void); 779 725 780 726 /* i915_gem_tiling.c */ 781 727 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); ··· 827 767 extern void intel_modeset_init(struct drm_device *dev); 828 768 extern void intel_modeset_cleanup(struct drm_device *dev); 829 769 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 770 + extern void i8xx_disable_fbc(struct drm_device *dev); 771 + extern void g4x_disable_fbc(struct drm_device *dev); 830 772 831 773 /** 832 774 * Lock test for when it's just for synchronization of ring access. 
··· 926 864 (dev)->pci_device == 0x2E12 || \ 927 865 (dev)->pci_device == 0x2E22 || \ 928 866 (dev)->pci_device == 0x2E32 || \ 867 + (dev)->pci_device == 0x2E42 || \ 929 868 (dev)->pci_device == 0x0042 || \ 930 869 (dev)->pci_device == 0x0046) 931 870 ··· 939 876 (dev)->pci_device == 0x2E12 || \ 940 877 (dev)->pci_device == 0x2E22 || \ 941 878 (dev)->pci_device == 0x2E32 || \ 879 + (dev)->pci_device == 0x2E42 || \ 942 880 IS_GM45(dev)) 943 881 944 882 #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) ··· 973 909 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 974 910 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 975 911 #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) 976 - #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) 912 + #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) 977 913 /* dsparb controlled by hw only */ 978 914 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 979 915 980 916 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) 981 917 #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 918 + #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) 982 919 983 920 #define PRIMARY_RINGBUFFER_SIZE (128*1024) 984 921
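The drm_i915_display_funcs table added above backs the "split display functions by chip type" commit: chip-specific implementations are installed once at init and callers dispatch through dev_priv->display instead of repeating chip checks. A rough sketch of the pattern follows; the i9xx_*/i965_* helper names are placeholders, not necessarily the functions the driver actually defines.

/* Illustration of the vtable pattern only; helper names are placeholders. */
static void example_init_display(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_I965G(dev))
                dev_priv->display.update_wm = i965_update_wm;
        else
                dev_priv->display.update_wm = i9xx_update_wm;

        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
}

/* Callers then need no per-chip branches of their own: */
static void example_update_watermarks(struct drm_device *dev, int planea_clock,
                                      int planeb_clock, int sr_hdisplay,
                                      int pixel_size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->display.update_wm)
                dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
                                            sr_hdisplay, pixel_size);
}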
+665 -237
drivers/gpu/drm/i915/i915_gem.c
··· 29 29 #include "drm.h" 30 30 #include "i915_drm.h" 31 31 #include "i915_drv.h" 32 + #include "i915_trace.h" 32 33 #include "intel_drv.h" 33 34 #include <linux/swap.h> 34 35 #include <linux/pci.h> ··· 49 48 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 50 49 unsigned alignment); 51 50 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 52 - static int i915_gem_evict_something(struct drm_device *dev); 51 + static int i915_gem_evict_something(struct drm_device *dev, int min_size); 52 + static int i915_gem_evict_from_inactive_list(struct drm_device *dev); 53 53 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 54 54 struct drm_i915_gem_pwrite *args, 55 55 struct drm_file *file_priv); 56 + 57 + static LIST_HEAD(shrink_list); 58 + static DEFINE_SPINLOCK(shrink_list_lock); 56 59 57 60 int i915_gem_do_init(struct drm_device *dev, unsigned long start, 58 61 unsigned long end) ··· 321 316 return ret; 322 317 } 323 318 319 + static inline gfp_t 320 + i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) 321 + { 322 + return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); 323 + } 324 + 325 + static inline void 326 + i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) 327 + { 328 + mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); 329 + } 330 + 331 + static int 332 + i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) 333 + { 334 + int ret; 335 + 336 + ret = i915_gem_object_get_pages(obj); 337 + 338 + /* If we've insufficient memory to map in the pages, attempt 339 + * to make some space by throwing out some old buffers. 340 + */ 341 + if (ret == -ENOMEM) { 342 + struct drm_device *dev = obj->dev; 343 + gfp_t gfp; 344 + 345 + ret = i915_gem_evict_something(dev, obj->size); 346 + if (ret) 347 + return ret; 348 + 349 + gfp = i915_gem_object_get_page_gfp_mask(obj); 350 + i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); 351 + ret = i915_gem_object_get_pages(obj); 352 + i915_gem_object_set_page_gfp_mask (obj, gfp); 353 + } 354 + 355 + return ret; 356 + } 357 + 324 358 /** 325 359 * This is the fallback shmem pread path, which allocates temporary storage 326 360 * in kernel space to copy_to_user into outside of the struct_mutex, so we ··· 411 367 412 368 mutex_lock(&dev->struct_mutex); 413 369 414 - ret = i915_gem_object_get_pages(obj); 415 - if (ret != 0) 370 + ret = i915_gem_object_get_pages_or_evict(obj); 371 + if (ret) 416 372 goto fail_unlock; 417 373 418 374 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, ··· 886 842 887 843 mutex_lock(&dev->struct_mutex); 888 844 889 - ret = i915_gem_object_get_pages(obj); 890 - if (ret != 0) 845 + ret = i915_gem_object_get_pages_or_evict(obj); 846 + if (ret) 891 847 goto fail_unlock; 892 848 893 849 ret = i915_gem_object_set_to_cpu_domain(obj, 1); ··· 1199 1155 /* Now bind it into the GTT if needed */ 1200 1156 mutex_lock(&dev->struct_mutex); 1201 1157 if (!obj_priv->gtt_space) { 1202 - ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); 1203 - if (ret) { 1204 - mutex_unlock(&dev->struct_mutex); 1205 - return VM_FAULT_SIGBUS; 1206 - } 1207 - 1208 - ret = i915_gem_object_set_to_gtt_domain(obj, write); 1209 - if (ret) { 1210 - mutex_unlock(&dev->struct_mutex); 1211 - return VM_FAULT_SIGBUS; 1212 - } 1158 + ret = i915_gem_object_bind_to_gtt(obj, 0); 1159 + if (ret) 1160 + goto unlock; 1213 1161 1214 1162 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1163 + 
1164 + ret = i915_gem_object_set_to_gtt_domain(obj, write); 1165 + if (ret) 1166 + goto unlock; 1215 1167 } 1216 1168 1217 1169 /* Need a new fence register? */ 1218 1170 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1219 1171 ret = i915_gem_object_get_fence_reg(obj); 1220 - if (ret) { 1221 - mutex_unlock(&dev->struct_mutex); 1222 - return VM_FAULT_SIGBUS; 1223 - } 1172 + if (ret) 1173 + goto unlock; 1224 1174 } 1225 1175 1226 1176 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + ··· 1222 1184 1223 1185 /* Finally, remap it using the new GTT offset */ 1224 1186 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1225 - 1187 + unlock: 1226 1188 mutex_unlock(&dev->struct_mutex); 1227 1189 1228 1190 switch (ret) { 1191 + case 0: 1192 + case -ERESTARTSYS: 1193 + return VM_FAULT_NOPAGE; 1229 1194 case -ENOMEM: 1230 1195 case -EAGAIN: 1231 1196 return VM_FAULT_OOM; 1232 - case -EFAULT: 1233 - case -EINVAL: 1234 - return VM_FAULT_SIGBUS; 1235 1197 default: 1236 - return VM_FAULT_NOPAGE; 1198 + return VM_FAULT_SIGBUS; 1237 1199 } 1238 1200 } 1239 1201 ··· 1426 1388 1427 1389 obj_priv = obj->driver_private; 1428 1390 1391 + if (obj_priv->madv != I915_MADV_WILLNEED) { 1392 + DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1393 + drm_gem_object_unreference(obj); 1394 + mutex_unlock(&dev->struct_mutex); 1395 + return -EINVAL; 1396 + } 1397 + 1398 + 1429 1399 if (!obj_priv->mmap_offset) { 1430 1400 ret = i915_gem_create_mmap_offset(obj); 1431 1401 if (ret) { ··· 1445 1399 1446 1400 args->offset = obj_priv->mmap_offset; 1447 1401 1448 - obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); 1449 - 1450 - /* Make sure the alignment is correct for fence regs etc */ 1451 - if (obj_priv->agp_mem && 1452 - (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { 1453 - drm_gem_object_unreference(obj); 1454 - mutex_unlock(&dev->struct_mutex); 1455 - return -EINVAL; 1456 - } 1457 - 1458 1402 /* 1459 1403 * Pull it into the GTT so that we have a page list (makes the 1460 1404 * initial fault faster and any subsequent flushing possible). 
1461 1405 */ 1462 1406 if (!obj_priv->agp_mem) { 1463 - ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); 1407 + ret = i915_gem_object_bind_to_gtt(obj, 0); 1464 1408 if (ret) { 1465 1409 drm_gem_object_unreference(obj); 1466 1410 mutex_unlock(&dev->struct_mutex); ··· 1473 1437 int i; 1474 1438 1475 1439 BUG_ON(obj_priv->pages_refcount == 0); 1440 + BUG_ON(obj_priv->madv == __I915_MADV_PURGED); 1476 1441 1477 1442 if (--obj_priv->pages_refcount != 0) 1478 1443 return; ··· 1481 1444 if (obj_priv->tiling_mode != I915_TILING_NONE) 1482 1445 i915_gem_object_save_bit_17_swizzle(obj); 1483 1446 1484 - for (i = 0; i < page_count; i++) 1485 - if (obj_priv->pages[i] != NULL) { 1486 - if (obj_priv->dirty) 1487 - set_page_dirty(obj_priv->pages[i]); 1447 + if (obj_priv->madv == I915_MADV_DONTNEED) 1448 + obj_priv->dirty = 0; 1449 + 1450 + for (i = 0; i < page_count; i++) { 1451 + if (obj_priv->pages[i] == NULL) 1452 + break; 1453 + 1454 + if (obj_priv->dirty) 1455 + set_page_dirty(obj_priv->pages[i]); 1456 + 1457 + if (obj_priv->madv == I915_MADV_WILLNEED) 1488 1458 mark_page_accessed(obj_priv->pages[i]); 1489 - page_cache_release(obj_priv->pages[i]); 1490 - } 1459 + 1460 + page_cache_release(obj_priv->pages[i]); 1461 + } 1491 1462 obj_priv->dirty = 0; 1492 1463 1493 1464 drm_free_large(obj_priv->pages); ··· 1532 1487 BUG_ON(!obj_priv->active); 1533 1488 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 1534 1489 obj_priv->last_rendering_seqno = 0; 1490 + } 1491 + 1492 + /* Immediately discard the backing storage */ 1493 + static void 1494 + i915_gem_object_truncate(struct drm_gem_object *obj) 1495 + { 1496 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1497 + struct inode *inode; 1498 + 1499 + inode = obj->filp->f_path.dentry->d_inode; 1500 + if (inode->i_op->truncate) 1501 + inode->i_op->truncate (inode); 1502 + 1503 + obj_priv->madv = __I915_MADV_PURGED; 1504 + } 1505 + 1506 + static inline int 1507 + i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) 1508 + { 1509 + return obj_priv->madv == I915_MADV_DONTNEED; 1535 1510 } 1536 1511 1537 1512 static void ··· 1642 1577 1643 1578 if ((obj->write_domain & flush_domains) == 1644 1579 obj->write_domain) { 1580 + uint32_t old_write_domain = obj->write_domain; 1581 + 1645 1582 obj->write_domain = 0; 1646 1583 i915_gem_object_move_to_active(obj, seqno); 1584 + 1585 + trace_i915_gem_object_change_domain(obj, 1586 + obj->read_domains, 1587 + old_write_domain); 1647 1588 } 1648 1589 } 1649 1590 1650 1591 } 1651 1592 1652 - if (was_empty && !dev_priv->mm.suspended) 1653 - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1593 + if (!dev_priv->mm.suspended) { 1594 + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1595 + if (was_empty) 1596 + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1597 + } 1654 1598 return seqno; 1655 1599 } 1656 1600 ··· 1696 1622 struct drm_i915_gem_request *request) 1697 1623 { 1698 1624 drm_i915_private_t *dev_priv = dev->dev_private; 1625 + 1626 + trace_i915_gem_request_retire(dev, request->seqno); 1699 1627 1700 1628 /* Move any buffers on the active list that are no longer referenced 1701 1629 * by the ringbuffer to the flushing/inactive lists as appropriate. ··· 1747 1671 /** 1748 1672 * Returns true if seq1 is later than seq2. 
1749 1673 */ 1750 - static int 1674 + bool 1751 1675 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1752 1676 { 1753 1677 return (int32_t)(seq1 - seq2) >= 0; ··· 1785 1709 retiring_seqno = request->seqno; 1786 1710 1787 1711 if (i915_seqno_passed(seqno, retiring_seqno) || 1788 - dev_priv->mm.wedged) { 1712 + atomic_read(&dev_priv->mm.wedged)) { 1789 1713 i915_gem_retire_request(dev, request); 1790 1714 1791 1715 list_del(&request->list); ··· 1827 1751 1828 1752 BUG_ON(seqno == 0); 1829 1753 1754 + if (atomic_read(&dev_priv->mm.wedged)) 1755 + return -EIO; 1756 + 1830 1757 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1831 1758 if (IS_IGDNG(dev)) 1832 1759 ier = I915_READ(DEIER) | I915_READ(GTIER); ··· 1842 1763 i915_driver_irq_postinstall(dev); 1843 1764 } 1844 1765 1766 + trace_i915_gem_request_wait_begin(dev, seqno); 1767 + 1845 1768 dev_priv->mm.waiting_gem_seqno = seqno; 1846 1769 i915_user_irq_get(dev); 1847 1770 ret = wait_event_interruptible(dev_priv->irq_queue, 1848 1771 i915_seqno_passed(i915_get_gem_seqno(dev), 1849 1772 seqno) || 1850 - dev_priv->mm.wedged); 1773 + atomic_read(&dev_priv->mm.wedged)); 1851 1774 i915_user_irq_put(dev); 1852 1775 dev_priv->mm.waiting_gem_seqno = 0; 1776 + 1777 + trace_i915_gem_request_wait_end(dev, seqno); 1853 1778 } 1854 - if (dev_priv->mm.wedged) 1779 + if (atomic_read(&dev_priv->mm.wedged)) 1855 1780 ret = -EIO; 1856 1781 1857 1782 if (ret && ret != -ERESTARTSYS) ··· 1886 1803 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 1887 1804 invalidate_domains, flush_domains); 1888 1805 #endif 1806 + trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, 1807 + invalidate_domains, flush_domains); 1889 1808 1890 1809 if (flush_domains & I915_GEM_DOMAIN_CPU) 1891 1810 drm_agp_chipset_flush(dev); ··· 2000 1915 return -EINVAL; 2001 1916 } 2002 1917 1918 + /* blow away mappings if mapped through GTT */ 1919 + i915_gem_release_mmap(obj); 1920 + 1921 + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1922 + i915_gem_clear_fence_reg(obj); 1923 + 2003 1924 /* Move the object to the CPU domain to ensure that 2004 1925 * any possible CPU writes while it's not in the GTT 2005 1926 * are flushed when we go to remap it. 
This will ··· 2019 1928 return ret; 2020 1929 } 2021 1930 1931 + BUG_ON(obj_priv->active); 1932 + 2022 1933 if (obj_priv->agp_mem != NULL) { 2023 1934 drm_unbind_agp(obj_priv->agp_mem); 2024 1935 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2025 1936 obj_priv->agp_mem = NULL; 2026 1937 } 2027 1938 2028 - BUG_ON(obj_priv->active); 2029 - 2030 - /* blow away mappings if mapped through GTT */ 2031 - i915_gem_release_mmap(obj); 2032 - 2033 - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 2034 - i915_gem_clear_fence_reg(obj); 2035 - 2036 1939 i915_gem_object_put_pages(obj); 1940 + BUG_ON(obj_priv->pages_refcount); 2037 1941 2038 1942 if (obj_priv->gtt_space) { 2039 1943 atomic_dec(&dev->gtt_count); ··· 2042 1956 if (!list_empty(&obj_priv->list)) 2043 1957 list_del_init(&obj_priv->list); 2044 1958 1959 + if (i915_gem_object_is_purgeable(obj_priv)) 1960 + i915_gem_object_truncate(obj); 1961 + 1962 + trace_i915_gem_object_unbind(obj); 1963 + 1964 + return 0; 1965 + } 1966 + 1967 + static struct drm_gem_object * 1968 + i915_gem_find_inactive_object(struct drm_device *dev, int min_size) 1969 + { 1970 + drm_i915_private_t *dev_priv = dev->dev_private; 1971 + struct drm_i915_gem_object *obj_priv; 1972 + struct drm_gem_object *best = NULL; 1973 + struct drm_gem_object *first = NULL; 1974 + 1975 + /* Try to find the smallest clean object */ 1976 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 1977 + struct drm_gem_object *obj = obj_priv->obj; 1978 + if (obj->size >= min_size) { 1979 + if ((!obj_priv->dirty || 1980 + i915_gem_object_is_purgeable(obj_priv)) && 1981 + (!best || obj->size < best->size)) { 1982 + best = obj; 1983 + if (best->size == min_size) 1984 + return best; 1985 + } 1986 + if (!first) 1987 + first = obj; 1988 + } 1989 + } 1990 + 1991 + return best ? 
best : first; 1992 + } 1993 + 1994 + static int 1995 + i915_gem_evict_everything(struct drm_device *dev) 1996 + { 1997 + drm_i915_private_t *dev_priv = dev->dev_private; 1998 + uint32_t seqno; 1999 + int ret; 2000 + bool lists_empty; 2001 + 2002 + spin_lock(&dev_priv->mm.active_list_lock); 2003 + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2004 + list_empty(&dev_priv->mm.flushing_list) && 2005 + list_empty(&dev_priv->mm.active_list)); 2006 + spin_unlock(&dev_priv->mm.active_list_lock); 2007 + 2008 + if (lists_empty) 2009 + return -ENOSPC; 2010 + 2011 + /* Flush everything (on to the inactive lists) and evict */ 2012 + i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2013 + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); 2014 + if (seqno == 0) 2015 + return -ENOMEM; 2016 + 2017 + ret = i915_wait_request(dev, seqno); 2018 + if (ret) 2019 + return ret; 2020 + 2021 + ret = i915_gem_evict_from_inactive_list(dev); 2022 + if (ret) 2023 + return ret; 2024 + 2025 + spin_lock(&dev_priv->mm.active_list_lock); 2026 + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2027 + list_empty(&dev_priv->mm.flushing_list) && 2028 + list_empty(&dev_priv->mm.active_list)); 2029 + spin_unlock(&dev_priv->mm.active_list_lock); 2030 + BUG_ON(!lists_empty); 2031 + 2045 2032 return 0; 2046 2033 } 2047 2034 2048 2035 static int 2049 - i915_gem_evict_something(struct drm_device *dev) 2036 + i915_gem_evict_something(struct drm_device *dev, int min_size) 2050 2037 { 2051 2038 drm_i915_private_t *dev_priv = dev->dev_private; 2052 2039 struct drm_gem_object *obj; 2053 - struct drm_i915_gem_object *obj_priv; 2054 - int ret = 0; 2040 + int ret; 2055 2041 2056 2042 for (;;) { 2043 + i915_gem_retire_requests(dev); 2044 + 2057 2045 /* If there's an inactive buffer available now, grab it 2058 2046 * and be done. 2059 2047 */ 2060 - if (!list_empty(&dev_priv->mm.inactive_list)) { 2061 - obj_priv = list_first_entry(&dev_priv->mm.inactive_list, 2062 - struct drm_i915_gem_object, 2063 - list); 2064 - obj = obj_priv->obj; 2065 - BUG_ON(obj_priv->pin_count != 0); 2048 + obj = i915_gem_find_inactive_object(dev, min_size); 2049 + if (obj) { 2050 + struct drm_i915_gem_object *obj_priv; 2051 + 2066 2052 #if WATCH_LRU 2067 2053 DRM_INFO("%s: evicting %p\n", __func__, obj); 2068 2054 #endif 2055 + obj_priv = obj->driver_private; 2056 + BUG_ON(obj_priv->pin_count != 0); 2069 2057 BUG_ON(obj_priv->active); 2070 2058 2071 2059 /* Wait on the rendering and unbind the buffer. */ 2072 - ret = i915_gem_object_unbind(obj); 2073 - break; 2060 + return i915_gem_object_unbind(obj); 2074 2061 } 2075 2062 2076 2063 /* If we didn't get anything, but the ring is still processing 2077 - * things, wait for one of those things to finish and hopefully 2078 - * leave us a buffer to evict. 2064 + * things, wait for the next to finish and hopefully leave us 2065 + * a buffer to evict. 2079 2066 */ 2080 2067 if (!list_empty(&dev_priv->mm.request_list)) { 2081 2068 struct drm_i915_gem_request *request; ··· 2159 2000 2160 2001 ret = i915_wait_request(dev, request->seqno); 2161 2002 if (ret) 2162 - break; 2003 + return ret; 2163 2004 2164 - /* if waiting caused an object to become inactive, 2165 - * then loop around and wait for it. 
Otherwise, we 2166 - * assume that waiting freed and unbound something, 2167 - * so there should now be some space in the GTT 2168 - */ 2169 - if (!list_empty(&dev_priv->mm.inactive_list)) 2170 - continue; 2171 - break; 2005 + continue; 2172 2006 } 2173 2007 2174 2008 /* If we didn't have anything on the request list but there ··· 2170 2018 * will get moved to inactive. 2171 2019 */ 2172 2020 if (!list_empty(&dev_priv->mm.flushing_list)) { 2173 - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 2174 - struct drm_i915_gem_object, 2175 - list); 2176 - obj = obj_priv->obj; 2021 + struct drm_i915_gem_object *obj_priv; 2177 2022 2178 - i915_gem_flush(dev, 2179 - obj->write_domain, 2180 - obj->write_domain); 2181 - i915_add_request(dev, NULL, obj->write_domain); 2023 + /* Find an object that we can immediately reuse */ 2024 + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { 2025 + obj = obj_priv->obj; 2026 + if (obj->size >= min_size) 2027 + break; 2182 2028 2183 - obj = NULL; 2184 - continue; 2029 + obj = NULL; 2030 + } 2031 + 2032 + if (obj != NULL) { 2033 + uint32_t seqno; 2034 + 2035 + i915_gem_flush(dev, 2036 + obj->write_domain, 2037 + obj->write_domain); 2038 + seqno = i915_add_request(dev, NULL, obj->write_domain); 2039 + if (seqno == 0) 2040 + return -ENOMEM; 2041 + 2042 + ret = i915_wait_request(dev, seqno); 2043 + if (ret) 2044 + return ret; 2045 + 2046 + continue; 2047 + } 2185 2048 } 2186 2049 2187 - DRM_ERROR("inactive empty %d request empty %d " 2188 - "flushing empty %d\n", 2189 - list_empty(&dev_priv->mm.inactive_list), 2190 - list_empty(&dev_priv->mm.request_list), 2191 - list_empty(&dev_priv->mm.flushing_list)); 2192 - /* If we didn't do any of the above, there's nothing to be done 2193 - * and we just can't fit it in. 2050 + /* If we didn't do any of the above, there's no single buffer 2051 + * large enough to swap out for the new one, so just evict 2052 + * everything and start again. (This should be rare.) 
2194 2053 */ 2195 - return -ENOSPC; 2054 + if (!list_empty (&dev_priv->mm.inactive_list)) 2055 + return i915_gem_evict_from_inactive_list(dev); 2056 + else 2057 + return i915_gem_evict_everything(dev); 2196 2058 } 2197 - return ret; 2198 - } 2199 - 2200 - static int 2201 - i915_gem_evict_everything(struct drm_device *dev) 2202 - { 2203 - int ret; 2204 - 2205 - for (;;) { 2206 - ret = i915_gem_evict_something(dev); 2207 - if (ret != 0) 2208 - break; 2209 - } 2210 - if (ret == -ENOSPC) 2211 - return 0; 2212 - return ret; 2213 2059 } 2214 2060 2215 2061 int ··· 2230 2080 BUG_ON(obj_priv->pages != NULL); 2231 2081 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); 2232 2082 if (obj_priv->pages == NULL) { 2233 - DRM_ERROR("Faled to allocate page list\n"); 2234 2083 obj_priv->pages_refcount--; 2235 2084 return -ENOMEM; 2236 2085 } ··· 2240 2091 page = read_mapping_page(mapping, i, NULL); 2241 2092 if (IS_ERR(page)) { 2242 2093 ret = PTR_ERR(page); 2243 - DRM_ERROR("read_mapping_page failed: %d\n", ret); 2244 2094 i915_gem_object_put_pages(obj); 2245 2095 return ret; 2246 2096 } ··· 2476 2328 else 2477 2329 i830_write_fence_reg(reg); 2478 2330 2331 + trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); 2332 + 2479 2333 return 0; 2480 2334 } 2481 2335 ··· 2560 2410 drm_i915_private_t *dev_priv = dev->dev_private; 2561 2411 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2562 2412 struct drm_mm_node *free_space; 2563 - int page_count, ret; 2413 + bool retry_alloc = false; 2414 + int ret; 2564 2415 2565 2416 if (dev_priv->mm.suspended) 2566 2417 return -EBUSY; 2418 + 2419 + if (obj_priv->madv != I915_MADV_WILLNEED) { 2420 + DRM_ERROR("Attempting to bind a purgeable object\n"); 2421 + return -EINVAL; 2422 + } 2423 + 2567 2424 if (alignment == 0) 2568 2425 alignment = i915_gem_get_gtt_alignment(obj); 2569 2426 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { ··· 2590 2433 } 2591 2434 } 2592 2435 if (obj_priv->gtt_space == NULL) { 2593 - bool lists_empty; 2594 - 2595 2436 /* If the gtt is empty and we're still having trouble 2596 2437 * fitting our object in, we're out of memory. 
2597 2438 */ 2598 2439 #if WATCH_LRU 2599 2440 DRM_INFO("%s: GTT full, evicting something\n", __func__); 2600 2441 #endif 2601 - spin_lock(&dev_priv->mm.active_list_lock); 2602 - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2603 - list_empty(&dev_priv->mm.flushing_list) && 2604 - list_empty(&dev_priv->mm.active_list)); 2605 - spin_unlock(&dev_priv->mm.active_list_lock); 2606 - if (lists_empty) { 2607 - DRM_ERROR("GTT full, but LRU list empty\n"); 2608 - return -ENOSPC; 2609 - } 2610 - 2611 - ret = i915_gem_evict_something(dev); 2612 - if (ret != 0) { 2613 - if (ret != -ERESTARTSYS) 2614 - DRM_ERROR("Failed to evict a buffer %d\n", ret); 2442 + ret = i915_gem_evict_something(dev, obj->size); 2443 + if (ret) 2615 2444 return ret; 2616 - } 2445 + 2617 2446 goto search_free; 2618 2447 } 2619 2448 ··· 2607 2464 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2608 2465 obj->size, obj_priv->gtt_offset); 2609 2466 #endif 2467 + if (retry_alloc) { 2468 + i915_gem_object_set_page_gfp_mask (obj, 2469 + i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); 2470 + } 2610 2471 ret = i915_gem_object_get_pages(obj); 2472 + if (retry_alloc) { 2473 + i915_gem_object_set_page_gfp_mask (obj, 2474 + i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); 2475 + } 2611 2476 if (ret) { 2612 2477 drm_mm_put_block(obj_priv->gtt_space); 2613 2478 obj_priv->gtt_space = NULL; 2479 + 2480 + if (ret == -ENOMEM) { 2481 + /* first try to clear up some space from the GTT */ 2482 + ret = i915_gem_evict_something(dev, obj->size); 2483 + if (ret) { 2484 + /* now try to shrink everyone else */ 2485 + if (! retry_alloc) { 2486 + retry_alloc = true; 2487 + goto search_free; 2488 + } 2489 + 2490 + return ret; 2491 + } 2492 + 2493 + goto search_free; 2494 + } 2495 + 2614 2496 return ret; 2615 2497 } 2616 2498 2617 - page_count = obj->size / PAGE_SIZE; 2618 2499 /* Create an AGP memory structure pointing at our pages, and bind it 2619 2500 * into the GTT. 2620 2501 */ 2621 2502 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2622 2503 obj_priv->pages, 2623 - page_count, 2504 + obj->size >> PAGE_SHIFT, 2624 2505 obj_priv->gtt_offset, 2625 2506 obj_priv->agp_type); 2626 2507 if (obj_priv->agp_mem == NULL) { 2627 2508 i915_gem_object_put_pages(obj); 2628 2509 drm_mm_put_block(obj_priv->gtt_space); 2629 2510 obj_priv->gtt_space = NULL; 2630 - return -ENOMEM; 2511 + 2512 + ret = i915_gem_evict_something(dev, obj->size); 2513 + if (ret) 2514 + return ret; 2515 + 2516 + goto search_free; 2631 2517 } 2632 2518 atomic_inc(&dev->gtt_count); 2633 2519 atomic_add(obj->size, &dev->gtt_memory); ··· 2667 2495 */ 2668 2496 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2669 2497 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2498 + 2499 + trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); 2670 2500 2671 2501 return 0; 2672 2502 } ··· 2685 2511 if (obj_priv->pages == NULL) 2686 2512 return; 2687 2513 2688 - /* XXX: The 865 in particular appears to be weird in how it handles 2689 - * cache flushing. We haven't figured it out, but the 2690 - * clflush+agp_chipset_flush doesn't appear to successfully get the 2691 - * data visible to the PGU, while wbinvd + agp_chipset_flush does. 
2692 - */ 2693 - if (IS_I865G(obj->dev)) { 2694 - wbinvd(); 2695 - return; 2696 - } 2514 + trace_i915_gem_object_clflush(obj); 2697 2515 2698 2516 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2699 2517 } ··· 2696 2530 { 2697 2531 struct drm_device *dev = obj->dev; 2698 2532 uint32_t seqno; 2533 + uint32_t old_write_domain; 2699 2534 2700 2535 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2701 2536 return; 2702 2537 2703 2538 /* Queue the GPU write cache flushing we need. */ 2539 + old_write_domain = obj->write_domain; 2704 2540 i915_gem_flush(dev, 0, obj->write_domain); 2705 2541 seqno = i915_add_request(dev, NULL, obj->write_domain); 2706 2542 obj->write_domain = 0; 2707 2543 i915_gem_object_move_to_active(obj, seqno); 2544 + 2545 + trace_i915_gem_object_change_domain(obj, 2546 + obj->read_domains, 2547 + old_write_domain); 2708 2548 } 2709 2549 2710 2550 /** Flushes the GTT write domain for the object if it's dirty. */ 2711 2551 static void 2712 2552 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) 2713 2553 { 2554 + uint32_t old_write_domain; 2555 + 2714 2556 if (obj->write_domain != I915_GEM_DOMAIN_GTT) 2715 2557 return; 2716 2558 ··· 2726 2552 * to it immediately go to main memory as far as we know, so there's 2727 2553 * no chipset flush. It also doesn't land in render cache. 2728 2554 */ 2555 + old_write_domain = obj->write_domain; 2729 2556 obj->write_domain = 0; 2557 + 2558 + trace_i915_gem_object_change_domain(obj, 2559 + obj->read_domains, 2560 + old_write_domain); 2730 2561 } 2731 2562 2732 2563 /** Flushes the CPU write domain for the object if it's dirty. */ ··· 2739 2560 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) 2740 2561 { 2741 2562 struct drm_device *dev = obj->dev; 2563 + uint32_t old_write_domain; 2742 2564 2743 2565 if (obj->write_domain != I915_GEM_DOMAIN_CPU) 2744 2566 return; 2745 2567 2746 2568 i915_gem_clflush_object(obj); 2747 2569 drm_agp_chipset_flush(dev); 2570 + old_write_domain = obj->write_domain; 2748 2571 obj->write_domain = 0; 2572 + 2573 + trace_i915_gem_object_change_domain(obj, 2574 + obj->read_domains, 2575 + old_write_domain); 2749 2576 } 2750 2577 2751 2578 /** ··· 2764 2579 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2765 2580 { 2766 2581 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2582 + uint32_t old_write_domain, old_read_domains; 2767 2583 int ret; 2768 2584 2769 2585 /* Not valid to be called on unbound objects. */ ··· 2776 2590 ret = i915_gem_object_wait_rendering(obj); 2777 2591 if (ret != 0) 2778 2592 return ret; 2593 + 2594 + old_write_domain = obj->write_domain; 2595 + old_read_domains = obj->read_domains; 2779 2596 2780 2597 /* If we're writing through the GTT domain, then CPU and GPU caches 2781 2598 * will need to be invalidated at next use. ··· 2798 2609 obj_priv->dirty = 1; 2799 2610 } 2800 2611 2612 + trace_i915_gem_object_change_domain(obj, 2613 + old_read_domains, 2614 + old_write_domain); 2615 + 2801 2616 return 0; 2802 2617 } 2803 2618 ··· 2814 2621 static int 2815 2622 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2816 2623 { 2624 + uint32_t old_write_domain, old_read_domains; 2817 2625 int ret; 2818 2626 2819 2627 i915_gem_object_flush_gpu_write_domain(obj); ··· 2829 2635 * finish invalidating it and free the per-page flags. 
2830 2636 */ 2831 2637 i915_gem_object_set_to_full_cpu_read_domain(obj); 2638 + 2639 + old_write_domain = obj->write_domain; 2640 + old_read_domains = obj->read_domains; 2832 2641 2833 2642 /* Flush the CPU cache if it's still invalid. */ 2834 2643 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { ··· 2852 2655 obj->read_domains &= I915_GEM_DOMAIN_CPU; 2853 2656 obj->write_domain = I915_GEM_DOMAIN_CPU; 2854 2657 } 2658 + 2659 + trace_i915_gem_object_change_domain(obj, 2660 + old_read_domains, 2661 + old_write_domain); 2855 2662 2856 2663 return 0; 2857 2664 } ··· 2978 2777 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2979 2778 uint32_t invalidate_domains = 0; 2980 2779 uint32_t flush_domains = 0; 2780 + uint32_t old_read_domains; 2981 2781 2982 2782 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); 2983 2783 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); ··· 3025 2823 i915_gem_clflush_object(obj); 3026 2824 } 3027 2825 2826 + old_read_domains = obj->read_domains; 2827 + 3028 2828 /* The actual obj->write_domain will be updated with 3029 2829 * pending_write_domain after we emit the accumulated flush for all 3030 2830 * of our domain changes in execbuffers (which clears objects' ··· 3045 2841 obj->read_domains, obj->write_domain, 3046 2842 dev->invalidate_domains, dev->flush_domains); 3047 2843 #endif 2844 + 2845 + trace_i915_gem_object_change_domain(obj, 2846 + old_read_domains, 2847 + obj->write_domain); 3048 2848 } 3049 2849 3050 2850 /** ··· 3101 2893 uint64_t offset, uint64_t size) 3102 2894 { 3103 2895 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2896 + uint32_t old_read_domains; 3104 2897 int i, ret; 3105 2898 3106 2899 if (offset == 0 && size == obj->size) ··· 3148 2939 */ 3149 2940 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3150 2941 2942 + old_read_domains = obj->read_domains; 3151 2943 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2944 + 2945 + trace_i915_gem_object_change_domain(obj, 2946 + old_read_domains, 2947 + obj->write_domain); 3152 2948 3153 2949 return 0; 3154 2950 } ··· 3198 2984 } 3199 2985 target_obj_priv = target_obj->driver_private; 3200 2986 2987 + #if WATCH_RELOC 2988 + DRM_INFO("%s: obj %p offset %08x target %d " 2989 + "read %08x write %08x gtt %08x " 2990 + "presumed %08x delta %08x\n", 2991 + __func__, 2992 + obj, 2993 + (int) reloc->offset, 2994 + (int) reloc->target_handle, 2995 + (int) reloc->read_domains, 2996 + (int) reloc->write_domain, 2997 + (int) target_obj_priv->gtt_offset, 2998 + (int) reloc->presumed_offset, 2999 + reloc->delta); 3000 + #endif 3001 + 3201 3002 /* The target buffer should have appeared before us in the 3202 3003 * exec_object list, so it should have a GTT space bound by now. 
3203 3004 */ ··· 3224 2995 return -EINVAL; 3225 2996 } 3226 2997 2998 + /* Validate that the target is in a valid r/w GPU domain */ 2999 + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3000 + reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3001 + DRM_ERROR("reloc with read/write CPU domains: " 3002 + "obj %p target %d offset %d " 3003 + "read %08x write %08x", 3004 + obj, reloc->target_handle, 3005 + (int) reloc->offset, 3006 + reloc->read_domains, 3007 + reloc->write_domain); 3008 + drm_gem_object_unreference(target_obj); 3009 + i915_gem_object_unpin(obj); 3010 + return -EINVAL; 3011 + } 3012 + if (reloc->write_domain && target_obj->pending_write_domain && 3013 + reloc->write_domain != target_obj->pending_write_domain) { 3014 + DRM_ERROR("Write domain conflict: " 3015 + "obj %p target %d offset %d " 3016 + "new %08x old %08x\n", 3017 + obj, reloc->target_handle, 3018 + (int) reloc->offset, 3019 + reloc->write_domain, 3020 + target_obj->pending_write_domain); 3021 + drm_gem_object_unreference(target_obj); 3022 + i915_gem_object_unpin(obj); 3023 + return -EINVAL; 3024 + } 3025 + 3026 + target_obj->pending_read_domains |= reloc->read_domains; 3027 + target_obj->pending_write_domain |= reloc->write_domain; 3028 + 3029 + /* If the relocation already has the right value in it, no 3030 + * more work needs to be done. 3031 + */ 3032 + if (target_obj_priv->gtt_offset == reloc->presumed_offset) { 3033 + drm_gem_object_unreference(target_obj); 3034 + continue; 3035 + } 3036 + 3037 + /* Check that the relocation address is valid... */ 3227 3038 if (reloc->offset > obj->size - 4) { 3228 3039 DRM_ERROR("Relocation beyond object bounds: " 3229 3040 "obj %p target %d offset %d size %d.\n", ··· 3283 3014 return -EINVAL; 3284 3015 } 3285 3016 3286 - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3287 - reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3288 - DRM_ERROR("reloc with read/write CPU domains: " 3289 - "obj %p target %d offset %d " 3290 - "read %08x write %08x", 3017 + /* and points to somewhere within the target object. 
*/ 3018 + if (reloc->delta >= target_obj->size) { 3019 + DRM_ERROR("Relocation beyond target object bounds: " 3020 + "obj %p target %d delta %d size %d.\n", 3291 3021 obj, reloc->target_handle, 3292 - (int) reloc->offset, 3293 - reloc->read_domains, 3294 - reloc->write_domain); 3022 + (int) reloc->delta, (int) target_obj->size); 3295 3023 drm_gem_object_unreference(target_obj); 3296 3024 i915_gem_object_unpin(obj); 3297 3025 return -EINVAL; 3298 - } 3299 - 3300 - if (reloc->write_domain && target_obj->pending_write_domain && 3301 - reloc->write_domain != target_obj->pending_write_domain) { 3302 - DRM_ERROR("Write domain conflict: " 3303 - "obj %p target %d offset %d " 3304 - "new %08x old %08x\n", 3305 - obj, reloc->target_handle, 3306 - (int) reloc->offset, 3307 - reloc->write_domain, 3308 - target_obj->pending_write_domain); 3309 - drm_gem_object_unreference(target_obj); 3310 - i915_gem_object_unpin(obj); 3311 - return -EINVAL; 3312 - } 3313 - 3314 - #if WATCH_RELOC 3315 - DRM_INFO("%s: obj %p offset %08x target %d " 3316 - "read %08x write %08x gtt %08x " 3317 - "presumed %08x delta %08x\n", 3318 - __func__, 3319 - obj, 3320 - (int) reloc->offset, 3321 - (int) reloc->target_handle, 3322 - (int) reloc->read_domains, 3323 - (int) reloc->write_domain, 3324 - (int) target_obj_priv->gtt_offset, 3325 - (int) reloc->presumed_offset, 3326 - reloc->delta); 3327 - #endif 3328 - 3329 - target_obj->pending_read_domains |= reloc->read_domains; 3330 - target_obj->pending_write_domain |= reloc->write_domain; 3331 - 3332 - /* If the relocation already has the right value in it, no 3333 - * more work needs to be done. 3334 - */ 3335 - if (target_obj_priv->gtt_offset == reloc->presumed_offset) { 3336 - drm_gem_object_unreference(target_obj); 3337 - continue; 3338 3026 } 3339 3027 3340 3028 ret = i915_gem_object_set_to_gtt_domain(obj, 1); ··· 3351 3125 3352 3126 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3353 3127 exec_len = (uint32_t) exec->batch_len; 3128 + 3129 + trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); 3354 3130 3355 3131 count = nbox ? 
nbox : 1; 3356 3132 ··· 3591 3363 3592 3364 i915_verify_inactive(dev, __FILE__, __LINE__); 3593 3365 3594 - if (dev_priv->mm.wedged) { 3366 + if (atomic_read(&dev_priv->mm.wedged)) { 3595 3367 DRM_ERROR("Execbuf while wedged\n"); 3596 3368 mutex_unlock(&dev->struct_mutex); 3597 3369 ret = -EIO; ··· 3649 3421 3650 3422 /* error other than GTT full, or we've already tried again */ 3651 3423 if (ret != -ENOSPC || pin_tries >= 1) { 3652 - if (ret != -ERESTARTSYS) 3653 - DRM_ERROR("Failed to pin buffers %d\n", ret); 3424 + if (ret != -ERESTARTSYS) { 3425 + unsigned long long total_size = 0; 3426 + for (i = 0; i < args->buffer_count; i++) 3427 + total_size += object_list[i]->size; 3428 + DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", 3429 + pinned+1, args->buffer_count, 3430 + total_size, ret); 3431 + DRM_ERROR("%d objects [%d pinned], " 3432 + "%d object bytes [%d pinned], " 3433 + "%d/%d gtt bytes\n", 3434 + atomic_read(&dev->object_count), 3435 + atomic_read(&dev->pin_count), 3436 + atomic_read(&dev->object_memory), 3437 + atomic_read(&dev->pin_memory), 3438 + atomic_read(&dev->gtt_memory), 3439 + dev->gtt_total); 3440 + } 3654 3441 goto err; 3655 3442 } 3656 3443 ··· 3676 3433 3677 3434 /* evict everyone we can from the aperture */ 3678 3435 ret = i915_gem_evict_everything(dev); 3679 - if (ret) 3436 + if (ret && ret != -ENOSPC) 3680 3437 goto err; 3681 3438 } 3682 3439 ··· 3732 3489 3733 3490 for (i = 0; i < args->buffer_count; i++) { 3734 3491 struct drm_gem_object *obj = object_list[i]; 3492 + uint32_t old_write_domain = obj->write_domain; 3735 3493 3736 3494 obj->write_domain = obj->pending_write_domain; 3495 + trace_i915_gem_object_change_domain(obj, 3496 + obj->read_domains, 3497 + old_write_domain); 3737 3498 } 3738 3499 3739 3500 i915_verify_inactive(dev, __FILE__, __LINE__); ··· 3854 3607 i915_verify_inactive(dev, __FILE__, __LINE__); 3855 3608 if (obj_priv->gtt_space == NULL) { 3856 3609 ret = i915_gem_object_bind_to_gtt(obj, alignment); 3857 - if (ret != 0) { 3858 - if (ret != -EBUSY && ret != -ERESTARTSYS) 3859 - DRM_ERROR("Failure to bind: %d\n", ret); 3610 + if (ret) 3860 3611 return ret; 3861 - } 3862 3612 } 3863 3613 /* 3864 3614 * Pre-965 chips need a fence register set up in order to ··· 3934 3690 return -EBADF; 3935 3691 } 3936 3692 obj_priv = obj->driver_private; 3693 + 3694 + if (obj_priv->madv != I915_MADV_WILLNEED) { 3695 + DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3696 + drm_gem_object_unreference(obj); 3697 + mutex_unlock(&dev->struct_mutex); 3698 + return -EINVAL; 3699 + } 3937 3700 3938 3701 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { 3939 3702 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", ··· 4054 3803 return i915_gem_ring_throttle(dev, file_priv); 4055 3804 } 4056 3805 3806 + int 3807 + i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3808 + struct drm_file *file_priv) 3809 + { 3810 + struct drm_i915_gem_madvise *args = data; 3811 + struct drm_gem_object *obj; 3812 + struct drm_i915_gem_object *obj_priv; 3813 + 3814 + switch (args->madv) { 3815 + case I915_MADV_DONTNEED: 3816 + case I915_MADV_WILLNEED: 3817 + break; 3818 + default: 3819 + return -EINVAL; 3820 + } 3821 + 3822 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3823 + if (obj == NULL) { 3824 + DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", 3825 + args->handle); 3826 + return -EBADF; 3827 + } 3828 + 3829 + mutex_lock(&dev->struct_mutex); 3830 + obj_priv = obj->driver_private; 3831 + 3832 + if 
(obj_priv->pin_count) { 3833 + drm_gem_object_unreference(obj); 3834 + mutex_unlock(&dev->struct_mutex); 3835 + 3836 + DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); 3837 + return -EINVAL; 3838 + } 3839 + 3840 + if (obj_priv->madv != __I915_MADV_PURGED) 3841 + obj_priv->madv = args->madv; 3842 + 3843 + /* if the object is no longer bound, discard its backing storage */ 3844 + if (i915_gem_object_is_purgeable(obj_priv) && 3845 + obj_priv->gtt_space == NULL) 3846 + i915_gem_object_truncate(obj); 3847 + 3848 + args->retained = obj_priv->madv != __I915_MADV_PURGED; 3849 + 3850 + drm_gem_object_unreference(obj); 3851 + mutex_unlock(&dev->struct_mutex); 3852 + 3853 + return 0; 3854 + } 3855 + 4057 3856 int i915_gem_init_object(struct drm_gem_object *obj) 4058 3857 { 4059 3858 struct drm_i915_gem_object *obj_priv; ··· 4128 3827 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4129 3828 INIT_LIST_HEAD(&obj_priv->list); 4130 3829 INIT_LIST_HEAD(&obj_priv->fence_list); 3830 + obj_priv->madv = I915_MADV_WILLNEED; 3831 + 3832 + trace_i915_gem_object_create(obj); 4131 3833 4132 3834 return 0; 4133 3835 } ··· 4140 3836 struct drm_device *dev = obj->dev; 4141 3837 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4142 3838 3839 + trace_i915_gem_object_destroy(obj); 3840 + 4143 3841 while (obj_priv->pin_count > 0) 4144 3842 i915_gem_object_unpin(obj); 4145 3843 ··· 4150 3844 4151 3845 i915_gem_object_unbind(obj); 4152 3846 4153 - i915_gem_free_mmap_offset(obj); 3847 + if (obj_priv->mmap_offset) 3848 + i915_gem_free_mmap_offset(obj); 4154 3849 4155 3850 kfree(obj_priv->page_cpu_valid); 4156 3851 kfree(obj_priv->bit_17); 4157 3852 kfree(obj->driver_private); 4158 3853 } 4159 3854 4160 - /** Unbinds all objects that are on the given buffer list. */ 3855 + /** Unbinds all inactive objects. */ 4161 3856 static int 4162 - i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 3857 + i915_gem_evict_from_inactive_list(struct drm_device *dev) 4163 3858 { 4164 - struct drm_gem_object *obj; 4165 - struct drm_i915_gem_object *obj_priv; 4166 - int ret; 3859 + drm_i915_private_t *dev_priv = dev->dev_private; 4167 3860 4168 - while (!list_empty(head)) { 4169 - obj_priv = list_first_entry(head, 4170 - struct drm_i915_gem_object, 4171 - list); 4172 - obj = obj_priv->obj; 3861 + while (!list_empty(&dev_priv->mm.inactive_list)) { 3862 + struct drm_gem_object *obj; 3863 + int ret; 4173 3864 4174 - if (obj_priv->pin_count != 0) { 4175 - DRM_ERROR("Pinned object in unbind list\n"); 4176 - mutex_unlock(&dev->struct_mutex); 4177 - return -EINVAL; 4178 - } 3865 + obj = list_first_entry(&dev_priv->mm.inactive_list, 3866 + struct drm_i915_gem_object, 3867 + list)->obj; 4179 3868 4180 3869 ret = i915_gem_object_unbind(obj); 4181 3870 if (ret != 0) { 4182 - DRM_ERROR("Error unbinding object in LeaveVT: %d\n", 4183 - ret); 4184 - mutex_unlock(&dev->struct_mutex); 3871 + DRM_ERROR("Error unbinding object: %d\n", ret); 4185 3872 return ret; 4186 3873 } 4187 3874 } 4188 - 4189 3875 4190 3876 return 0; 4191 3877 } ··· 4200 3902 * We need to replace this with a semaphore, or something. 
4201 3903 */ 4202 3904 dev_priv->mm.suspended = 1; 3905 + del_timer(&dev_priv->hangcheck_timer); 4203 3906 4204 3907 /* Cancel the retire work handler, wait for it to finish if running 4205 3908 */ ··· 4230 3931 if (last_seqno == cur_seqno) { 4231 3932 if (stuck++ > 100) { 4232 3933 DRM_ERROR("hardware wedged\n"); 4233 - dev_priv->mm.wedged = 1; 3934 + atomic_set(&dev_priv->mm.wedged, 1); 4234 3935 DRM_WAKEUP(&dev_priv->irq_queue); 4235 3936 break; 4236 3937 } ··· 4243 3944 i915_gem_retire_requests(dev); 4244 3945 4245 3946 spin_lock(&dev_priv->mm.active_list_lock); 4246 - if (!dev_priv->mm.wedged) { 3947 + if (!atomic_read(&dev_priv->mm.wedged)) { 4247 3948 /* Active and flushing should now be empty as we've 4248 3949 * waited for a sequence higher than any pending execbuffer 4249 3950 */ ··· 4261 3962 * the GPU domains and just stuff them onto inactive. 4262 3963 */ 4263 3964 while (!list_empty(&dev_priv->mm.active_list)) { 4264 - struct drm_i915_gem_object *obj_priv; 3965 + struct drm_gem_object *obj; 3966 + uint32_t old_write_domain; 4265 3967 4266 - obj_priv = list_first_entry(&dev_priv->mm.active_list, 4267 - struct drm_i915_gem_object, 4268 - list); 4269 - obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4270 - i915_gem_object_move_to_inactive(obj_priv->obj); 3968 + obj = list_first_entry(&dev_priv->mm.active_list, 3969 + struct drm_i915_gem_object, 3970 + list)->obj; 3971 + old_write_domain = obj->write_domain; 3972 + obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 3973 + i915_gem_object_move_to_inactive(obj); 3974 + 3975 + trace_i915_gem_object_change_domain(obj, 3976 + obj->read_domains, 3977 + old_write_domain); 4271 3978 } 4272 3979 spin_unlock(&dev_priv->mm.active_list_lock); 4273 3980 4274 3981 while (!list_empty(&dev_priv->mm.flushing_list)) { 4275 - struct drm_i915_gem_object *obj_priv; 3982 + struct drm_gem_object *obj; 3983 + uint32_t old_write_domain; 4276 3984 4277 - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 4278 - struct drm_i915_gem_object, 4279 - list); 4280 - obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 4281 - i915_gem_object_move_to_inactive(obj_priv->obj); 3985 + obj = list_first_entry(&dev_priv->mm.flushing_list, 3986 + struct drm_i915_gem_object, 3987 + list)->obj; 3988 + old_write_domain = obj->write_domain; 3989 + obj->write_domain &= ~I915_GEM_GPU_DOMAINS; 3990 + i915_gem_object_move_to_inactive(obj); 3991 + 3992 + trace_i915_gem_object_change_domain(obj, 3993 + obj->read_domains, 3994 + old_write_domain); 4282 3995 } 4283 3996 4284 3997 4285 3998 /* Move all inactive buffers out of the GTT. 
*/ 4286 - ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 3999 + ret = i915_gem_evict_from_inactive_list(dev); 4287 4000 WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); 4288 4001 if (ret) { 4289 4002 mutex_unlock(&dev->struct_mutex); ··· 4517 4206 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4518 4207 return 0; 4519 4208 4520 - if (dev_priv->mm.wedged) { 4209 + if (atomic_read(&dev_priv->mm.wedged)) { 4521 4210 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4522 - dev_priv->mm.wedged = 0; 4211 + atomic_set(&dev_priv->mm.wedged, 0); 4523 4212 } 4524 4213 4525 4214 mutex_lock(&dev->struct_mutex); ··· 4584 4273 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4585 4274 i915_gem_retire_work_handler); 4586 4275 dev_priv->mm.next_gem_seqno = 1; 4276 + 4277 + spin_lock(&shrink_list_lock); 4278 + list_add(&dev_priv->mm.shrink_list, &shrink_list); 4279 + spin_unlock(&shrink_list_lock); 4587 4280 4588 4281 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4589 4282 dev_priv->fence_reg_start = 3; ··· 4805 4490 while (!list_empty(&i915_file_priv->mm.request_list)) 4806 4491 list_del_init(i915_file_priv->mm.request_list.next); 4807 4492 mutex_unlock(&dev->struct_mutex); 4493 + } 4494 + 4495 + static int 4496 + i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) 4497 + { 4498 + drm_i915_private_t *dev_priv, *next_dev; 4499 + struct drm_i915_gem_object *obj_priv, *next_obj; 4500 + int cnt = 0; 4501 + int would_deadlock = 1; 4502 + 4503 + /* "fast-path" to count number of available objects */ 4504 + if (nr_to_scan == 0) { 4505 + spin_lock(&shrink_list_lock); 4506 + list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { 4507 + struct drm_device *dev = dev_priv->dev; 4508 + 4509 + if (mutex_trylock(&dev->struct_mutex)) { 4510 + list_for_each_entry(obj_priv, 4511 + &dev_priv->mm.inactive_list, 4512 + list) 4513 + cnt++; 4514 + mutex_unlock(&dev->struct_mutex); 4515 + } 4516 + } 4517 + spin_unlock(&shrink_list_lock); 4518 + 4519 + return (cnt / 100) * sysctl_vfs_cache_pressure; 4520 + } 4521 + 4522 + spin_lock(&shrink_list_lock); 4523 + 4524 + /* first scan for clean buffers */ 4525 + list_for_each_entry_safe(dev_priv, next_dev, 4526 + &shrink_list, mm.shrink_list) { 4527 + struct drm_device *dev = dev_priv->dev; 4528 + 4529 + if (! mutex_trylock(&dev->struct_mutex)) 4530 + continue; 4531 + 4532 + spin_unlock(&shrink_list_lock); 4533 + 4534 + i915_gem_retire_requests(dev); 4535 + 4536 + list_for_each_entry_safe(obj_priv, next_obj, 4537 + &dev_priv->mm.inactive_list, 4538 + list) { 4539 + if (i915_gem_object_is_purgeable(obj_priv)) { 4540 + i915_gem_object_unbind(obj_priv->obj); 4541 + if (--nr_to_scan <= 0) 4542 + break; 4543 + } 4544 + } 4545 + 4546 + spin_lock(&shrink_list_lock); 4547 + mutex_unlock(&dev->struct_mutex); 4548 + 4549 + would_deadlock = 0; 4550 + 4551 + if (nr_to_scan <= 0) 4552 + break; 4553 + } 4554 + 4555 + /* second pass, evict/count anything still on the inactive list */ 4556 + list_for_each_entry_safe(dev_priv, next_dev, 4557 + &shrink_list, mm.shrink_list) { 4558 + struct drm_device *dev = dev_priv->dev; 4559 + 4560 + if (! 
mutex_trylock(&dev->struct_mutex)) 4561 + continue; 4562 + 4563 + spin_unlock(&shrink_list_lock); 4564 + 4565 + list_for_each_entry_safe(obj_priv, next_obj, 4566 + &dev_priv->mm.inactive_list, 4567 + list) { 4568 + if (nr_to_scan > 0) { 4569 + i915_gem_object_unbind(obj_priv->obj); 4570 + nr_to_scan--; 4571 + } else 4572 + cnt++; 4573 + } 4574 + 4575 + spin_lock(&shrink_list_lock); 4576 + mutex_unlock(&dev->struct_mutex); 4577 + 4578 + would_deadlock = 0; 4579 + } 4580 + 4581 + spin_unlock(&shrink_list_lock); 4582 + 4583 + if (would_deadlock) 4584 + return -1; 4585 + else if (cnt > 0) 4586 + return (cnt / 100) * sysctl_vfs_cache_pressure; 4587 + else 4588 + return 0; 4589 + } 4590 + 4591 + static struct shrinker shrinker = { 4592 + .shrink = i915_gem_shrink, 4593 + .seeks = DEFAULT_SEEKS, 4594 + }; 4595 + 4596 + __init void 4597 + i915_gem_shrinker_init(void) 4598 + { 4599 + register_shrinker(&shrinker); 4600 + } 4601 + 4602 + __exit void 4603 + i915_gem_shrinker_exit(void) 4604 + { 4605 + unregister_shrinker(&shrinker); 4808 4606 }
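The i915_gem.c hunks above introduce the purgeable-object interface (I915_MADV_WILLNEED / I915_MADV_DONTNEED plus the new madvise ioctl handler) and a slab shrinker that unbinds purgeable and then inactive buffers under memory pressure. A minimal userspace sketch of how a client might drive the new ioctl follows; the wrapper name is made up, and DRM_IOCTL_I915_GEM_MADVISE is assumed to be the request number this series exports through i915_drm.h, but the structure fields (handle, madv, retained) are the ones used by i915_gem_madvise_ioctl() above.

    /* Illustrative sketch only: mark a cached GEM buffer purgeable and
     * learn whether the kernel has already discarded its pages. */
    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "i915_drm.h"   /* DRM_IOCTL_I915_GEM_MADVISE (assumed) */

    static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
    {
            struct drm_i915_gem_madvise arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.madv = madv;        /* I915_MADV_DONTNEED or _WILLNEED */

            if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                    return -errno;

            /* retained == 0 means the object was purged; its contents
             * must be regenerated before the buffer is reused. */
            return arg.retained;
    }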
+83 -7
drivers/gpu/drm/i915/i915_irq.c
··· 31 31 #include "drm.h" 32 32 #include "i915_drm.h" 33 33 #include "i915_drv.h" 34 + #include "i915_trace.h" 34 35 #include "intel_drv.h" 35 36 36 37 #define MAX_NOPID ((u32)~0) ··· 280 279 } 281 280 282 281 if (gt_iir & GT_USER_INTERRUPT) { 283 - dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 282 + u32 seqno = i915_get_gem_seqno(dev); 283 + dev_priv->mm.irq_gem_seqno = seqno; 284 + trace_i915_gem_request_complete(dev, seqno); 284 285 DRM_WAKEUP(&dev_priv->irq_queue); 285 286 } 286 287 ··· 305 302 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 306 303 error_work); 307 304 struct drm_device *dev = dev_priv->dev; 308 - char *event_string = "ERROR=1"; 309 - char *envp[] = { event_string, NULL }; 305 + char *error_event[] = { "ERROR=1", NULL }; 306 + char *reset_event[] = { "RESET=1", NULL }; 307 + char *reset_done_event[] = { "ERROR=0", NULL }; 310 308 311 309 DRM_DEBUG("generating error event\n"); 310 + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 312 311 313 - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 312 + if (atomic_read(&dev_priv->mm.wedged)) { 313 + if (IS_I965G(dev)) { 314 + DRM_DEBUG("resetting chip\n"); 315 + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 316 + if (!i965_reset(dev, GDRST_RENDER)) { 317 + atomic_set(&dev_priv->mm.wedged, 0); 318 + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 319 + } 320 + } else { 321 + printk("reboot required\n"); 322 + } 323 + } 314 324 } 315 325 316 326 /** ··· 388 372 * so userspace knows something bad happened (should trigger collection 389 373 * of a ring dump etc.). 390 374 */ 391 - static void i915_handle_error(struct drm_device *dev) 375 + static void i915_handle_error(struct drm_device *dev, bool wedged) 392 376 { 393 377 struct drm_i915_private *dev_priv = dev->dev_private; 394 378 u32 eir = I915_READ(EIR); ··· 498 482 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 499 483 } 500 484 485 + if (wedged) { 486 + atomic_set(&dev_priv->mm.wedged, 1); 487 + 488 + /* 489 + * Wakeup waiting processes so they don't hang 490 + */ 491 + printk("i915: Waking up sleeping processes\n"); 492 + DRM_WAKEUP(&dev_priv->irq_queue); 493 + } 494 + 501 495 queue_work(dev_priv->wq, &dev_priv->error_work); 502 496 } 503 497 ··· 553 527 pipeb_stats = I915_READ(PIPEBSTAT); 554 528 555 529 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 556 - i915_handle_error(dev); 530 + i915_handle_error(dev, false); 557 531 558 532 /* 559 533 * Clear the PIPE(A|B)STAT regs before the IIR ··· 625 599 } 626 600 627 601 if (iir & I915_USER_INTERRUPT) { 628 - dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 602 + u32 seqno = i915_get_gem_seqno(dev); 603 + dev_priv->mm.irq_gem_seqno = seqno; 604 + trace_i915_gem_request_complete(dev, seqno); 629 605 DRM_WAKEUP(&dev_priv->irq_queue); 606 + dev_priv->hangcheck_count = 0; 607 + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 630 608 } 631 609 632 610 if (pipea_stats & vblank_status) { ··· 908 878 * meeting the requirements of vblank swapping. 909 879 */ 910 880 return -EINVAL; 881 + } 882 + 883 + struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { 884 + drm_i915_private_t *dev_priv = dev->dev_private; 885 + return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); 886 + } 887 + 888 + /** 889 + * This is called when the chip hasn't reported back with completed 890 + * batchbuffers in a long time. 
The first time this is called we simply record 891 + * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 892 + * again, we assume the chip is wedged and try to fix it. 893 + */ 894 + void i915_hangcheck_elapsed(unsigned long data) 895 + { 896 + struct drm_device *dev = (struct drm_device *)data; 897 + drm_i915_private_t *dev_priv = dev->dev_private; 898 + uint32_t acthd; 899 + 900 + if (!IS_I965G(dev)) 901 + acthd = I915_READ(ACTHD); 902 + else 903 + acthd = I915_READ(ACTHD_I965); 904 + 905 + /* If all work is done then ACTHD clearly hasn't advanced. */ 906 + if (list_empty(&dev_priv->mm.request_list) || 907 + i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { 908 + dev_priv->hangcheck_count = 0; 909 + return; 910 + } 911 + 912 + if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { 913 + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 914 + i915_handle_error(dev, true); 915 + return; 916 + } 917 + 918 + /* Reset timer case chip hangs without another request being added */ 919 + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 920 + 921 + if (acthd != dev_priv->last_acthd) 922 + dev_priv->hangcheck_count = 0; 923 + else 924 + dev_priv->hangcheck_count++; 925 + 926 + dev_priv->last_acthd = acthd; 911 927 } 912 928 913 929 /* drm_dma.h hooks
+17 -5
drivers/gpu/drm/i915/i915_opregion.c
··· 148 148 struct drm_i915_private *dev_priv = dev->dev_private; 149 149 struct opregion_asle *asle = dev_priv->opregion.asle; 150 150 u32 blc_pwm_ctl, blc_pwm_ctl2; 151 + u32 max_backlight, level, shift; 151 152 152 153 if (!(bclp & ASLE_BCLP_VALID)) 153 154 return ASLE_BACKLIGHT_FAIL; ··· 158 157 return ASLE_BACKLIGHT_FAIL; 159 158 160 159 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 161 - blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; 162 160 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 163 161 164 - if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) 162 + if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) 165 163 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 166 - else 167 - I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); 168 - 164 + else { 165 + if (IS_IGD(dev)) { 166 + blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 167 + max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 168 + BACKLIGHT_MODULATION_FREQ_SHIFT; 169 + shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; 170 + } else { 171 + blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; 172 + max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 173 + BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 174 + shift = BACKLIGHT_DUTY_CYCLE_SHIFT; 175 + } 176 + level = (bclp * max_backlight) / 255; 177 + I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); 178 + } 169 179 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 170 180 171 181 return 0;
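The ASLE backlight fix above stops treating the duty-cycle field as a fixed 8-bit quantity: the requested level bclp (0-255) is now scaled into the range implied by the PWM modulation-frequency field, with IGD parts using a duty-cycle field shifted up by one bit. Restated as a tiny self-contained helper, with plain integers standing in for the register fields:

    /* Mirrors the computation in the hunk above; max_backlight and
     * shift are the values derived from BLC_PWM_CTL. */
    static unsigned int asle_duty_cycle(unsigned int bclp,
                                        unsigned int max_backlight,
                                        unsigned int shift)
    {
            unsigned int level = (bclp * max_backlight) / 255;

            /* e.g. bclp = 128 with max_backlight = 932 gives level = 467,
             * roughly half scale, placed at 'shift' within BLC_PWM_CTL. */
            return level << shift;
    }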
+34
drivers/gpu/drm/i915/i915_reg.h
··· 86 86 #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 87 87 #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 88 88 #define LBB 0xf4 89 + #define GDRST 0xc0 90 + #define GDRST_FULL (0<<2) 91 + #define GDRST_RENDER (1<<2) 92 + #define GDRST_MEDIA (3<<2) 89 93 90 94 /* VGA stuff */ 91 95 ··· 348 344 #define FBC_CTL_PLANEA (0<<0) 349 345 #define FBC_CTL_PLANEB (1<<0) 350 346 #define FBC_FENCE_OFF 0x0321b 347 + #define FBC_TAG 0x03300 351 348 352 349 #define FBC_LL_SIZE (1536) 350 + 351 + /* Framebuffer compression for GM45+ */ 352 + #define DPFC_CB_BASE 0x3200 353 + #define DPFC_CONTROL 0x3208 354 + #define DPFC_CTL_EN (1<<31) 355 + #define DPFC_CTL_PLANEA (0<<30) 356 + #define DPFC_CTL_PLANEB (1<<30) 357 + #define DPFC_CTL_FENCE_EN (1<<29) 358 + #define DPFC_SR_EN (1<<10) 359 + #define DPFC_CTL_LIMIT_1X (0<<6) 360 + #define DPFC_CTL_LIMIT_2X (1<<6) 361 + #define DPFC_CTL_LIMIT_4X (2<<6) 362 + #define DPFC_RECOMP_CTL 0x320c 363 + #define DPFC_RECOMP_STALL_EN (1<<27) 364 + #define DPFC_RECOMP_STALL_WM_SHIFT (16) 365 + #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) 366 + #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) 367 + #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) 368 + #define DPFC_STATUS 0x3210 369 + #define DPFC_INVAL_SEG_SHIFT (16) 370 + #define DPFC_INVAL_SEG_MASK (0x07ff0000) 371 + #define DPFC_COMP_SEG_SHIFT (0) 372 + #define DPFC_COMP_SEG_MASK (0x000003ff) 373 + #define DPFC_STATUS2 0x3214 374 + #define DPFC_FENCE_YOFF 0x3218 375 + #define DPFC_CHICKEN 0x3224 376 + #define DPFC_HT_MODIFY (1<<31) 353 377 354 378 /* 355 379 * GPIO regs ··· 2032 2000 #define PF_ENABLE (1<<31) 2033 2001 #define PFA_WIN_SZ 0x68074 2034 2002 #define PFB_WIN_SZ 0x68874 2003 + #define PFA_WIN_POS 0x68070 2004 + #define PFB_WIN_POS 0x68870 2035 2005 2036 2006 /* legacy palette */ 2037 2007 #define LGC_PALETTE_A 0x4a000
+97 -73
drivers/gpu/drm/i915/i915_suspend.c
··· 228 228 229 229 if (drm_core_check_feature(dev, DRIVER_MODESET)) 230 230 return; 231 + 231 232 /* Pipe & plane A info */ 232 233 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 233 234 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); ··· 286 285 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 287 286 return; 288 287 } 288 + 289 289 static void i915_restore_modeset_reg(struct drm_device *dev) 290 290 { 291 291 struct drm_i915_private *dev_priv = dev->dev_private; ··· 381 379 382 380 return; 383 381 } 384 - int i915_save_state(struct drm_device *dev) 382 + 383 + void i915_save_display(struct drm_device *dev) 385 384 { 386 385 struct drm_i915_private *dev_priv = dev->dev_private; 387 - int i; 388 - 389 - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 390 - 391 - /* Render Standby */ 392 - if (IS_I965G(dev) && IS_MOBILE(dev)) 393 - dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); 394 - 395 - /* Hardware status page */ 396 - dev_priv->saveHWS = I915_READ(HWS_PGA); 397 386 398 387 /* Display arbitration control */ 399 388 dev_priv->saveDSPARB = I915_READ(DSPARB); ··· 392 399 /* This is only meaningful in non-KMS mode */ 393 400 /* Don't save them in KMS mode */ 394 401 i915_save_modeset_reg(dev); 402 + 395 403 /* Cursor state */ 396 404 dev_priv->saveCURACNTR = I915_READ(CURACNTR); 397 405 dev_priv->saveCURAPOS = I915_READ(CURAPOS); ··· 442 448 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 443 449 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 444 450 445 - /* Interrupt state */ 446 - dev_priv->saveIIR = I915_READ(IIR); 447 - dev_priv->saveIER = I915_READ(IER); 448 - dev_priv->saveIMR = I915_READ(IMR); 449 - 450 451 /* VGA state */ 451 452 dev_priv->saveVGA0 = I915_READ(VGA0); 452 453 dev_priv->saveVGA1 = I915_READ(VGA1); 453 454 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 454 455 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 455 456 456 - /* Clock gating state */ 457 - dev_priv->saveD_STATE = I915_READ(D_STATE); 458 - dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); 459 - 460 - /* Cache mode state */ 461 - dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 462 - 463 - /* Memory Arbitration state */ 464 - dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 465 - 466 - /* Scratch space */ 467 - for (i = 0; i < 16; i++) { 468 - dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 469 - dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 470 - } 471 - for (i = 0; i < 3; i++) 472 - dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 473 - 474 - /* Fences */ 475 - if (IS_I965G(dev)) { 476 - for (i = 0; i < 16; i++) 477 - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 478 - } else { 479 - for (i = 0; i < 8; i++) 480 - dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 481 - 482 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 483 - for (i = 0; i < 8; i++) 484 - dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 485 - } 486 457 i915_save_vga(dev); 487 - 488 - return 0; 489 458 } 490 459 491 - int i915_restore_state(struct drm_device *dev) 460 + void i915_restore_display(struct drm_device *dev) 492 461 { 493 462 struct drm_i915_private *dev_priv = dev->dev_private; 494 - int i; 495 - 496 - pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 497 - 498 - /* Render Standby */ 499 - if (IS_I965G(dev) && IS_MOBILE(dev)) 500 - I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 501 - 502 - /* Hardware status page */ 503 - I915_WRITE(HWS_PGA, dev_priv->saveHWS); 504 463 505 464 /* Display 
arbitration */ 506 465 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 507 466 508 - /* Fences */ 509 - if (IS_I965G(dev)) { 510 - for (i = 0; i < 16; i++) 511 - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 512 - } else { 513 - for (i = 0; i < 8; i++) 514 - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 515 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 516 - for (i = 0; i < 8; i++) 517 - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 518 - } 519 - 520 467 /* Display port ratios (must be done before clock is set) */ 521 468 if (SUPPORTS_INTEGRATED_DP(dev)) { 522 469 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); ··· 469 534 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); 470 535 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); 471 536 } 537 + 472 538 /* This is only meaningful in non-KMS mode */ 473 539 /* Don't restore them in KMS mode */ 474 540 i915_restore_modeset_reg(dev); 541 + 475 542 /* Cursor state */ 476 543 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); 477 544 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); ··· 523 586 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 524 587 DRM_UDELAY(150); 525 588 589 + i915_restore_vga(dev); 590 + } 591 + 592 + int i915_save_state(struct drm_device *dev) 593 + { 594 + struct drm_i915_private *dev_priv = dev->dev_private; 595 + int i; 596 + 597 + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 598 + 599 + /* Render Standby */ 600 + if (IS_I965G(dev) && IS_MOBILE(dev)) 601 + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); 602 + 603 + /* Hardware status page */ 604 + dev_priv->saveHWS = I915_READ(HWS_PGA); 605 + 606 + i915_save_display(dev); 607 + 608 + /* Interrupt state */ 609 + dev_priv->saveIER = I915_READ(IER); 610 + dev_priv->saveIMR = I915_READ(IMR); 611 + 612 + /* Clock gating state */ 613 + dev_priv->saveD_STATE = I915_READ(D_STATE); 614 + dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */ 615 + 616 + /* Cache mode state */ 617 + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 618 + 619 + /* Memory Arbitration state */ 620 + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 621 + 622 + /* Scratch space */ 623 + for (i = 0; i < 16; i++) { 624 + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 625 + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 626 + } 627 + for (i = 0; i < 3; i++) 628 + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 629 + 630 + /* Fences */ 631 + if (IS_I965G(dev)) { 632 + for (i = 0; i < 16; i++) 633 + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 634 + } else { 635 + for (i = 0; i < 8; i++) 636 + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 637 + 638 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 639 + for (i = 0; i < 8; i++) 640 + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 641 + } 642 + 643 + return 0; 644 + } 645 + 646 + int i915_restore_state(struct drm_device *dev) 647 + { 648 + struct drm_i915_private *dev_priv = dev->dev_private; 649 + int i; 650 + 651 + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 652 + 653 + /* Render Standby */ 654 + if (IS_I965G(dev) && IS_MOBILE(dev)) 655 + I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 656 + 657 + /* Hardware status page */ 658 + I915_WRITE(HWS_PGA, dev_priv->saveHWS); 659 + 660 + /* Fences */ 661 + if (IS_I965G(dev)) { 662 + for (i = 0; i < 16; i++) 663 + I915_WRITE64(FENCE_REG_965_0 + (i * 8), 
dev_priv->saveFENCE[i]); 664 + } else { 665 + for (i = 0; i < 8; i++) 666 + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 667 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 668 + for (i = 0; i < 8; i++) 669 + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 670 + } 671 + 672 + i915_restore_display(dev); 673 + 674 + /* Interrupt state */ 675 + I915_WRITE (IER, dev_priv->saveIER); 676 + I915_WRITE (IMR, dev_priv->saveIMR); 677 + 526 678 /* Clock gating state */ 527 679 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 528 680 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); ··· 628 602 } 629 603 for (i = 0; i < 3; i++) 630 604 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 631 - 632 - i915_restore_vga(dev); 633 605 634 606 return 0; 635 607 }
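i915_save_state()/i915_restore_state() are split above so that the display-related registers can be saved and restored on their own, while fences, interrupt masks and clock gating stay in the full-state paths. A hedged guess at the intended consumer, based on the GDRST definitions and the reset work queued from i915_irq.c (the actual i965_reset() body is outside this excerpt):

    /* Sketch only: a render-core reset wants the scanout configuration
     * back afterwards without replaying interrupt or fence state. */
    i915_save_display(dev);
    /* ... assert GDRST_RENDER and wait for the reset to complete ... */
    i915_restore_display(dev);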
+315
drivers/gpu/drm/i915/i915_trace.h
··· 1 + #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) 2 + #define _I915_TRACE_H_ 3 + 4 + #include <linux/stringify.h> 5 + #include <linux/types.h> 6 + #include <linux/tracepoint.h> 7 + 8 + #include <drm/drmP.h> 9 + 10 + #undef TRACE_SYSTEM 11 + #define TRACE_SYSTEM i915 12 + #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) 13 + #define TRACE_INCLUDE_FILE i915_trace 14 + 15 + /* object tracking */ 16 + 17 + TRACE_EVENT(i915_gem_object_create, 18 + 19 + TP_PROTO(struct drm_gem_object *obj), 20 + 21 + TP_ARGS(obj), 22 + 23 + TP_STRUCT__entry( 24 + __field(struct drm_gem_object *, obj) 25 + __field(u32, size) 26 + ), 27 + 28 + TP_fast_assign( 29 + __entry->obj = obj; 30 + __entry->size = obj->size; 31 + ), 32 + 33 + TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 34 + ); 35 + 36 + TRACE_EVENT(i915_gem_object_bind, 37 + 38 + TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), 39 + 40 + TP_ARGS(obj, gtt_offset), 41 + 42 + TP_STRUCT__entry( 43 + __field(struct drm_gem_object *, obj) 44 + __field(u32, gtt_offset) 45 + ), 46 + 47 + TP_fast_assign( 48 + __entry->obj = obj; 49 + __entry->gtt_offset = gtt_offset; 50 + ), 51 + 52 + TP_printk("obj=%p, gtt_offset=%08x", 53 + __entry->obj, __entry->gtt_offset) 54 + ); 55 + 56 + TRACE_EVENT(i915_gem_object_clflush, 57 + 58 + TP_PROTO(struct drm_gem_object *obj), 59 + 60 + TP_ARGS(obj), 61 + 62 + TP_STRUCT__entry( 63 + __field(struct drm_gem_object *, obj) 64 + ), 65 + 66 + TP_fast_assign( 67 + __entry->obj = obj; 68 + ), 69 + 70 + TP_printk("obj=%p", __entry->obj) 71 + ); 72 + 73 + TRACE_EVENT(i915_gem_object_change_domain, 74 + 75 + TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), 76 + 77 + TP_ARGS(obj, old_read_domains, old_write_domain), 78 + 79 + TP_STRUCT__entry( 80 + __field(struct drm_gem_object *, obj) 81 + __field(u32, read_domains) 82 + __field(u32, write_domain) 83 + ), 84 + 85 + TP_fast_assign( 86 + __entry->obj = obj; 87 + __entry->read_domains = obj->read_domains | (old_read_domains << 16); 88 + __entry->write_domain = obj->write_domain | (old_write_domain << 16); 89 + ), 90 + 91 + TP_printk("obj=%p, read=%04x, write=%04x", 92 + __entry->obj, 93 + __entry->read_domains, __entry->write_domain) 94 + ); 95 + 96 + TRACE_EVENT(i915_gem_object_get_fence, 97 + 98 + TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), 99 + 100 + TP_ARGS(obj, fence, tiling_mode), 101 + 102 + TP_STRUCT__entry( 103 + __field(struct drm_gem_object *, obj) 104 + __field(int, fence) 105 + __field(int, tiling_mode) 106 + ), 107 + 108 + TP_fast_assign( 109 + __entry->obj = obj; 110 + __entry->fence = fence; 111 + __entry->tiling_mode = tiling_mode; 112 + ), 113 + 114 + TP_printk("obj=%p, fence=%d, tiling=%d", 115 + __entry->obj, __entry->fence, __entry->tiling_mode) 116 + ); 117 + 118 + TRACE_EVENT(i915_gem_object_unbind, 119 + 120 + TP_PROTO(struct drm_gem_object *obj), 121 + 122 + TP_ARGS(obj), 123 + 124 + TP_STRUCT__entry( 125 + __field(struct drm_gem_object *, obj) 126 + ), 127 + 128 + TP_fast_assign( 129 + __entry->obj = obj; 130 + ), 131 + 132 + TP_printk("obj=%p", __entry->obj) 133 + ); 134 + 135 + TRACE_EVENT(i915_gem_object_destroy, 136 + 137 + TP_PROTO(struct drm_gem_object *obj), 138 + 139 + TP_ARGS(obj), 140 + 141 + TP_STRUCT__entry( 142 + __field(struct drm_gem_object *, obj) 143 + ), 144 + 145 + TP_fast_assign( 146 + __entry->obj = obj; 147 + ), 148 + 149 + TP_printk("obj=%p", __entry->obj) 150 + ); 151 + 152 + /* batch tracing */ 153 + 154 + 
TRACE_EVENT(i915_gem_request_submit, 155 + 156 + TP_PROTO(struct drm_device *dev, u32 seqno), 157 + 158 + TP_ARGS(dev, seqno), 159 + 160 + TP_STRUCT__entry( 161 + __field(struct drm_device *, dev) 162 + __field(u32, seqno) 163 + ), 164 + 165 + TP_fast_assign( 166 + __entry->dev = dev; 167 + __entry->seqno = seqno; 168 + ), 169 + 170 + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 171 + ); 172 + 173 + TRACE_EVENT(i915_gem_request_flush, 174 + 175 + TP_PROTO(struct drm_device *dev, u32 seqno, 176 + u32 flush_domains, u32 invalidate_domains), 177 + 178 + TP_ARGS(dev, seqno, flush_domains, invalidate_domains), 179 + 180 + TP_STRUCT__entry( 181 + __field(struct drm_device *, dev) 182 + __field(u32, seqno) 183 + __field(u32, flush_domains) 184 + __field(u32, invalidate_domains) 185 + ), 186 + 187 + TP_fast_assign( 188 + __entry->dev = dev; 189 + __entry->seqno = seqno; 190 + __entry->flush_domains = flush_domains; 191 + __entry->invalidate_domains = invalidate_domains; 192 + ), 193 + 194 + TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", 195 + __entry->dev, __entry->seqno, 196 + __entry->flush_domains, __entry->invalidate_domains) 197 + ); 198 + 199 + 200 + TRACE_EVENT(i915_gem_request_complete, 201 + 202 + TP_PROTO(struct drm_device *dev, u32 seqno), 203 + 204 + TP_ARGS(dev, seqno), 205 + 206 + TP_STRUCT__entry( 207 + __field(struct drm_device *, dev) 208 + __field(u32, seqno) 209 + ), 210 + 211 + TP_fast_assign( 212 + __entry->dev = dev; 213 + __entry->seqno = seqno; 214 + ), 215 + 216 + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 217 + ); 218 + 219 + TRACE_EVENT(i915_gem_request_retire, 220 + 221 + TP_PROTO(struct drm_device *dev, u32 seqno), 222 + 223 + TP_ARGS(dev, seqno), 224 + 225 + TP_STRUCT__entry( 226 + __field(struct drm_device *, dev) 227 + __field(u32, seqno) 228 + ), 229 + 230 + TP_fast_assign( 231 + __entry->dev = dev; 232 + __entry->seqno = seqno; 233 + ), 234 + 235 + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 236 + ); 237 + 238 + TRACE_EVENT(i915_gem_request_wait_begin, 239 + 240 + TP_PROTO(struct drm_device *dev, u32 seqno), 241 + 242 + TP_ARGS(dev, seqno), 243 + 244 + TP_STRUCT__entry( 245 + __field(struct drm_device *, dev) 246 + __field(u32, seqno) 247 + ), 248 + 249 + TP_fast_assign( 250 + __entry->dev = dev; 251 + __entry->seqno = seqno; 252 + ), 253 + 254 + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 255 + ); 256 + 257 + TRACE_EVENT(i915_gem_request_wait_end, 258 + 259 + TP_PROTO(struct drm_device *dev, u32 seqno), 260 + 261 + TP_ARGS(dev, seqno), 262 + 263 + TP_STRUCT__entry( 264 + __field(struct drm_device *, dev) 265 + __field(u32, seqno) 266 + ), 267 + 268 + TP_fast_assign( 269 + __entry->dev = dev; 270 + __entry->seqno = seqno; 271 + ), 272 + 273 + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) 274 + ); 275 + 276 + TRACE_EVENT(i915_ring_wait_begin, 277 + 278 + TP_PROTO(struct drm_device *dev), 279 + 280 + TP_ARGS(dev), 281 + 282 + TP_STRUCT__entry( 283 + __field(struct drm_device *, dev) 284 + ), 285 + 286 + TP_fast_assign( 287 + __entry->dev = dev; 288 + ), 289 + 290 + TP_printk("dev=%p", __entry->dev) 291 + ); 292 + 293 + TRACE_EVENT(i915_ring_wait_end, 294 + 295 + TP_PROTO(struct drm_device *dev), 296 + 297 + TP_ARGS(dev), 298 + 299 + TP_STRUCT__entry( 300 + __field(struct drm_device *, dev) 301 + ), 302 + 303 + TP_fast_assign( 304 + __entry->dev = dev; 305 + ), 306 + 307 + TP_printk("dev=%p", __entry->dev) 308 + ); 309 + 310 + #endif /* _I915_TRACE_H_ */ 311 + 312 + /* This part 
must be outside protection */ 313 + #undef TRACE_INCLUDE_PATH 314 + #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 315 + #include <trace/define_trace.h>
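The TRACE_EVENT() definitions above plug the driver into the generic kernel tracing machinery, so the new events can be enabled at runtime without rebuilding. An illustrative consumer, assuming debugfs is mounted in the usual place:

    /* Illustrative only: enable every i915 event and stream the buffer. */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/tracing/events/i915/enable",
                          O_WRONLY);
            if (fd >= 0) {
                    write(fd, "1", 1);
                    close(fd);
            }
            /* i915_gem_object_create, i915_gem_request_complete, etc.
             * now appear here: */
            execlp("cat", "cat",
                   "/sys/kernel/debug/tracing/trace_pipe", (char *)NULL);
            return 1;
    }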
+11
drivers/gpu/drm/i915/i915_trace_points.c
··· 1 + /* 2 + * Copyright © 2009 Intel Corporation 3 + * 4 + * Authors: 5 + * Chris Wilson <chris@chris-wilson.co.uk> 6 + */ 7 + 8 + #include "i915_drv.h" 9 + 10 + #define CREATE_TRACE_POINTS 11 + #include "i915_trace.h"
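This two-line file exists because the tracepoint bodies must be emitted in exactly one translation unit: defining CREATE_TRACE_POINTS before including i915_trace.h makes the TRACE_EVENT() macros expand into real definitions here. Every other i915 source that fires an event just includes the header, for example:

    /* Any other i915 file (no CREATE_TRACE_POINTS defined): */
    #include "i915_trace.h"

    /* ...which provides the generated hooks used in the hunks above: */
    trace_i915_gem_object_create(obj);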
+3
drivers/gpu/drm/i915/intel_bios.c
··· 217 217 if (IS_I85X(dev_priv->dev)) 218 218 dev_priv->lvds_ssc_freq = 219 219 general->ssc_freq ? 66 : 48; 220 + else if (IS_IGDNG(dev_priv->dev)) 221 + dev_priv->lvds_ssc_freq = 222 + general->ssc_freq ? 100 : 120; 220 223 else 221 224 dev_priv->lvds_ssc_freq = 222 225 general->ssc_freq ? 100 : 96;
+2 -7
drivers/gpu/drm/i915/intel_crt.c
··· 179 179 { 180 180 struct drm_device *dev = connector->dev; 181 181 struct drm_i915_private *dev_priv = dev->dev_private; 182 - u32 adpa, temp; 182 + u32 adpa; 183 183 bool ret; 184 184 185 - temp = adpa = I915_READ(PCH_ADPA); 186 - 187 - adpa &= ~ADPA_DAC_ENABLE; 188 - I915_WRITE(PCH_ADPA, adpa); 185 + adpa = I915_READ(PCH_ADPA); 189 186 190 187 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 191 188 ··· 209 212 else 210 213 ret = false; 211 214 212 - /* restore origin register */ 213 - I915_WRITE(PCH_ADPA, temp); 214 215 return ret; 215 216 } 216 217
+511 -109
drivers/gpu/drm/i915/intel_display.c
··· 24 24 * Eric Anholt <eric@anholt.net> 25 25 */ 26 26 27 + #include <linux/module.h> 28 + #include <linux/input.h> 27 29 #include <linux/i2c.h> 28 30 #include <linux/kernel.h> 29 31 #include "drmP.h" ··· 877 875 refclk, best_clock); 878 876 879 877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 880 - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 878 + if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 881 879 LVDS_CLKB_POWER_UP) 882 880 clock.p2 = limit->p2.p2_fast; 883 881 else ··· 954 952 mdelay(20); 955 953 } 956 954 955 + /* Parameters have changed, update FBC info */ 956 + static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 957 + { 958 + struct drm_device *dev = crtc->dev; 959 + struct drm_i915_private *dev_priv = dev->dev_private; 960 + struct drm_framebuffer *fb = crtc->fb; 961 + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 962 + struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 963 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 964 + int plane, i; 965 + u32 fbc_ctl, fbc_ctl2; 966 + 967 + dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 968 + 969 + if (fb->pitch < dev_priv->cfb_pitch) 970 + dev_priv->cfb_pitch = fb->pitch; 971 + 972 + /* FBC_CTL wants 64B units */ 973 + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 974 + dev_priv->cfb_fence = obj_priv->fence_reg; 975 + dev_priv->cfb_plane = intel_crtc->plane; 976 + plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 977 + 978 + /* Clear old tags */ 979 + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 980 + I915_WRITE(FBC_TAG + (i * 4), 0); 981 + 982 + /* Set it up... */ 983 + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 984 + if (obj_priv->tiling_mode != I915_TILING_NONE) 985 + fbc_ctl2 |= FBC_CTL_CPU_FENCE; 986 + I915_WRITE(FBC_CONTROL2, fbc_ctl2); 987 + I915_WRITE(FBC_FENCE_OFF, crtc->y); 988 + 989 + /* enable it... 
*/ 990 + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 991 + fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 992 + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 993 + if (obj_priv->tiling_mode != I915_TILING_NONE) 994 + fbc_ctl |= dev_priv->cfb_fence; 995 + I915_WRITE(FBC_CONTROL, fbc_ctl); 996 + 997 + DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", 998 + dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 999 + } 1000 + 1001 + void i8xx_disable_fbc(struct drm_device *dev) 1002 + { 1003 + struct drm_i915_private *dev_priv = dev->dev_private; 1004 + u32 fbc_ctl; 1005 + 1006 + if (!I915_HAS_FBC(dev)) 1007 + return; 1008 + 1009 + /* Disable compression */ 1010 + fbc_ctl = I915_READ(FBC_CONTROL); 1011 + fbc_ctl &= ~FBC_CTL_EN; 1012 + I915_WRITE(FBC_CONTROL, fbc_ctl); 1013 + 1014 + /* Wait for compressing bit to clear */ 1015 + while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) 1016 + ; /* nothing */ 1017 + 1018 + intel_wait_for_vblank(dev); 1019 + 1020 + DRM_DEBUG("disabled FBC\n"); 1021 + } 1022 + 1023 + static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1024 + { 1025 + struct drm_device *dev = crtc->dev; 1026 + struct drm_i915_private *dev_priv = dev->dev_private; 1027 + 1028 + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 1029 + } 1030 + 1031 + static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1032 + { 1033 + struct drm_device *dev = crtc->dev; 1034 + struct drm_i915_private *dev_priv = dev->dev_private; 1035 + struct drm_framebuffer *fb = crtc->fb; 1036 + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1037 + struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1038 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1039 + int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : 1040 + DPFC_CTL_PLANEB); 1041 + unsigned long stall_watermark = 200; 1042 + u32 dpfc_ctl; 1043 + 1044 + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1045 + dev_priv->cfb_fence = obj_priv->fence_reg; 1046 + dev_priv->cfb_plane = intel_crtc->plane; 1047 + 1048 + dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1049 + if (obj_priv->tiling_mode != I915_TILING_NONE) { 1050 + dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1051 + I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 1052 + } else { 1053 + I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); 1054 + } 1055 + 1056 + I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1057 + I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1058 + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1059 + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1060 + I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 1061 + 1062 + /* enable it... 
*/ 1063 + I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 1064 + 1065 + DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); 1066 + } 1067 + 1068 + void g4x_disable_fbc(struct drm_device *dev) 1069 + { 1070 + struct drm_i915_private *dev_priv = dev->dev_private; 1071 + u32 dpfc_ctl; 1072 + 1073 + /* Disable compression */ 1074 + dpfc_ctl = I915_READ(DPFC_CONTROL); 1075 + dpfc_ctl &= ~DPFC_CTL_EN; 1076 + I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1077 + intel_wait_for_vblank(dev); 1078 + 1079 + DRM_DEBUG("disabled FBC\n"); 1080 + } 1081 + 1082 + static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1083 + { 1084 + struct drm_device *dev = crtc->dev; 1085 + struct drm_i915_private *dev_priv = dev->dev_private; 1086 + 1087 + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1088 + } 1089 + 1090 + /** 1091 + * intel_update_fbc - enable/disable FBC as needed 1092 + * @crtc: CRTC to point the compressor at 1093 + * @mode: mode in use 1094 + * 1095 + * Set up the framebuffer compression hardware at mode set time. We 1096 + * enable it if possible: 1097 + * - plane A only (on pre-965) 1098 + * - no pixel mulitply/line duplication 1099 + * - no alpha buffer discard 1100 + * - no dual wide 1101 + * - framebuffer <= 2048 in width, 1536 in height 1102 + * 1103 + * We can't assume that any compression will take place (worst case), 1104 + * so the compressed buffer has to be the same size as the uncompressed 1105 + * one. It also must reside (along with the line length buffer) in 1106 + * stolen memory. 1107 + * 1108 + * We need to enable/disable FBC on a global basis. 1109 + */ 1110 + static void intel_update_fbc(struct drm_crtc *crtc, 1111 + struct drm_display_mode *mode) 1112 + { 1113 + struct drm_device *dev = crtc->dev; 1114 + struct drm_i915_private *dev_priv = dev->dev_private; 1115 + struct drm_framebuffer *fb = crtc->fb; 1116 + struct intel_framebuffer *intel_fb; 1117 + struct drm_i915_gem_object *obj_priv; 1118 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1119 + int plane = intel_crtc->plane; 1120 + 1121 + if (!i915_powersave) 1122 + return; 1123 + 1124 + if (!dev_priv->display.fbc_enabled || 1125 + !dev_priv->display.enable_fbc || 1126 + !dev_priv->display.disable_fbc) 1127 + return; 1128 + 1129 + if (!crtc->fb) 1130 + return; 1131 + 1132 + intel_fb = to_intel_framebuffer(fb); 1133 + obj_priv = intel_fb->obj->driver_private; 1134 + 1135 + /* 1136 + * If FBC is already on, we just have to verify that we can 1137 + * keep it that way... 1138 + * Need to disable if: 1139 + * - changing FBC params (stride, fence, mode) 1140 + * - new fb is too large to fit in compressed buffer 1141 + * - going to an unsupported config (interlace, pixel multiply, etc.) 
1142 + */ 1143 + if (intel_fb->obj->size > dev_priv->cfb_size) { 1144 + DRM_DEBUG("framebuffer too large, disabling compression\n"); 1145 + goto out_disable; 1146 + } 1147 + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1148 + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1149 + DRM_DEBUG("mode incompatible with compression, disabling\n"); 1150 + goto out_disable; 1151 + } 1152 + if ((mode->hdisplay > 2048) || 1153 + (mode->vdisplay > 1536)) { 1154 + DRM_DEBUG("mode too large for compression, disabling\n"); 1155 + goto out_disable; 1156 + } 1157 + if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1158 + DRM_DEBUG("plane not 0, disabling compression\n"); 1159 + goto out_disable; 1160 + } 1161 + if (obj_priv->tiling_mode != I915_TILING_X) { 1162 + DRM_DEBUG("framebuffer not tiled, disabling compression\n"); 1163 + goto out_disable; 1164 + } 1165 + 1166 + if (dev_priv->display.fbc_enabled(crtc)) { 1167 + /* We can re-enable it in this case, but need to update pitch */ 1168 + if (fb->pitch > dev_priv->cfb_pitch) 1169 + dev_priv->display.disable_fbc(dev); 1170 + if (obj_priv->fence_reg != dev_priv->cfb_fence) 1171 + dev_priv->display.disable_fbc(dev); 1172 + if (plane != dev_priv->cfb_plane) 1173 + dev_priv->display.disable_fbc(dev); 1174 + } 1175 + 1176 + if (!dev_priv->display.fbc_enabled(crtc)) { 1177 + /* Now try to turn it back on if possible */ 1178 + dev_priv->display.enable_fbc(crtc, 500); 1179 + } 1180 + 1181 + return; 1182 + 1183 + out_disable: 1184 + DRM_DEBUG("unsupported config, disabling FBC\n"); 1185 + /* Multiple disables should be harmless */ 1186 + if (dev_priv->display.fbc_enabled(crtc)) 1187 + dev_priv->display.disable_fbc(dev); 1188 + } 1189 + 957 1190 static int 958 1191 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 959 1192 struct drm_framebuffer *old_fb) ··· 1201 964 struct drm_i915_gem_object *obj_priv; 1202 965 struct drm_gem_object *obj; 1203 966 int pipe = intel_crtc->pipe; 967 + int plane = intel_crtc->plane; 1204 968 unsigned long Start, Offset; 1205 - int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); 1206 - int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); 1207 - int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; 1208 - int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); 1209 - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 969 + int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); 970 + int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); 971 + int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; 972 + int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); 973 + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1210 974 u32 dspcntr, alignment; 1211 975 int ret; 1212 976 ··· 1217 979 return 0; 1218 980 } 1219 981 1220 - switch (pipe) { 982 + switch (plane) { 1221 983 case 0: 1222 984 case 1: 1223 985 break; 1224 986 default: 1225 - DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); 987 + DRM_ERROR("Can't update plane %d in SAREA\n", plane); 1226 988 return -EINVAL; 1227 989 } 1228 990 ··· 1323 1085 I915_WRITE(dspbase, Start + Offset); 1324 1086 I915_READ(dspbase); 1325 1087 } 1088 + 1089 + if ((IS_I965G(dev) || plane == 0)) 1090 + intel_update_fbc(crtc, &crtc->mode); 1326 1091 1327 1092 intel_wait_for_vblank(dev); 1328 1093 ··· 1458 1217 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1459 1218 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1460 1219 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; 1220 + int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; 1461 1221 int cpu_htot_reg = (pipe == 0) ? 
HTOTAL_A : HTOTAL_B; 1462 1222 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1463 1223 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; ··· 1508 1266 I915_READ(fdi_tx_reg); 1509 1267 udelay(100); 1510 1268 } 1269 + } 1270 + 1271 + /* Enable panel fitting for LVDS */ 1272 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 1273 + temp = I915_READ(pf_ctl_reg); 1274 + I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); 1275 + 1276 + /* currently full aspect */ 1277 + I915_WRITE(pf_win_pos, 0); 1278 + 1279 + I915_WRITE(pf_win_size, 1280 + (dev_priv->panel_fixed_mode->hdisplay << 16) | 1281 + (dev_priv->panel_fixed_mode->vdisplay)); 1511 1282 } 1512 1283 1513 1284 /* Enable CPU pipe */ ··· 1787 1532 struct drm_i915_private *dev_priv = dev->dev_private; 1788 1533 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1789 1534 int pipe = intel_crtc->pipe; 1535 + int plane = intel_crtc->plane; 1790 1536 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 1791 - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 1792 - int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1537 + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1538 + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; 1793 1539 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1794 1540 u32 temp; 1795 1541 ··· 1833 1577 1834 1578 intel_crtc_load_lut(crtc); 1835 1579 1580 + if ((IS_I965G(dev) || plane == 0)) 1581 + intel_update_fbc(crtc, &crtc->mode); 1582 + 1836 1583 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1837 1584 //intel_crtc_dpms_video(crtc, true); TODO 1838 1585 intel_update_watermarks(dev); ··· 1844 1585 intel_update_watermarks(dev); 1845 1586 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1846 1587 //intel_crtc_dpms_video(crtc, FALSE); TODO 1588 + 1589 + if (dev_priv->cfb_plane == plane && 1590 + dev_priv->display.disable_fbc) 1591 + dev_priv->display.disable_fbc(dev); 1847 1592 1848 1593 /* Disable the VGA plane that we never use */ 1849 1594 i915_disable_vga(dev); ··· 1897 1634 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 1898 1635 { 1899 1636 struct drm_device *dev = crtc->dev; 1637 + struct drm_i915_private *dev_priv = dev->dev_private; 1900 1638 struct drm_i915_master_private *master_priv; 1901 1639 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1902 1640 int pipe = intel_crtc->pipe; 1903 1641 bool enabled; 1904 1642 1905 - if (IS_IGDNG(dev)) 1906 - igdng_crtc_dpms(crtc, mode); 1907 - else 1908 - i9xx_crtc_dpms(crtc, mode); 1643 + dev_priv->display.dpms(crtc, mode); 1909 1644 1910 1645 intel_crtc->dpms_mode = mode; 1911 1646 ··· 1970 1709 return true; 1971 1710 } 1972 1711 1973 - 1974 - /** Returns the core display clock speed for i830 - i945 */ 1975 - static int intel_get_core_clock_speed(struct drm_device *dev) 1712 + static int i945_get_display_clock_speed(struct drm_device *dev) 1976 1713 { 1714 + return 400000; 1715 + } 1977 1716 1978 - /* Core clock values taken from the published datasheets. 1979 - * The 830 may go up to 166 Mhz, which we should check. 
1980 - */ 1981 - if (IS_I945G(dev)) 1982 - return 400000; 1983 - else if (IS_I915G(dev)) 1984 - return 333000; 1985 - else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) 1986 - return 200000; 1987 - else if (IS_I915GM(dev)) { 1988 - u16 gcfgc = 0; 1717 + static int i915_get_display_clock_speed(struct drm_device *dev) 1718 + { 1719 + return 333000; 1720 + } 1989 1721 1990 - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 1722 + static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 1723 + { 1724 + return 200000; 1725 + } 1991 1726 1992 - if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 1993 - return 133000; 1994 - else { 1995 - switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 1996 - case GC_DISPLAY_CLOCK_333_MHZ: 1997 - return 333000; 1998 - default: 1999 - case GC_DISPLAY_CLOCK_190_200_MHZ: 2000 - return 190000; 2001 - } 2002 - } 2003 - } else if (IS_I865G(dev)) 2004 - return 266000; 2005 - else if (IS_I855(dev)) { 2006 - u16 hpllcc = 0; 2007 - /* Assume that the hardware is in the high speed state. This 2008 - * should be the default. 2009 - */ 2010 - switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 2011 - case GC_CLOCK_133_200: 2012 - case GC_CLOCK_100_200: 2013 - return 200000; 2014 - case GC_CLOCK_166_250: 2015 - return 250000; 2016 - case GC_CLOCK_100_133: 2017 - return 133000; 2018 - } 2019 - } else /* 852, 830 */ 1727 + static int i915gm_get_display_clock_speed(struct drm_device *dev) 1728 + { 1729 + u16 gcfgc = 0; 1730 + 1731 + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 1732 + 1733 + if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 2020 1734 return 133000; 1735 + else { 1736 + switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 1737 + case GC_DISPLAY_CLOCK_333_MHZ: 1738 + return 333000; 1739 + default: 1740 + case GC_DISPLAY_CLOCK_190_200_MHZ: 1741 + return 190000; 1742 + } 1743 + } 1744 + } 2021 1745 2022 - return 0; /* Silence gcc warning */ 1746 + static int i865_get_display_clock_speed(struct drm_device *dev) 1747 + { 1748 + return 266000; 1749 + } 1750 + 1751 + static int i855_get_display_clock_speed(struct drm_device *dev) 1752 + { 1753 + u16 hpllcc = 0; 1754 + /* Assume that the hardware is in the high speed state. This 1755 + * should be the default. 1756 + */ 1757 + switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 1758 + case GC_CLOCK_133_200: 1759 + case GC_CLOCK_100_200: 1760 + return 200000; 1761 + case GC_CLOCK_166_250: 1762 + return 250000; 1763 + case GC_CLOCK_100_133: 1764 + return 133000; 1765 + } 1766 + 1767 + /* Shouldn't happen */ 1768 + return 0; 1769 + } 1770 + 1771 + static int i830_get_display_clock_speed(struct drm_device *dev) 1772 + { 1773 + return 133000; 2023 1774 } 2024 1775 2025 1776 /** ··· 2194 1921 { 2195 1922 long entries_required, wm_size; 2196 1923 2197 - entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; 1924 + /* 1925 + * Note: we need to make sure we don't overflow for various clock & 1926 + * latency values. 1927 + * clocks go from a few thousand to several hundred thousand. 
1928 + * latency is usually a few thousand 1929 + */ 1930 + entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1931 + 1000; 2198 1932 entries_required /= wm->cacheline_size; 2199 1933 2200 1934 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); ··· 2266 1986 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2267 1987 latency = &cxsr_latency_table[i]; 2268 1988 if (is_desktop == latency->is_desktop && 2269 - fsb == latency->fsb_freq && mem == latency->mem_freq) 2270 - break; 1989 + fsb == latency->fsb_freq && mem == latency->mem_freq) 1990 + return latency; 2271 1991 } 2272 - if (i >= ARRAY_SIZE(cxsr_latency_table)) { 2273 - DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2274 - return NULL; 2275 - } 2276 - return latency; 1992 + 1993 + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 1994 + 1995 + return NULL; 2277 1996 } 2278 1997 2279 1998 static void igd_disable_cxsr(struct drm_device *dev) ··· 2363 2084 */ 2364 2085 const static int latency_ns = 5000; 2365 2086 2366 - static int intel_get_fifo_size(struct drm_device *dev, int plane) 2087 + static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2367 2088 { 2368 2089 struct drm_i915_private *dev_priv = dev->dev_private; 2369 2090 uint32_t dsparb = I915_READ(DSPARB); 2370 2091 int size; 2371 2092 2372 - if (IS_I9XX(dev)) { 2373 - if (plane == 0) 2374 - size = dsparb & 0x7f; 2375 - else 2376 - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - 2377 - (dsparb & 0x7f); 2378 - } else if (IS_I85X(dev)) { 2379 - if (plane == 0) 2380 - size = dsparb & 0x1ff; 2381 - else 2382 - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - 2383 - (dsparb & 0x1ff); 2384 - size >>= 1; /* Convert to cachelines */ 2385 - } else if (IS_845G(dev)) { 2093 + if (plane == 0) 2386 2094 size = dsparb & 0x7f; 2387 - size >>= 2; /* Convert to cachelines */ 2388 - } else { 2389 - size = dsparb & 0x7f; 2390 - size >>= 1; /* Convert to cachelines */ 2391 - } 2095 + else 2096 + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - 2097 + (dsparb & 0x7f); 2392 2098 2393 2099 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2394 2100 size); ··· 2381 2117 return size; 2382 2118 } 2383 2119 2384 - static void g4x_update_wm(struct drm_device *dev) 2120 + static int i85x_get_fifo_size(struct drm_device *dev, int plane) 2121 + { 2122 + struct drm_i915_private *dev_priv = dev->dev_private; 2123 + uint32_t dsparb = I915_READ(DSPARB); 2124 + int size; 2125 + 2126 + if (plane == 0) 2127 + size = dsparb & 0x1ff; 2128 + else 2129 + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - 2130 + (dsparb & 0x1ff); 2131 + size >>= 1; /* Convert to cachelines */ 2132 + 2133 + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2134 + size); 2135 + 2136 + return size; 2137 + } 2138 + 2139 + static int i845_get_fifo_size(struct drm_device *dev, int plane) 2140 + { 2141 + struct drm_i915_private *dev_priv = dev->dev_private; 2142 + uint32_t dsparb = I915_READ(DSPARB); 2143 + int size; 2144 + 2145 + size = dsparb & 0x7f; 2146 + size >>= 2; /* Convert to cachelines */ 2147 + 2148 + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? 
"B" : "A", 2149 + size); 2150 + 2151 + return size; 2152 + } 2153 + 2154 + static int i830_get_fifo_size(struct drm_device *dev, int plane) 2155 + { 2156 + struct drm_i915_private *dev_priv = dev->dev_private; 2157 + uint32_t dsparb = I915_READ(DSPARB); 2158 + int size; 2159 + 2160 + size = dsparb & 0x7f; 2161 + size >>= 1; /* Convert to cachelines */ 2162 + 2163 + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2164 + size); 2165 + 2166 + return size; 2167 + } 2168 + 2169 + static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, 2170 + int unused3, int unused4) 2385 2171 { 2386 2172 struct drm_i915_private *dev_priv = dev->dev_private; 2387 2173 u32 fw_blc_self = I915_READ(FW_BLC_SELF); ··· 2443 2129 I915_WRITE(FW_BLC_SELF, fw_blc_self); 2444 2130 } 2445 2131 2446 - static void i965_update_wm(struct drm_device *dev) 2132 + static void i965_update_wm(struct drm_device *dev, int unused, int unused2, 2133 + int unused3, int unused4) 2447 2134 { 2448 2135 struct drm_i915_private *dev_priv = dev->dev_private; 2449 2136 ··· 2480 2165 cacheline_size = planea_params.cacheline_size; 2481 2166 2482 2167 /* Update per-plane FIFO sizes */ 2483 - planea_params.fifo_size = intel_get_fifo_size(dev, 0); 2484 - planeb_params.fifo_size = intel_get_fifo_size(dev, 1); 2168 + planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); 2169 + planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); 2485 2170 2486 2171 planea_wm = intel_calculate_wm(planea_clock, &planea_params, 2487 2172 pixel_size, latency_ns); ··· 2528 2213 I915_WRITE(FW_BLC2, fwater_hi); 2529 2214 } 2530 2215 2531 - static void i830_update_wm(struct drm_device *dev, int planea_clock, 2532 - int pixel_size) 2216 + static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, 2217 + int unused2, int pixel_size) 2533 2218 { 2534 2219 struct drm_i915_private *dev_priv = dev->dev_private; 2535 2220 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; 2536 2221 int planea_wm; 2537 2222 2538 - i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); 2223 + i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); 2539 2224 2540 2225 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, 2541 2226 pixel_size, latency_ns); ··· 2579 2264 */ 2580 2265 static void intel_update_watermarks(struct drm_device *dev) 2581 2266 { 2267 + struct drm_i915_private *dev_priv = dev->dev_private; 2582 2268 struct drm_crtc *crtc; 2583 2269 struct intel_crtc *intel_crtc; 2584 2270 int sr_hdisplay = 0; ··· 2618 2302 else if (IS_IGD(dev)) 2619 2303 igd_disable_cxsr(dev); 2620 2304 2621 - if (IS_G4X(dev)) 2622 - g4x_update_wm(dev); 2623 - else if (IS_I965G(dev)) 2624 - i965_update_wm(dev); 2625 - else if (IS_I9XX(dev) || IS_MOBILE(dev)) 2626 - i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, 2627 - pixel_size); 2628 - else 2629 - i830_update_wm(dev, planea_clock, pixel_size); 2305 + dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 2306 + sr_hdisplay, pixel_size); 2630 2307 } 2631 2308 2632 2309 static int intel_crtc_mode_set(struct drm_crtc *crtc, ··· 2632 2323 struct drm_i915_private *dev_priv = dev->dev_private; 2633 2324 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2634 2325 int pipe = intel_crtc->pipe; 2326 + int plane = intel_crtc->plane; 2635 2327 int fp_reg = (pipe == 0) ? FPA0 : FPB0; 2636 2328 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 2637 2329 int dpll_md_reg = (intel_crtc->pipe == 0) ? 
DPLL_A_MD : DPLL_B_MD; 2638 - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 2330 + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 2639 2331 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 2640 2332 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 2641 2333 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; ··· 2644 2334 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; 2645 2335 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; 2646 2336 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; 2647 - int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; 2648 - int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; 2337 + int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; 2338 + int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; 2649 2339 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2650 2340 int refclk, num_outputs = 0; 2651 2341 intel_clock_t clock, reduced_clock; ··· 2878 2568 enable color space conversion */ 2879 2569 if (!IS_IGDNG(dev)) { 2880 2570 if (pipe == 0) 2881 - dspcntr |= DISPPLANE_SEL_PIPE_A; 2571 + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 2882 2572 else 2883 2573 dspcntr |= DISPPLANE_SEL_PIPE_B; 2884 2574 } ··· 2890 2580 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 2891 2581 * pipe == 0 check? 2892 2582 */ 2893 - if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) 2583 + if (mode->clock > 2584 + dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 2894 2585 pipeconf |= PIPEACONF_DOUBLE_WIDE; 2895 2586 else 2896 2587 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; ··· 2963 2652 udelay(150); 2964 2653 2965 2654 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 2966 - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 2967 - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 2655 + if (is_sdvo) { 2656 + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 2657 + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 2968 2658 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 2659 + } else 2660 + I915_WRITE(dpll_md_reg, 0); 2969 2661 } else { 2970 2662 /* write it again -- the BIOS does, after all */ 2971 2663 I915_WRITE(dpll_reg, dpll); ··· 3048 2734 /* Flush the plane changes */ 3049 2735 ret = intel_pipe_set_base(crtc, x, y, old_fb); 3050 2736 2737 + if ((IS_I965G(dev) || plane == 0)) 2738 + intel_update_fbc(crtc, &crtc->mode); 2739 + 3051 2740 intel_update_watermarks(dev); 3052 2741 3053 2742 drm_vblank_post_modeset(dev, pipe); ··· 3095 2778 struct drm_gem_object *bo; 3096 2779 struct drm_i915_gem_object *obj_priv; 3097 2780 int pipe = intel_crtc->pipe; 2781 + int plane = intel_crtc->plane; 3098 2782 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 3099 2783 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 3100 2784 uint32_t temp = I915_READ(control); ··· 3181 2863 i915_gem_object_unpin(intel_crtc->cursor_bo); 3182 2864 drm_gem_object_unreference(intel_crtc->cursor_bo); 3183 2865 } 2866 + 2867 + if ((IS_I965G(dev) || plane == 0)) 2868 + intel_update_fbc(crtc, &crtc->mode); 2869 + 3184 2870 mutex_unlock(&dev->struct_mutex); 3185 2871 3186 2872 intel_crtc->cursor_addr = addr; ··· 3866 3544 intel_crtc->lut_b[i] = i; 3867 3545 } 3868 3546 3547 + /* Swap pipes & planes for FBC on pre-965 */ 3548 + intel_crtc->pipe = pipe; 3549 + intel_crtc->plane = pipe; 3550 + if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { 3551 + DRM_DEBUG("swapping pipes & planes for FBC\n"); 3552 + intel_crtc->plane = ((pipe == 0) ? 
1 : 0); 3553 + } 3554 + 3869 3555 intel_crtc->cursor_addr = 0; 3870 3556 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 3871 3557 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); ··· 4156 3826 } 4157 3827 } 4158 3828 3829 + /* Set up chip specific display functions */ 3830 + static void intel_init_display(struct drm_device *dev) 3831 + { 3832 + struct drm_i915_private *dev_priv = dev->dev_private; 3833 + 3834 + /* We always want a DPMS function */ 3835 + if (IS_IGDNG(dev)) 3836 + dev_priv->display.dpms = igdng_crtc_dpms; 3837 + else 3838 + dev_priv->display.dpms = i9xx_crtc_dpms; 3839 + 3840 + /* Only mobile has FBC, leave pointers NULL for other chips */ 3841 + if (IS_MOBILE(dev)) { 3842 + if (IS_GM45(dev)) { 3843 + dev_priv->display.fbc_enabled = g4x_fbc_enabled; 3844 + dev_priv->display.enable_fbc = g4x_enable_fbc; 3845 + dev_priv->display.disable_fbc = g4x_disable_fbc; 3846 + } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { 3847 + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 3848 + dev_priv->display.enable_fbc = i8xx_enable_fbc; 3849 + dev_priv->display.disable_fbc = i8xx_disable_fbc; 3850 + } 3851 + /* 855GM needs testing */ 3852 + } 3853 + 3854 + /* Returns the core display clock speed */ 3855 + if (IS_I945G(dev)) 3856 + dev_priv->display.get_display_clock_speed = 3857 + i945_get_display_clock_speed; 3858 + else if (IS_I915G(dev)) 3859 + dev_priv->display.get_display_clock_speed = 3860 + i915_get_display_clock_speed; 3861 + else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) 3862 + dev_priv->display.get_display_clock_speed = 3863 + i9xx_misc_get_display_clock_speed; 3864 + else if (IS_I915GM(dev)) 3865 + dev_priv->display.get_display_clock_speed = 3866 + i915gm_get_display_clock_speed; 3867 + else if (IS_I865G(dev)) 3868 + dev_priv->display.get_display_clock_speed = 3869 + i865_get_display_clock_speed; 3870 + else if (IS_I855(dev)) 3871 + dev_priv->display.get_display_clock_speed = 3872 + i855_get_display_clock_speed; 3873 + else /* 852, 830 */ 3874 + dev_priv->display.get_display_clock_speed = 3875 + i830_get_display_clock_speed; 3876 + 3877 + /* For FIFO watermark updates */ 3878 + if (IS_G4X(dev)) 3879 + dev_priv->display.update_wm = g4x_update_wm; 3880 + else if (IS_I965G(dev)) 3881 + dev_priv->display.update_wm = i965_update_wm; 3882 + else if (IS_I9XX(dev) || IS_MOBILE(dev)) { 3883 + dev_priv->display.update_wm = i9xx_update_wm; 3884 + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 3885 + } else { 3886 + if (IS_I85X(dev)) 3887 + dev_priv->display.get_fifo_size = i85x_get_fifo_size; 3888 + else if (IS_845G(dev)) 3889 + dev_priv->display.get_fifo_size = i845_get_fifo_size; 3890 + else 3891 + dev_priv->display.get_fifo_size = i830_get_fifo_size; 3892 + dev_priv->display.update_wm = i830_update_wm; 3893 + } 3894 + } 3895 + 4159 3896 void intel_modeset_init(struct drm_device *dev) 4160 3897 { 4161 3898 struct drm_i915_private *dev_priv = dev->dev_private; ··· 4235 3838 dev->mode_config.min_height = 0; 4236 3839 4237 3840 dev->mode_config.funcs = (void *)&intel_mode_funcs; 3841 + 3842 + intel_init_display(dev); 4238 3843 4239 3844 if (IS_I965G(dev)) { 4240 3845 dev->mode_config.max_width = 8192; ··· 4302 3903 del_timer_sync(&dev_priv->idle_timer); 4303 3904 4304 3905 mutex_unlock(&dev->struct_mutex); 3906 + 3907 + if (dev_priv->display.disable_fbc) 3908 + dev_priv->display.disable_fbc(dev); 4305 3909 4306 3910 drm_mode_config_cleanup(dev); 4307 3911 }
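Note on the watermark change above: intel_calculate_wm() now divides the dotclock down to MHz before multiplying because, as the new comment says, dotclocks reach several hundred thousand kHz and latencies a few thousand ns, so the old product could overflow a 32-bit long on 32-bit kernels. A standalone sketch with illustrative numbers (not kernel code) showing that the two formulas agree while only the old one overflows:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t clock_in_khz = 400000;	/* ~400 MHz dotclock */
		int32_t pixel_size   = 4;	/* bytes per pixel */
		int32_t latency_ns   = 5000;	/* latency_ns default above */

		/* Old formula: clock_in_khz * pixel_size * latency_ns is ~8e9,
		 * past the ~2.1e9 limit of a signed 32-bit long.  Computed in
		 * 64 bits here only to show what the value would have been. */
		int64_t old_product = (int64_t)clock_in_khz * pixel_size * latency_ns;
		printf("old intermediate product: %lld\n", (long long)old_product);

		/* New formula: scaling the clock to MHz first keeps the
		 * intermediate near 8e6 before the final divide. */
		int32_t entries = ((clock_in_khz / 1000) * pixel_size * latency_ns)
				  / 1000;
		printf("entries required (before cacheline divide): %d\n", entries);
		return 0;
	}

Both formulas evaluate to 8000 entries for these inputs; only the size of the intermediate differs.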
+3 -2
drivers/gpu/drm/i915/intel_drv.h
··· 28 28 #include <linux/i2c.h> 29 29 #include <linux/i2c-id.h> 30 30 #include <linux/i2c-algo-bit.h> 31 + #include "i915_drv.h" 31 32 #include "drm_crtc.h" 32 33 33 34 #include "drm_crtc_helper.h" ··· 112 111 113 112 struct intel_crtc { 114 113 struct drm_crtc base; 115 - int pipe; 116 - int plane; 114 + enum pipe pipe; 115 + enum plane plane; 117 116 struct drm_gem_object *cursor_bo; 118 117 uint32_t cursor_addr; 119 118 u8 lut_r[256], lut_g[256], lut_b[256];
+57 -6
drivers/gpu/drm/i915/intel_lvds.c
··· 27 27 * Jesse Barnes <jesse.barnes@intel.com> 28 28 */ 29 29 30 + #include <acpi/button.h> 30 31 #include <linux/dmi.h> 31 32 #include <linux/i2c.h> 32 33 #include "drmP.h" ··· 296 295 goto out; 297 296 } 298 297 298 + /* full screen scale for now */ 299 + if (IS_IGDNG(dev)) 300 + goto out; 301 + 299 302 /* 965+ wants fuzzy fitting */ 300 303 if (IS_I965G(dev)) 301 304 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | ··· 327 322 * to register description and PRM. 328 323 * Change the value here to see the borders for debugging 329 324 */ 330 - I915_WRITE(BCLRPAT_A, 0); 331 - I915_WRITE(BCLRPAT_B, 0); 325 + if (!IS_IGDNG(dev)) { 326 + I915_WRITE(BCLRPAT_A, 0); 327 + I915_WRITE(BCLRPAT_B, 0); 328 + } 332 329 333 330 switch (lvds_priv->fitting_mode) { 334 331 case DRM_MODE_SCALE_CENTER: ··· 579 572 * settings. 580 573 */ 581 574 582 - /* No panel fitting yet, fixme */ 583 575 if (IS_IGDNG(dev)) 584 576 return; 585 577 ··· 591 585 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); 592 586 } 593 587 588 + /* Some lid devices report incorrect lid status, assume they're connected */ 589 + static const struct dmi_system_id bad_lid_status[] = { 590 + { 591 + .ident = "Aspire One", 592 + .matches = { 593 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 594 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 595 + }, 596 + }, 597 + { } 598 + }; 599 + 594 600 /** 595 601 * Detect the LVDS connection. 596 602 * 597 - * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have 598 - * been set up if the LVDS was actually connected anyway. 603 + * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means 604 + * connected and closed means disconnected. We also send hotplug events as 605 + * needed, using lid status notification from the input layer. 
599 606 */ 600 607 static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 601 608 { 602 - return connector_status_connected; 609 + enum drm_connector_status status = connector_status_connected; 610 + 611 + if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 612 + status = connector_status_disconnected; 613 + 614 + return status; 603 615 } 604 616 605 617 /** ··· 656 632 return 0; 657 633 } 658 634 635 + static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 636 + void *unused) 637 + { 638 + struct drm_i915_private *dev_priv = 639 + container_of(nb, struct drm_i915_private, lid_notifier); 640 + struct drm_device *dev = dev_priv->dev; 641 + 642 + if (acpi_lid_open() && !dev_priv->suspended) { 643 + mutex_lock(&dev->mode_config.mutex); 644 + drm_helper_resume_force_mode(dev); 645 + mutex_unlock(&dev->mode_config.mutex); 646 + } 647 + 648 + drm_sysfs_hotplug_event(dev_priv->dev); 649 + 650 + return NOTIFY_OK; 651 + } 652 + 659 653 /** 660 654 * intel_lvds_destroy - unregister and free LVDS structures 661 655 * @connector: connector to free ··· 683 641 */ 684 642 static void intel_lvds_destroy(struct drm_connector *connector) 685 643 { 644 + struct drm_device *dev = connector->dev; 686 645 struct intel_output *intel_output = to_intel_output(connector); 646 + struct drm_i915_private *dev_priv = dev->dev_private; 687 647 688 648 if (intel_output->ddc_bus) 689 649 intel_i2c_destroy(intel_output->ddc_bus); 650 + if (dev_priv->lid_notifier.notifier_call) 651 + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 690 652 drm_sysfs_connector_remove(connector); 691 653 drm_connector_cleanup(connector); 692 654 kfree(connector); ··· 1056 1010 pwm = I915_READ(BLC_PWM_PCH_CTL1); 1057 1011 pwm |= PWM_PCH_ENABLE; 1058 1012 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1013 + } 1014 + dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1015 + if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1016 + DRM_DEBUG("lid notifier registration failed\n"); 1017 + dev_priv->lid_notifier.notifier_call = NULL; 1059 1018 } 1060 1019 drm_sysfs_connector_add(connector); 1061 1020 return;
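With the detect change above, LVDS now reports disconnected whenever acpi_lid_open() says the lid is closed, unless the machine appears in the bad_lid_status DMI blacklist for hardware (like the Aspire One) that reports lid state incorrectly. A hedged sketch of how a further quirk would be appended; the second entry's vendor/product strings are made up for illustration, and only the Aspire One entry exists in this patch:

	static const struct dmi_system_id bad_lid_status[] = {
		{
			.ident = "Aspire One",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
			},
		},
		{
			/* hypothetical second quirk, same shape */
			.ident = "Example Netbook",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Netbook"),
			},
		},
		{ }	/* terminator keeps dmi_check_system() bounded */
	};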
+498 -4
drivers/gpu/drm/i915/intel_sdvo.c
··· 135 135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 136 136 struct intel_sdvo_dtd save_output_dtd[16]; 137 137 u32 save_SDVOX; 138 + /* add the property for the SDVO-TV */ 139 + struct drm_property *left_property; 140 + struct drm_property *right_property; 141 + struct drm_property *top_property; 142 + struct drm_property *bottom_property; 143 + struct drm_property *hpos_property; 144 + struct drm_property *vpos_property; 145 + 146 + /* add the property for the SDVO-TV/LVDS */ 147 + struct drm_property *brightness_property; 148 + struct drm_property *contrast_property; 149 + struct drm_property *saturation_property; 150 + struct drm_property *hue_property; 151 + 152 + /* Add variable to record current setting for the above property */ 153 + u32 left_margin, right_margin, top_margin, bottom_margin; 154 + /* this is to get the range of margin.*/ 155 + u32 max_hscan, max_vscan; 156 + u32 max_hpos, cur_hpos; 157 + u32 max_vpos, cur_vpos; 158 + u32 cur_brightness, max_brightness; 159 + u32 cur_contrast, max_contrast; 160 + u32 cur_saturation, max_saturation; 161 + u32 cur_hue, max_hue; 138 162 }; 139 163 140 164 static bool ··· 305 281 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), 306 282 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), 307 283 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), 284 + /* Add the op code for SDVO enhancements */ 285 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), 286 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), 287 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), 288 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), 289 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), 290 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), 291 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), 292 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), 293 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), 294 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), 295 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), 296 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), 297 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), 298 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), 299 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), 300 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), 301 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), 302 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), 303 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), 304 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), 305 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), 306 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), 307 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), 308 + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), 308 309 /* HDMI op code */ 309 310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), 310 311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), ··· 1030 981 1031 982 status = intel_sdvo_read_response(output, NULL, 0); 1032 983 if (status != SDVO_CMD_STATUS_SUCCESS) 1033 - DRM_DEBUG("%s: Failed to set TV format\n", 984 + DRM_DEBUG_KMS("%s: Failed to set TV format\n", 1034 985 SDVO_NAME(sdvo_priv)); 1035 986 } 1036 987 ··· 1841 1792 return 1; 1842 1793 } 1843 1794 1795 + static 1796 + void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) 1797 + { 1798 + struct intel_output *intel_output = to_intel_output(connector); 1799 + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1800 + struct drm_device *dev = connector->dev; 1801 + 1802 + if (sdvo_priv->is_tv) { 1803 + if (sdvo_priv->left_property) 1804 + drm_property_destroy(dev, sdvo_priv->left_property); 1805 
+ if (sdvo_priv->right_property) 1806 + drm_property_destroy(dev, sdvo_priv->right_property); 1807 + if (sdvo_priv->top_property) 1808 + drm_property_destroy(dev, sdvo_priv->top_property); 1809 + if (sdvo_priv->bottom_property) 1810 + drm_property_destroy(dev, sdvo_priv->bottom_property); 1811 + if (sdvo_priv->hpos_property) 1812 + drm_property_destroy(dev, sdvo_priv->hpos_property); 1813 + if (sdvo_priv->vpos_property) 1814 + drm_property_destroy(dev, sdvo_priv->vpos_property); 1815 + } 1816 + if (sdvo_priv->is_tv) { 1817 + if (sdvo_priv->saturation_property) 1818 + drm_property_destroy(dev, 1819 + sdvo_priv->saturation_property); 1820 + if (sdvo_priv->contrast_property) 1821 + drm_property_destroy(dev, 1822 + sdvo_priv->contrast_property); 1823 + if (sdvo_priv->hue_property) 1824 + drm_property_destroy(dev, sdvo_priv->hue_property); 1825 + } 1826 + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 1827 + if (sdvo_priv->brightness_property) 1828 + drm_property_destroy(dev, 1829 + sdvo_priv->brightness_property); 1830 + } 1831 + return; 1832 + } 1833 + 1844 1834 static void intel_sdvo_destroy(struct drm_connector *connector) 1845 1835 { 1846 1836 struct intel_output *intel_output = to_intel_output(connector); ··· 1900 1812 drm_property_destroy(connector->dev, 1901 1813 sdvo_priv->tv_format_property); 1902 1814 1815 + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) 1816 + intel_sdvo_destroy_enhance_property(connector); 1817 + 1903 1818 drm_sysfs_connector_remove(connector); 1904 1819 drm_connector_cleanup(connector); 1905 1820 ··· 1920 1829 struct drm_crtc *crtc = encoder->crtc; 1921 1830 int ret = 0; 1922 1831 bool changed = false; 1832 + uint8_t cmd, status; 1833 + uint16_t temp_value; 1923 1834 1924 1835 ret = drm_connector_property_set_value(connector, property, val); 1925 1836 if (ret < 0) ··· 1938 1845 1939 1846 sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; 1940 1847 changed = true; 1941 - } else { 1942 - ret = -EINVAL; 1943 - goto out; 1944 1848 } 1945 1849 1850 + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 1851 + cmd = 0; 1852 + temp_value = val; 1853 + if (sdvo_priv->left_property == property) { 1854 + drm_connector_property_set_value(connector, 1855 + sdvo_priv->right_property, val); 1856 + if (sdvo_priv->left_margin == temp_value) 1857 + goto out; 1858 + 1859 + sdvo_priv->left_margin = temp_value; 1860 + sdvo_priv->right_margin = temp_value; 1861 + temp_value = sdvo_priv->max_hscan - 1862 + sdvo_priv->left_margin; 1863 + cmd = SDVO_CMD_SET_OVERSCAN_H; 1864 + } else if (sdvo_priv->right_property == property) { 1865 + drm_connector_property_set_value(connector, 1866 + sdvo_priv->left_property, val); 1867 + if (sdvo_priv->right_margin == temp_value) 1868 + goto out; 1869 + 1870 + sdvo_priv->left_margin = temp_value; 1871 + sdvo_priv->right_margin = temp_value; 1872 + temp_value = sdvo_priv->max_hscan - 1873 + sdvo_priv->left_margin; 1874 + cmd = SDVO_CMD_SET_OVERSCAN_H; 1875 + } else if (sdvo_priv->top_property == property) { 1876 + drm_connector_property_set_value(connector, 1877 + sdvo_priv->bottom_property, val); 1878 + if (sdvo_priv->top_margin == temp_value) 1879 + goto out; 1880 + 1881 + sdvo_priv->top_margin = temp_value; 1882 + sdvo_priv->bottom_margin = temp_value; 1883 + temp_value = sdvo_priv->max_vscan - 1884 + sdvo_priv->top_margin; 1885 + cmd = SDVO_CMD_SET_OVERSCAN_V; 1886 + } else if (sdvo_priv->bottom_property == property) { 1887 + drm_connector_property_set_value(connector, 1888 + sdvo_priv->top_property, val); 1889 + if (sdvo_priv->bottom_margin == 
temp_value) 1890 + goto out; 1891 + sdvo_priv->top_margin = temp_value; 1892 + sdvo_priv->bottom_margin = temp_value; 1893 + temp_value = sdvo_priv->max_vscan - 1894 + sdvo_priv->top_margin; 1895 + cmd = SDVO_CMD_SET_OVERSCAN_V; 1896 + } else if (sdvo_priv->hpos_property == property) { 1897 + if (sdvo_priv->cur_hpos == temp_value) 1898 + goto out; 1899 + 1900 + cmd = SDVO_CMD_SET_POSITION_H; 1901 + sdvo_priv->cur_hpos = temp_value; 1902 + } else if (sdvo_priv->vpos_property == property) { 1903 + if (sdvo_priv->cur_vpos == temp_value) 1904 + goto out; 1905 + 1906 + cmd = SDVO_CMD_SET_POSITION_V; 1907 + sdvo_priv->cur_vpos = temp_value; 1908 + } else if (sdvo_priv->saturation_property == property) { 1909 + if (sdvo_priv->cur_saturation == temp_value) 1910 + goto out; 1911 + 1912 + cmd = SDVO_CMD_SET_SATURATION; 1913 + sdvo_priv->cur_saturation = temp_value; 1914 + } else if (sdvo_priv->contrast_property == property) { 1915 + if (sdvo_priv->cur_contrast == temp_value) 1916 + goto out; 1917 + 1918 + cmd = SDVO_CMD_SET_CONTRAST; 1919 + sdvo_priv->cur_contrast = temp_value; 1920 + } else if (sdvo_priv->hue_property == property) { 1921 + if (sdvo_priv->cur_hue == temp_value) 1922 + goto out; 1923 + 1924 + cmd = SDVO_CMD_SET_HUE; 1925 + sdvo_priv->cur_hue = temp_value; 1926 + } else if (sdvo_priv->brightness_property == property) { 1927 + if (sdvo_priv->cur_brightness == temp_value) 1928 + goto out; 1929 + 1930 + cmd = SDVO_CMD_SET_BRIGHTNESS; 1931 + sdvo_priv->cur_brightness = temp_value; 1932 + } 1933 + if (cmd) { 1934 + intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); 1935 + status = intel_sdvo_read_response(intel_output, 1936 + NULL, 0); 1937 + if (status != SDVO_CMD_STATUS_SUCCESS) { 1938 + DRM_DEBUG_KMS("Incorrect SDVO command \n"); 1939 + return -EINVAL; 1940 + } 1941 + changed = true; 1942 + } 1943 + } 1946 1944 if (changed && crtc) 1947 1945 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1948 1946 crtc->y, crtc->fb); ··· 2274 2090 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2275 2091 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2276 2092 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2093 + intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2094 + (1 << INTEL_ANALOG_CLONE_BIT); 2277 2095 } else if (flags & SDVO_OUTPUT_LVDS0) { 2278 2096 2279 2097 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; ··· 2360 2174 drm_connector_attach_property( 2361 2175 connector, sdvo_priv->tv_format_property, 0); 2362 2176 2177 + } 2178 + 2179 + static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2180 + { 2181 + struct intel_output *intel_output = to_intel_output(connector); 2182 + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2183 + struct intel_sdvo_enhancements_reply sdvo_data; 2184 + struct drm_device *dev = connector->dev; 2185 + uint8_t status; 2186 + uint16_t response, data_value[2]; 2187 + 2188 + intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2189 + NULL, 0); 2190 + status = intel_sdvo_read_response(intel_output, &sdvo_data, 2191 + sizeof(sdvo_data)); 2192 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2193 + DRM_DEBUG_KMS(" incorrect response is returned\n"); 2194 + return; 2195 + } 2196 + response = *((uint16_t *)&sdvo_data); 2197 + if (!response) { 2198 + DRM_DEBUG_KMS("No enhancement is supported\n"); 2199 + return; 2200 + } 2201 + if (sdvo_priv->is_tv) { 2202 + /* when horizontal overscan is supported, Add the left/right 2203 + * property 2204 + */ 2205 + if (sdvo_data.overscan_h) { 2206 + 
intel_sdvo_write_cmd(intel_output, 2207 + SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2208 + status = intel_sdvo_read_response(intel_output, 2209 + &data_value, 4); 2210 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2211 + DRM_DEBUG_KMS("Incorrect SDVO max " 2212 + "h_overscan\n"); 2213 + return; 2214 + } 2215 + intel_sdvo_write_cmd(intel_output, 2216 + SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2217 + status = intel_sdvo_read_response(intel_output, 2218 + &response, 2); 2219 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2220 + DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2221 + return; 2222 + } 2223 + sdvo_priv->max_hscan = data_value[0]; 2224 + sdvo_priv->left_margin = data_value[0] - response; 2225 + sdvo_priv->right_margin = sdvo_priv->left_margin; 2226 + sdvo_priv->left_property = 2227 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2228 + "left_margin", 2); 2229 + sdvo_priv->left_property->values[0] = 0; 2230 + sdvo_priv->left_property->values[1] = data_value[0]; 2231 + drm_connector_attach_property(connector, 2232 + sdvo_priv->left_property, 2233 + sdvo_priv->left_margin); 2234 + sdvo_priv->right_property = 2235 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2236 + "right_margin", 2); 2237 + sdvo_priv->right_property->values[0] = 0; 2238 + sdvo_priv->right_property->values[1] = data_value[0]; 2239 + drm_connector_attach_property(connector, 2240 + sdvo_priv->right_property, 2241 + sdvo_priv->right_margin); 2242 + DRM_DEBUG_KMS("h_overscan: max %d, " 2243 + "default %d, current %d\n", 2244 + data_value[0], data_value[1], response); 2245 + } 2246 + if (sdvo_data.overscan_v) { 2247 + intel_sdvo_write_cmd(intel_output, 2248 + SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); 2249 + status = intel_sdvo_read_response(intel_output, 2250 + &data_value, 4); 2251 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2252 + DRM_DEBUG_KMS("Incorrect SDVO max " 2253 + "v_overscan\n"); 2254 + return; 2255 + } 2256 + intel_sdvo_write_cmd(intel_output, 2257 + SDVO_CMD_GET_OVERSCAN_V, NULL, 0); 2258 + status = intel_sdvo_read_response(intel_output, 2259 + &response, 2); 2260 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2261 + DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); 2262 + return; 2263 + } 2264 + sdvo_priv->max_vscan = data_value[0]; 2265 + sdvo_priv->top_margin = data_value[0] - response; 2266 + sdvo_priv->bottom_margin = sdvo_priv->top_margin; 2267 + sdvo_priv->top_property = 2268 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2269 + "top_margin", 2); 2270 + sdvo_priv->top_property->values[0] = 0; 2271 + sdvo_priv->top_property->values[1] = data_value[0]; 2272 + drm_connector_attach_property(connector, 2273 + sdvo_priv->top_property, 2274 + sdvo_priv->top_margin); 2275 + sdvo_priv->bottom_property = 2276 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2277 + "bottom_margin", 2); 2278 + sdvo_priv->bottom_property->values[0] = 0; 2279 + sdvo_priv->bottom_property->values[1] = data_value[0]; 2280 + drm_connector_attach_property(connector, 2281 + sdvo_priv->bottom_property, 2282 + sdvo_priv->bottom_margin); 2283 + DRM_DEBUG_KMS("v_overscan: max %d, " 2284 + "default %d, current %d\n", 2285 + data_value[0], data_value[1], response); 2286 + } 2287 + if (sdvo_data.position_h) { 2288 + intel_sdvo_write_cmd(intel_output, 2289 + SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); 2290 + status = intel_sdvo_read_response(intel_output, 2291 + &data_value, 4); 2292 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2293 + DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); 2294 + return; 2295 + } 2296 + intel_sdvo_write_cmd(intel_output, 2297 + SDVO_CMD_GET_POSITION_H, 
NULL, 0); 2298 + status = intel_sdvo_read_response(intel_output, 2299 + &response, 2); 2300 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2301 + DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); 2302 + return; 2303 + } 2304 + sdvo_priv->max_hpos = data_value[0]; 2305 + sdvo_priv->cur_hpos = response; 2306 + sdvo_priv->hpos_property = 2307 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2308 + "hpos", 2); 2309 + sdvo_priv->hpos_property->values[0] = 0; 2310 + sdvo_priv->hpos_property->values[1] = data_value[0]; 2311 + drm_connector_attach_property(connector, 2312 + sdvo_priv->hpos_property, 2313 + sdvo_priv->cur_hpos); 2314 + DRM_DEBUG_KMS("h_position: max %d, " 2315 + "default %d, current %d\n", 2316 + data_value[0], data_value[1], response); 2317 + } 2318 + if (sdvo_data.position_v) { 2319 + intel_sdvo_write_cmd(intel_output, 2320 + SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); 2321 + status = intel_sdvo_read_response(intel_output, 2322 + &data_value, 4); 2323 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2324 + DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); 2325 + return; 2326 + } 2327 + intel_sdvo_write_cmd(intel_output, 2328 + SDVO_CMD_GET_POSITION_V, NULL, 0); 2329 + status = intel_sdvo_read_response(intel_output, 2330 + &response, 2); 2331 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2332 + DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); 2333 + return; 2334 + } 2335 + sdvo_priv->max_vpos = data_value[0]; 2336 + sdvo_priv->cur_vpos = response; 2337 + sdvo_priv->vpos_property = 2338 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2339 + "vpos", 2); 2340 + sdvo_priv->vpos_property->values[0] = 0; 2341 + sdvo_priv->vpos_property->values[1] = data_value[0]; 2342 + drm_connector_attach_property(connector, 2343 + sdvo_priv->vpos_property, 2344 + sdvo_priv->cur_vpos); 2345 + DRM_DEBUG_KMS("v_position: max %d, " 2346 + "default %d, current %d\n", 2347 + data_value[0], data_value[1], response); 2348 + } 2349 + } 2350 + if (sdvo_priv->is_tv) { 2351 + if (sdvo_data.saturation) { 2352 + intel_sdvo_write_cmd(intel_output, 2353 + SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2354 + status = intel_sdvo_read_response(intel_output, 2355 + &data_value, 4); 2356 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2357 + DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); 2358 + return; 2359 + } 2360 + intel_sdvo_write_cmd(intel_output, 2361 + SDVO_CMD_GET_SATURATION, NULL, 0); 2362 + status = intel_sdvo_read_response(intel_output, 2363 + &response, 2); 2364 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2365 + DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); 2366 + return; 2367 + } 2368 + sdvo_priv->max_saturation = data_value[0]; 2369 + sdvo_priv->cur_saturation = response; 2370 + sdvo_priv->saturation_property = 2371 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2372 + "saturation", 2); 2373 + sdvo_priv->saturation_property->values[0] = 0; 2374 + sdvo_priv->saturation_property->values[1] = 2375 + data_value[0]; 2376 + drm_connector_attach_property(connector, 2377 + sdvo_priv->saturation_property, 2378 + sdvo_priv->cur_saturation); 2379 + DRM_DEBUG_KMS("saturation: max %d, " 2380 + "default %d, current %d\n", 2381 + data_value[0], data_value[1], response); 2382 + } 2383 + if (sdvo_data.contrast) { 2384 + intel_sdvo_write_cmd(intel_output, 2385 + SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); 2386 + status = intel_sdvo_read_response(intel_output, 2387 + &data_value, 4); 2388 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2389 + DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); 2390 + return; 2391 + } 2392 + intel_sdvo_write_cmd(intel_output, 2393 + 
SDVO_CMD_GET_CONTRAST, NULL, 0); 2394 + status = intel_sdvo_read_response(intel_output, 2395 + &response, 2); 2396 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2397 + DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); 2398 + return; 2399 + } 2400 + sdvo_priv->max_contrast = data_value[0]; 2401 + sdvo_priv->cur_contrast = response; 2402 + sdvo_priv->contrast_property = 2403 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2404 + "contrast", 2); 2405 + sdvo_priv->contrast_property->values[0] = 0; 2406 + sdvo_priv->contrast_property->values[1] = data_value[0]; 2407 + drm_connector_attach_property(connector, 2408 + sdvo_priv->contrast_property, 2409 + sdvo_priv->cur_contrast); 2410 + DRM_DEBUG_KMS("contrast: max %d, " 2411 + "default %d, current %d\n", 2412 + data_value[0], data_value[1], response); 2413 + } 2414 + if (sdvo_data.hue) { 2415 + intel_sdvo_write_cmd(intel_output, 2416 + SDVO_CMD_GET_MAX_HUE, NULL, 0); 2417 + status = intel_sdvo_read_response(intel_output, 2418 + &data_value, 4); 2419 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2420 + DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); 2421 + return; 2422 + } 2423 + intel_sdvo_write_cmd(intel_output, 2424 + SDVO_CMD_GET_HUE, NULL, 0); 2425 + status = intel_sdvo_read_response(intel_output, 2426 + &response, 2); 2427 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2428 + DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); 2429 + return; 2430 + } 2431 + sdvo_priv->max_hue = data_value[0]; 2432 + sdvo_priv->cur_hue = response; 2433 + sdvo_priv->hue_property = 2434 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2435 + "hue", 2); 2436 + sdvo_priv->hue_property->values[0] = 0; 2437 + sdvo_priv->hue_property->values[1] = 2438 + data_value[0]; 2439 + drm_connector_attach_property(connector, 2440 + sdvo_priv->hue_property, 2441 + sdvo_priv->cur_hue); 2442 + DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", 2443 + data_value[0], data_value[1], response); 2444 + } 2445 + } 2446 + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 2447 + if (sdvo_data.brightness) { 2448 + intel_sdvo_write_cmd(intel_output, 2449 + SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2450 + status = intel_sdvo_read_response(intel_output, 2451 + &data_value, 4); 2452 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2453 + DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2454 + return; 2455 + } 2456 + intel_sdvo_write_cmd(intel_output, 2457 + SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2458 + status = intel_sdvo_read_response(intel_output, 2459 + &response, 2); 2460 + if (status != SDVO_CMD_STATUS_SUCCESS) { 2461 + DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2462 + return; 2463 + } 2464 + sdvo_priv->max_brightness = data_value[0]; 2465 + sdvo_priv->cur_brightness = response; 2466 + sdvo_priv->brightness_property = 2467 + drm_property_create(dev, DRM_MODE_PROP_RANGE, 2468 + "brightness", 2); 2469 + sdvo_priv->brightness_property->values[0] = 0; 2470 + sdvo_priv->brightness_property->values[1] = 2471 + data_value[0]; 2472 + drm_connector_attach_property(connector, 2473 + sdvo_priv->brightness_property, 2474 + sdvo_priv->cur_brightness); 2475 + DRM_DEBUG_KMS("brightness: max %d, " 2476 + "default %d, current %d\n", 2477 + data_value[0], data_value[1], response); 2478 + } 2479 + } 2480 + return; 2363 2481 } 2364 2482 2365 2483 bool intel_sdvo_init(struct drm_device *dev, int output_device) ··· 2754 2264 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2755 2265 if (sdvo_priv->is_tv) 2756 2266 intel_sdvo_tv_create_property(connector); 2267 + 2268 + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) 2269 + 
intel_sdvo_create_enhance_property(connector); 2270 + 2757 2271 drm_sysfs_connector_add(connector); 2758 2272 2759 2273 intel_sdvo_select_ddc_bus(sdvo_priv);
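Each control wired up in intel_sdvo_create_enhance_property() above follows the same steps: query the maximum with SDVO_CMD_GET_MAX_*, query the current value with SDVO_CMD_GET_*, record both, create a DRM_MODE_PROP_RANGE property spanning 0..max, and attach it with the current value. A condensed sketch of that template; this helper is not part of the patch (which open-codes each control), and get_max_cmd/get_cmd/name stand in for any of the opcodes and property names listed above:

	static void intel_sdvo_create_range_property(struct drm_connector *connector,
						     struct intel_output *intel_output,
						     uint8_t get_max_cmd,
						     uint8_t get_cmd,
						     const char *name,
						     struct drm_property **prop,
						     u32 *cur, u32 *max)
	{
		struct drm_device *dev = connector->dev;
		uint16_t data_value[2], response;
		uint8_t status;

		/* max first, then current value, as the patch does */
		intel_sdvo_write_cmd(intel_output, get_max_cmd, NULL, 0);
		status = intel_sdvo_read_response(intel_output, &data_value, 4);
		if (status != SDVO_CMD_STATUS_SUCCESS)
			return;

		intel_sdvo_write_cmd(intel_output, get_cmd, NULL, 0);
		status = intel_sdvo_read_response(intel_output, &response, 2);
		if (status != SDVO_CMD_STATUS_SUCCESS)
			return;

		*max = data_value[0];
		*cur = response;

		/* range property: two values, [0, max], initialised to current */
		*prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, name, 2);
		(*prop)->values[0] = 0;
		(*prop)->values[1] = data_value[0];
		drm_connector_attach_property(connector, *prop, *cur);
	}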
+25
include/acpi/button.h
··· 1 + #ifndef ACPI_BUTTON_H 2 + #define ACPI_BUTTON_H 3 + 4 + #include <linux/notifier.h> 5 + 6 + #if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) 7 + extern int acpi_lid_notifier_register(struct notifier_block *nb); 8 + extern int acpi_lid_notifier_unregister(struct notifier_block *nb); 9 + extern int acpi_lid_open(void); 10 + #else 11 + static inline int acpi_lid_notifier_register(struct notifier_block *nb) 12 + { 13 + return 0; 14 + } 15 + static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) 16 + { 17 + return 0; 18 + } 19 + static inline int acpi_lid_open(void) 20 + { 21 + return 1; 22 + } 23 + #endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ 24 + 25 + #endif /* ACPI_BUTTON_H */
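include/acpi/button.h is the whole new interface: register/unregister on a lid notifier chain plus an acpi_lid_open() query, with inline stubs (registration succeeds, lid reads as open) when the ACPI button driver is not built. A minimal consumer sketch, hypothetical module code mirroring what intel_lvds.c does above:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <acpi/button.h>

	static int example_lid_notify(struct notifier_block *nb, unsigned long val,
				      void *data)
	{
		/* acpi_lid_open() returns 1 for open, 0 for closed, or -ENODEV
		 * if the _LID method cannot be evaluated. */
		if (acpi_lid_open() > 0)
			pr_info("example: lid opened, refresh outputs\n");
		else
			pr_info("example: lid closed\n");
		return NOTIFY_OK;
	}

	static struct notifier_block example_lid_nb = {
		.notifier_call = example_lid_notify,
	};

	static int __init example_init(void)
	{
		/* with the !CONFIG_ACPI_BUTTON stubs this simply returns 0 */
		return acpi_lid_notifier_register(&example_lid_nb);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		acpi_lid_notifier_unregister(&example_lid_nb);
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");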
+1
include/drm/drm_pciids.h
··· 552 552 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 553 553 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 554 554 {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 555 + {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 555 556 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 556 557 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 557 558 {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+19
include/drm/i915_drm.h
··· 185 185 #define DRM_I915_GEM_GET_APERTURE 0x23 186 186 #define DRM_I915_GEM_MMAP_GTT 0x24 187 187 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 188 + #define DRM_I915_GEM_MADVISE 0x26 188 189 189 190 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 190 191 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 222 221 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 223 222 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 224 223 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) 224 + #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 225 225 226 226 /* Allow drivers to submit batchbuffers directly to hardware, relying 227 227 * on the security mechanisms provided by hardware. ··· 667 665 668 666 /** pipe of requested CRTC **/ 669 667 __u32 pipe; 668 + }; 669 + 670 + #define I915_MADV_WILLNEED 0 671 + #define I915_MADV_DONTNEED 1 672 + #define __I915_MADV_PURGED 2 /* internal state */ 673 + 674 + struct drm_i915_gem_madvise { 675 + /** Handle of the buffer to change the backing store advice */ 676 + __u32 handle; 677 + 678 + /* Advice: either the buffer will be needed again in the near future, 679 + * or wont be and could be discarded under memory pressure. 680 + */ 681 + __u32 madv; 682 + 683 + /** Whether the backing store still exists. */ 684 + __u32 retained; 670 685 }; 671 686 672 687 #endif /* _I915_DRM_H_ */
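The new DRM_I915_GEM_MADVISE ioctl lets userspace mark a GEM buffer's backing store as discardable and later ask for it back; the retained field reports whether the pages survived. A userspace sketch (hypothetical helpers, error handling trimmed; assumes an already-open DRM fd and an existing GEM handle, and that the installed headers provide drm/i915_drm.h; libdrm users would typically wrap the call in drmIoctl()):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Tell the kernel this buffer's contents may be purged under pressure. */
	static int mark_dontneed(int fd, uint32_t handle)
	{
		struct drm_i915_gem_madvise madv;

		memset(&madv, 0, sizeof(madv));
		madv.handle = handle;
		madv.madv = I915_MADV_DONTNEED;
		return ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
	}

	/* Ask for the buffer back; returns 1 if the backing store survived,
	 * 0 if it was purged and must be re-filled, -1 on ioctl failure. */
	static int reclaim(int fd, uint32_t handle)
	{
		struct drm_i915_gem_madvise madv;

		memset(&madv, 0, sizeof(madv));
		madv.handle = handle;
		madv.madv = I915_MADV_WILLNEED;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
			return -1;
		return madv.retained;
	}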