Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (26 commits)
drm/radeon: update sarea copies of last_ variables on resume.
drm/i915: Keep refs on the object over the lifetime of vmas for GTT mmap.
drm/i915: take struct mutex around fb unref
drm: Use spread spectrum when the bios tells us it's ok.
drm: Collapse identical i8xx_clock() and i9xx_clock().
drm: Bring PLL limits in sync with DDX values.
drm: Add locking around cursor gem operations.
drm: Propagate failure from setting crtc base.
drm: Check for a NULL encoder when reverting on error path
drm/i915: Cleanup the hws on ringbuffer construction failure.
drm/i915: Don't add panel_fixed_mode to the probed modes list at LVDS init.
drm: Release user fbs in drm_release
drm/i915: Unpin the fb on error during construction.
drm/i915: Unpin the hws if we fail to kmap.
drm/i915: Unpin the ringbuffer if we fail to ioremap it.
drm/i915: unpin for an invalid memory domain.
drm/i915: Release and unlock on mmap_gtt error path.
drm/i915: Set framebuffer alignment based upon the fence constraints.
drm: Do not leak a new reference for flink() on an existing name
drm/i915: Fix potential AB-BA deadlock in i915_gem_execbuffer()
...

+280 -174
+1 -2
drivers/gpu/drm/drm_crtc.c
···
  * RETURNS:
  * Zero on success, errno on failure.
  */
-void drm_fb_release(struct file *filp)
+void drm_fb_release(struct drm_file *priv)
 {
-    struct drm_file *priv = filp->private_data;
     struct drm_device *dev = priv->minor->dev;
     struct drm_framebuffer *fb, *tfb;
+16 -5
drivers/gpu/drm/drm_crtc_helper.c
···
     if (drm_mode_equal(&saved_mode, &crtc->mode)) {
         if (saved_x != crtc->x || saved_y != crtc->y ||
             depth_changed || bpp_changed) {
-            crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
-                          old_fb);
+            ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
+                             old_fb);
             goto done;
         }
     }
···
     /* Set up the DPLL and any encoders state that needs to adjust or depend
      * on the DPLL.
      */
-    crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+    ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+    if (!ret)
+        goto done;

     list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
···
         if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
                           set->x, set->y,
                           old_fb)) {
+            DRM_ERROR("failed to set mode on crtc %p\n",
+                  set->crtc);
             ret = -EINVAL;
             goto fail_set_mode;
         }
···
         old_fb = set->crtc->fb;
         if (set->crtc->fb != set->fb)
             set->crtc->fb = set->fb;
-        crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
+        ret = crtc_funcs->mode_set_base(set->crtc,
+                        set->x, set->y, old_fb);
+        if (ret != 0)
+            goto fail_set_mode;
     }

     kfree(save_encoders);
···
 fail_set_mode:
     set->crtc->enabled = save_enabled;
     count = 0;
-    list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+        if (!connector->encoder)
+            continue;
+
         connector->encoder->crtc = save_crtcs[count++];
+    }
 fail_no_encoder:
     kfree(save_crtcs);
     count = 0;
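A note on the negations above, since they are easy to misread: drm_crtc_helper_set_mode() itself still returns a bool (true on success), while the reworked mode_set/mode_set_base hooks now return 0 on success and an errno on failure. A condensed sketch of the bridging pattern used in this hunk:

    /* hook returns 0 on success; the helper's local convention is
     * bool-like, so ret == true means "OK so far" */
    ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
    if (!ret)        /* hook returned non-zero: failure */
        goto done;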
+3
drivers/gpu/drm/drm_fops.c
···
     if (dev->driver->driver_features & DRIVER_GEM)
         drm_gem_release(dev, file_priv);

+    if (dev->driver->driver_features & DRIVER_MODESET)
+        drm_fb_release(file_priv);
+
     mutex_lock(&dev->ctxlist_mutex);
     if (!list_empty(&dev->ctxlist)) {
         struct drm_ctx_list *pos, *n;
+56 -25
drivers/gpu/drm/drm_gem.c
···
     if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
             DRM_FILE_PAGE_OFFSET_SIZE)) {
-        drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
         drm_ht_remove(&mm->offset_hash);
+        drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
         return -ENOMEM;
     }
···
         return -EBADF;

 again:
-    if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-        return -ENOMEM;
+    if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+        ret = -ENOMEM;
+        goto err;
+    }

     spin_lock(&dev->object_name_lock);
-    if (obj->name) {
-        args->name = obj->name;
+    if (!obj->name) {
+        ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+                    &obj->name);
+        args->name = (uint64_t) obj->name;
         spin_unlock(&dev->object_name_lock);
-        return 0;
+
+        if (ret == -EAGAIN)
+            goto again;
+
+        if (ret != 0)
+            goto err;
+
+        /* Allocate a reference for the name table. */
+        drm_gem_object_reference(obj);
+    } else {
+        args->name = (uint64_t) obj->name;
+        spin_unlock(&dev->object_name_lock);
+        ret = 0;
     }
-    ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-                &obj->name);
-    spin_unlock(&dev->object_name_lock);
-    if (ret == -EAGAIN)
-        goto again;

-    if (ret != 0) {
-        mutex_lock(&dev->struct_mutex);
-        drm_gem_object_unreference(obj);
-        mutex_unlock(&dev->struct_mutex);
-        return ret;
-    }
-
-    /*
-     * Leave the reference from the lookup around as the
-     * name table now holds one
-     */
-    args->name = (uint64_t) obj->name;
-
-    return 0;
+err:
+    mutex_lock(&dev->struct_mutex);
+    drm_gem_object_unreference(obj);
+    mutex_unlock(&dev->struct_mutex);
+    return ret;
 }
···
     spin_lock(&dev->object_name_lock);
     if (obj->name) {
         idr_remove(&dev->object_name_idr, obj->name);
+        obj->name = 0;
         spin_unlock(&dev->object_name_lock);
         /*
          * The object name held a reference to this object, drop
···
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+    struct drm_gem_object *obj = vma->vm_private_data;
+
+    drm_gem_object_reference(obj);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+    struct drm_gem_object *obj = vma->vm_private_data;
+    struct drm_device *dev = obj->dev;
+
+    mutex_lock(&dev->struct_mutex);
+    drm_gem_object_unreference(obj);
+    mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+

 /**
  * drm_gem_mmap - memory map routine for GEM objects
···
     prot |= _PAGE_CACHE_WC;
 #endif
     vma->vm_page_prot = __pgprot(prot);
+
+    /* Take a ref for this mapping of the object, so that the fault
+     * handler can dereference the mmap offset's pointer to the object.
+     * This reference is cleaned up by the corresponding vm_close
+     * (which should happen whether the vma was created by this call, or
+     * by a vm_open due to mremap or partial unmap or whatever).
+     */
+    drm_gem_object_reference(obj);

     vma->vm_file = filp;    /* Needed for drm_vm_open() */
     drm_vm_open_locked(vma);
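The new vm_open/vm_close helpers exist so that every vma created against a GEM mmap offset holds its own object reference; drivers opt in by pointing their vm_operations_struct at them, exactly as the i915 hunk below does. A minimal sketch of the wiring (the fault handler name is illustrative, not part of this merge):

    static struct vm_operations_struct example_gem_vm_ops = {
        .fault = example_gem_fault, /* driver-specific fault handler (hypothetical) */
        .open  = drm_gem_vm_open,   /* ref taken when a vma is duplicated (fork, mremap, split) */
        .close = drm_gem_vm_close,  /* ref dropped under dev->struct_mutex */
    };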
+2
drivers/gpu/drm/i915/i915_drv.c
···
 static struct vm_operations_struct i915_gem_vm_ops = {
     .fault = i915_gem_fault,
+    .open = drm_gem_vm_open,
+    .close = drm_gem_vm_close,
 };

 static struct drm_driver driver = {
+2
drivers/gpu/drm/i915/i915_drv.h
···
     unsigned int lvds_dither:1;
     unsigned int lvds_vbt:1;
     unsigned int int_crt_support:1;
+    unsigned int lvds_use_ssc:1;
+    int lvds_ssc_freq;

     struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
     int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+75 -44
drivers/gpu/drm/i915/i915_gem.c
···
     case -EAGAIN:
         return VM_FAULT_OOM;
     case -EFAULT:
-    case -EBUSY:
-        DRM_ERROR("can't insert pfn?? fault or busy...\n");
         return VM_FAULT_SIGBUS;
     default:
         return VM_FAULT_NOPAGE;
···
     drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

     return ret;
+}
+
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+    struct drm_device *dev = obj->dev;
+    struct drm_i915_gem_object *obj_priv = obj->driver_private;
+    struct drm_gem_mm *mm = dev->mm_private;
+    struct drm_map_list *list;
+
+    list = &obj->map_list;
+    drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+    if (list->file_offset_node) {
+        drm_mm_put_block(list->file_offset_node);
+        list->file_offset_node = NULL;
+    }
+
+    if (list->map) {
+        drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+        list->map = NULL;
+    }
+
+    obj_priv->mmap_offset = 0;
 }

 /**
···
     if (!obj_priv->mmap_offset) {
         ret = i915_gem_create_mmap_offset(obj);
-        if (ret)
+        if (ret) {
+            drm_gem_object_unreference(obj);
+            mutex_unlock(&dev->struct_mutex);
             return ret;
+        }
     }

     args->offset = obj_priv->mmap_offset;
···
               (int) reloc.offset,
               reloc.read_domains,
               reloc.write_domain);
+            drm_gem_object_unreference(target_obj);
+            i915_gem_object_unpin(obj);
             return -EINVAL;
         }
···
     if (dev_priv->mm.wedged) {
         DRM_ERROR("Execbuf while wedged\n");
         mutex_unlock(&dev->struct_mutex);
-        return -EIO;
+        ret = -EIO;
+        goto pre_mutex_err;
     }

     if (dev_priv->mm.suspended) {
         DRM_ERROR("Execbuf while VT-switched.\n");
         mutex_unlock(&dev->struct_mutex);
-        return -EBUSY;
+        ret = -EBUSY;
+        goto pre_mutex_err;
     }

     /* Look up object handles */
···
     i915_verify_inactive(dev, __FILE__, __LINE__);

-    /* Copy the new buffer offsets back to the user's exec list. */
-    ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-               (uintptr_t) args->buffers_ptr,
-               exec_list,
-               sizeof(*exec_list) * args->buffer_count);
-    if (ret)
-        DRM_ERROR("failed to copy %d exec entries "
-              "back to user (%d)\n",
-              args->buffer_count, ret);
 err:
     for (i = 0; i < pinned; i++)
         i915_gem_object_unpin(object_list[i]);
···
         drm_gem_object_unreference(object_list[i]);

     mutex_unlock(&dev->struct_mutex);
+
+    if (!ret) {
+        /* Copy the new buffer offsets back to the user's exec list. */
+        ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                   (uintptr_t) args->buffers_ptr,
+                   exec_list,
+                   sizeof(*exec_list) * args->buffer_count);
+        if (ret)
+            DRM_ERROR("failed to copy %d exec entries "
+                  "back to user (%d)\n",
+                  args->buffer_count, ret);
+    }

 pre_mutex_err:
     drm_free(object_list, sizeof(*object_list) * args->buffer_count,
···
     if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
         DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
               args->handle);
+        drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
         return -EINVAL;
     }
···
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
     struct drm_device *dev = obj->dev;
-    struct drm_gem_mm *mm = dev->mm_private;
-    struct drm_map_list *list;
-    struct drm_map *map;
     struct drm_i915_gem_object *obj_priv = obj->driver_private;

     while (obj_priv->pin_count > 0)
···
     i915_gem_object_unbind(obj);

-    list = &obj->map_list;
-    drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-    if (list->file_offset_node) {
-        drm_mm_put_block(list->file_offset_node);
-        list->file_offset_node = NULL;
-    }
-
-    map = list->map;
-    if (map) {
-        drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-        list->map = NULL;
-    }
+    i915_gem_free_mmap_offset(obj);

     drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
     drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
···
     if (dev_priv->hw_status_page == NULL) {
         DRM_ERROR("Failed to map status page.\n");
         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+        i915_gem_object_unpin(obj);
         drm_gem_object_unreference(obj);
         return -EINVAL;
     }
···
     DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

     return 0;
+}
+
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    struct drm_gem_object *obj = dev_priv->hws_obj;
+    struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+    if (dev_priv->hws_obj == NULL)
+        return;
+
+    kunmap(obj_priv->page_list[0]);
+    i915_gem_object_unpin(obj);
+    drm_gem_object_unreference(obj);
+    dev_priv->hws_obj = NULL;
+    memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+    dev_priv->hw_status_page = NULL;
+
+    /* Write high address into HWS_PGA when disabling. */
+    I915_WRITE(HWS_PGA, 0x1ffff000);
 }

 int
···
     obj = drm_gem_object_alloc(dev, 128 * 1024);
     if (obj == NULL) {
         DRM_ERROR("Failed to allocate ringbuffer\n");
+        i915_gem_cleanup_hws(dev);
         return -ENOMEM;
     }
     obj_priv = obj->driver_private;
···
     ret = i915_gem_object_pin(obj, 4096);
     if (ret != 0) {
         drm_gem_object_unreference(obj);
+        i915_gem_cleanup_hws(dev);
         return ret;
     }
···
     if (ring->map.handle == NULL) {
         DRM_ERROR("Failed to map ringbuffer.\n");
         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+        i915_gem_object_unpin(obj);
         drm_gem_object_unreference(obj);
+        i915_gem_cleanup_hws(dev);
         return -EINVAL;
     }
     ring->ring_obj = obj;
···
     dev_priv->ring.ring_obj = NULL;
     memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

-    if (dev_priv->hws_obj != NULL) {
-        struct drm_gem_object *obj = dev_priv->hws_obj;
-        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-        kunmap(obj_priv->page_list[0]);
-        i915_gem_object_unpin(obj);
-        drm_gem_object_unreference(obj);
-        dev_priv->hws_obj = NULL;
-        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-        dev_priv->hw_status_page = NULL;
-
-        /* Write high address into HWS_PGA when disabling. */
-        I915_WRITE(HWS_PGA, 0x1ffff000);
-    }
+    i915_gem_cleanup_hws(dev);
 }

 int
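The execbuffer rework above is the AB-BA deadlock fix from the shortlog: copy_to_user() may fault, and a fault on a GTT mapping re-enters i915_gem_fault(), which needs struct_mutex, so the copy must not run with that mutex held. A condensed sketch of the ordering the fix enforces (user_ptr and len stand in for the real argument plumbing):

    mutex_unlock(&dev->struct_mutex);

    if (!ret)
        /* safe: a fault here can now take struct_mutex without deadlocking */
        ret = copy_to_user(user_ptr, exec_list, len);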
+2 -4
drivers/gpu/drm/i915/i915_gem_tiling.c
···
     }
     obj_priv->stride = args->stride;

-    mutex_unlock(&dev->struct_mutex);
-
     drm_gem_object_unreference(obj);
+    mutex_unlock(&dev->struct_mutex);

     return 0;
 }
···
         DRM_ERROR("unknown tiling mode\n");
     }

-    mutex_unlock(&dev->struct_mutex);
-
     drm_gem_object_unreference(obj);
+    mutex_unlock(&dev->struct_mutex);

     return 0;
 }
+8
drivers/gpu/drm/i915/intel_bios.c
···
     if (general) {
         dev_priv->int_tv_support = general->int_tv_support;
         dev_priv->int_crt_support = general->int_crt_support;
+        dev_priv->lvds_use_ssc = general->enable_ssc;
+
+        if (dev_priv->lvds_use_ssc) {
+            if (IS_I855(dev_priv->dev))
+                dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+            else
+                dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+        }
     }
 }
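The two frequency pairs come from the VBT's single ssc_freq bit, which selects between chipset-specific reference clocks (66 vs. 48 MHz on i855-class hardware, 100 vs. 96 MHz on i9xx). The mode-set path consumes the parsed value as shown in the intel_display.c hunk below; condensed:

    if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
        refclk = dev_priv->lvds_ssc_freq * 1000;    /* MHz -> kHz */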
+85 -75
drivers/gpu/drm/i915/intel_display.c
···
 #define I9XX_DOT_MAX 400000
 #define I9XX_VCO_MIN 1400000
 #define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
 #define I9XX_M_MIN 70
 #define I9XX_M_MAX 120
 #define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
+#define I9XX_M1_MAX 22
 #define I9XX_M2_MIN 5
 #define I9XX_M2_MAX 9
 #define I9XX_P_SDVO_DAC_MIN 5
···
     return limit;
 }

-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, intel_clock_t *clock)
+static void intel_clock(int refclk, intel_clock_t *clock)
 {
     clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
     clock->p = clock->p1 * clock->p2;
     clock->vco = refclk * clock->m / (clock->n + 2);
     clock->dot = clock->vco / clock->p;
-}
-
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, intel_clock_t *clock)
-{
-    clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-    clock->p = clock->p1 * clock->p2;
-    clock->vco = refclk * clock->m / (clock->n + 2);
-    clock->dot = clock->vco / clock->p;
-}
-
-static void intel_clock(struct drm_device *dev, int refclk,
-            intel_clock_t *clock)
-{
-    if (IS_I9XX(dev))
-        i9xx_clock (refclk, clock);
-    else
-        i8xx_clock (refclk, clock);
 }

 /**
···
         return false;
 }

-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+#define INTELPllInvalid(s)   do { DRM_DEBUG(s); return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
···
          clock.p1 <= limit->p1.max; clock.p1++) {
         int this_err;

-        intel_clock(dev, refclk, &clock);
+        intel_clock(refclk, &clock);

         if (!intel_PLL_is_valid(crtc, &clock))
             continue;
···
     udelay(20000);
 }

-static void
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
             struct drm_framebuffer *old_fb)
 {
···
     int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
     int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
     u32 dspcntr, alignment;
+    int ret;

     /* no fb bound */
     if (!crtc->fb) {
         DRM_DEBUG("No FB bound\n");
-        return;
+        return 0;
+    }
+
+    switch (pipe) {
+    case 0:
+    case 1:
+        break;
+    default:
+        DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+        return -EINVAL;
     }

     intel_fb = to_intel_framebuffer(crtc->fb);
···
         alignment = 64 * 1024;
         break;
     case I915_TILING_X:
-        if (IS_I9XX(dev))
-            alignment = 1024 * 1024;
-        else
-            alignment = 512 * 1024;
+        /* pin() will align the object as required by fence */
+        alignment = 0;
         break;
     case I915_TILING_Y:
         /* FIXME: Is this true? */
         DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-        return;
+        return -EINVAL;
     default:
         BUG();
     }

-    if (i915_gem_object_pin(intel_fb->obj, alignment))
-        return;
+    mutex_lock(&dev->struct_mutex);
+    ret = i915_gem_object_pin(intel_fb->obj, alignment);
+    if (ret != 0) {
+        mutex_unlock(&dev->struct_mutex);
+        return ret;
+    }

-    i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
-
-    Start = obj_priv->gtt_offset;
-    Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
-    I915_WRITE(dspstride, crtc->fb->pitch);
+    ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+    if (ret != 0) {
+        i915_gem_object_unpin(intel_fb->obj);
+        mutex_unlock(&dev->struct_mutex);
+        return ret;
+    }

     dspcntr = I915_READ(dspcntr_reg);
     /* Mask out pixel format bits in case we change it */
···
         break;
     default:
         DRM_ERROR("Unknown color depth\n");
-        return;
+        i915_gem_object_unpin(intel_fb->obj);
+        mutex_unlock(&dev->struct_mutex);
+        return -EINVAL;
     }
     I915_WRITE(dspcntr_reg, dspcntr);

+    Start = obj_priv->gtt_offset;
+    Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
     DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+    I915_WRITE(dspstride, crtc->fb->pitch);
     if (IS_I965G(dev)) {
         I915_WRITE(dspbase, Offset);
         I915_READ(dspbase);
···
         intel_fb = to_intel_framebuffer(old_fb);
         i915_gem_object_unpin(intel_fb->obj);
     }
+    mutex_unlock(&dev->struct_mutex);

     if (!dev->primary->master)
-        return;
+        return 0;

     master_priv = dev->primary->master->driver_priv;
     if (!master_priv->sarea_priv)
-        return;
+        return 0;

-    switch (pipe) {
-    case 0:
-        master_priv->sarea_priv->pipeA_x = x;
-        master_priv->sarea_priv->pipeA_y = y;
-        break;
-    case 1:
+    if (pipe) {
         master_priv->sarea_priv->pipeB_x = x;
         master_priv->sarea_priv->pipeB_y = y;
-        break;
-    default:
-        DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-        break;
+    } else {
+        master_priv->sarea_priv->pipeA_x = x;
+        master_priv->sarea_priv->pipeA_y = y;
     }
+
+    return 0;
 }
···
     return 1;
 }

-static void intel_crtc_mode_set(struct drm_crtc *crtc,
-                struct drm_display_mode *mode,
-                struct drm_display_mode *adjusted_mode,
-                int x, int y,
-                struct drm_framebuffer *old_fb)
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+                   struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode,
+                   int x, int y,
+                   struct drm_framebuffer *old_fb)
 {
     struct drm_device *dev = crtc->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
···
     int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
     int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
     int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-    int refclk;
+    int refclk, num_outputs = 0;
     intel_clock_t clock;
     u32 dpll = 0, fp = 0, dspcntr, pipeconf;
     bool ok, is_sdvo = false, is_dvo = false;
     bool is_crt = false, is_lvds = false, is_tv = false;
     struct drm_mode_config *mode_config = &dev->mode_config;
     struct drm_connector *connector;
+    int ret;

     drm_vblank_pre_modeset(dev, pipe);
···
             is_crt = true;
             break;
         }
+
+        num_outputs++;
     }

-    if (IS_I9XX(dev)) {
+    if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+        refclk = dev_priv->lvds_ssc_freq * 1000;
+        DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+    } else if (IS_I9XX(dev)) {
         refclk = 96000;
     } else {
         refclk = 48000;
···
     ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
     if (!ok) {
         DRM_ERROR("Couldn't find PLL settings for mode!\n");
-        return;
+        return -EINVAL;
     }

     fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
···
         }
     }

-    if (is_tv) {
+    if (is_sdvo && is_tv)
+        dpll |= PLL_REF_INPUT_TVCLKINBC;
+    else if (is_tv)
         /* XXX: just matching BIOS for now */
-        /*    dpll |= PLL_REF_INPUT_TVCLKINBC; */
+        /*    dpll |= PLL_REF_INPUT_TVCLKINBC; */
         dpll |= 3;
-    }
+    else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
     else
         dpll |= PLL_REF_INPUT_DREFCLK;
···
     I915_WRITE(dspcntr_reg, dspcntr);

     /* Flush the plane changes */
-    intel_pipe_set_base(crtc, x, y, old_fb);
+    ret = intel_pipe_set_base(crtc, x, y, old_fb);
+    if (ret != 0)
+        return ret;

     drm_vblank_post_modeset(dev, pipe);
+
+    return 0;
 }

 /** Loads the palette/gamma unit for the CRTC with the prepared values */
···
     }

     /* we only need to pin inside GTT if cursor is non-phy */
+    mutex_lock(&dev->struct_mutex);
     if (!dev_priv->cursor_needs_physical) {
         ret = i915_gem_object_pin(bo, PAGE_SIZE);
         if (ret) {
             DRM_ERROR("failed to pin cursor bo\n");
-            goto fail;
+            goto fail_locked;
         }
         addr = obj_priv->gtt_offset;
     } else {
         ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
         if (ret) {
             DRM_ERROR("failed to attach phys object\n");
-            goto fail;
+            goto fail_locked;
         }
         addr = obj_priv->phys_obj->handle->busaddr;
     }
···
             i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
         } else
             i915_gem_object_unpin(intel_crtc->cursor_bo);
-        mutex_lock(&dev->struct_mutex);
         drm_gem_object_unreference(intel_crtc->cursor_bo);
-        mutex_unlock(&dev->struct_mutex);
     }
+    mutex_unlock(&dev->struct_mutex);

     intel_crtc->cursor_addr = addr;
     intel_crtc->cursor_bo = bo;
···
     return 0;
 fail:
     mutex_lock(&dev->struct_mutex);
+fail_locked:
     drm_gem_object_unreference(bo);
     mutex_unlock(&dev->struct_mutex);
     return ret;
···
     }

     /* XXX: Handle the 100Mhz refclk */
-    i9xx_clock(96000, &clock);
+    intel_clock(96000, &clock);
 } else {
     bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
···
         if ((dpll & PLL_REF_INPUT_MASK) ==
             PLLB_REF_INPUT_SPREADSPECTRUMIN) {
             /* XXX: might not be 66MHz */
-            i8xx_clock(66000, &clock);
+            intel_clock(66000, &clock);
         } else
-            i8xx_clock(48000, &clock);
+            intel_clock(48000, &clock);
     } else {
         if (dpll & PLL_P1_DIVIDE_BY_TWO)
             clock.p1 = 2;
···
         else
             clock.p2 = 2;

-        i8xx_clock(48000, &clock);
+        intel_clock(48000, &clock);
     }
 }
···
     ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
     if (ret) {
+        mutex_lock(&dev->struct_mutex);
         drm_gem_object_unreference(obj);
+        mutex_unlock(&dev->struct_mutex);
         return NULL;
     }
+5 -3
drivers/gpu/drm/i915/intel_fb.c
···
     ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
     if (ret) {
         DRM_ERROR("failed to allocate fb.\n");
-        goto out_unref;
+        goto out_unpin;
     }

     list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
···
     info = framebuffer_alloc(sizeof(struct intelfb_par), device);
     if (!info) {
         ret = -ENOMEM;
-        goto out_unref;
+        goto out_unpin;
     }

     par = info->par;
···
                size);
     if (!info->screen_base) {
         ret = -ENOSPC;
-        goto out_unref;
+        goto out_unpin;
     }
     info->screen_size = size;
···
     mutex_unlock(&dev->struct_mutex);
     return 0;

+out_unpin:
+    i915_gem_object_unpin(fbo);
 out_unref:
     drm_gem_object_unreference(fbo);
     mutex_unlock(&dev->struct_mutex);
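The retargeted gotos rely on the cascading-label idiom: out_unpin falls through into out_unref, so any failure after the pin is undone in reverse order of setup. Condensed from the hunk above:

    out_unpin:
        i915_gem_object_unpin(fbo);        /* undo the pin first... */
    out_unref:
        drm_gem_object_unreference(fbo);   /* ...then drop the lookup ref */
        mutex_unlock(&dev->struct_mutex);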
-2
drivers/gpu/drm/i915/intel_lvds.c
···
         if (dev_priv->panel_fixed_mode) {
             dev_priv->panel_fixed_mode->type |=
                 DRM_MODE_TYPE_PREFERRED;
-            drm_mode_probed_add(connector,
-                        dev_priv->panel_fixed_mode);
             goto out;
         }
     }
+1 -1
drivers/gpu/drm/i915/intel_sdvo.c
···
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 /** Mapping of command numbers to names, for debug output */
-const static struct _sdvo_cmd_name {
+static const struct _sdvo_cmd_name {
     u8 cmd;
     char *name;
 } sdvo_cmd_names[] = {
+1 -1
drivers/gpu/drm/i915/intel_tv.c
···
  * These values account for -1s required.
  */

-const static struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
     {
         .name = "NTSC-M",
         .clock = 107520,
+15 -6
drivers/gpu/drm/radeon/radeon_cp.c
···
 }

 static void radeon_cp_init_ring_buffer(struct drm_device * dev,
-                       drm_radeon_private_t * dev_priv)
+                       drm_radeon_private_t *dev_priv,
+                       struct drm_file *file_priv)
 {
+    struct drm_radeon_master_private *master_priv;
     u32 ring_start, cur_read_ptr;
     u32 tmp;
···
     dev_priv->scratch[2] = 0;
     RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
+
+    /* reset sarea copies of these */
+    master_priv = file_priv->master->driver_priv;
+    if (master_priv->sarea_priv) {
+        master_priv->sarea_priv->last_frame = 0;
+        master_priv->sarea_priv->last_dispatch = 0;
+        master_priv->sarea_priv->last_clear = 0;
+    }

     radeon_do_wait_for_idle(dev_priv);
···
     }

     radeon_cp_load_microcode(dev_priv);
-    radeon_cp_init_ring_buffer(dev, dev_priv);
+    radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

     dev_priv->last_buf = 0;
···
  *
  * Charl P. Botha <http://cpbotha.net>
  */
-static int radeon_do_resume_cp(struct drm_device * dev)
+static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
 {
     drm_radeon_private_t *dev_priv = dev->dev_private;
···
     }

     radeon_cp_load_microcode(dev_priv);
-    radeon_cp_init_ring_buffer(dev, dev_priv);
+    radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

     radeon_do_engine_reset(dev);
     radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
···
  */
 int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-
-    return radeon_do_resume_cp(dev);
+    return radeon_do_resume_cp(dev, file_priv);
 }

 int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
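The sarea reset mirrors the zeroing of the hardware scratch registers a few lines earlier; presumably this is the point of the "update sarea copies of last_ variables on resume" commit, since clients wait on these counters and stale pre-suspend values could leave them blocked indefinitely. Condensed from the hunk above:

    /* hardware counters were just zeroed, so zero the sarea mirrors too */
    if (master_priv->sarea_priv) {
        master_priv->sarea_priv->last_frame = 0;
        master_priv->sarea_priv->last_dispatch = 0;
        master_priv->sarea_priv->last_clear = 0;
    }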
+2
include/drm/drmP.h
···
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                         size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_vm_open(struct vm_area_struct *vma);
+void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

 static inline void
+1 -1
include/drm/drm_crtc.h
···
 extern char *drm_get_dvi_i_select_name(int val);
 extern char *drm_get_tv_subconnector_name(int val);
 extern char *drm_get_tv_select_name(int val);
-extern void drm_fb_release(struct file *filp);
+extern void drm_fb_release(struct drm_file *file_priv);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
                  struct i2c_adapter *adapter);
+5 -5
include/drm/drm_crtc_helper.h
···
               struct drm_display_mode *mode,
               struct drm_display_mode *adjusted_mode);
     /* Actually set the mode */
-    void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
-             struct drm_display_mode *adjusted_mode, int x, int y,
-             struct drm_framebuffer *old_fb);
+    int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+            struct drm_display_mode *adjusted_mode, int x, int y,
+            struct drm_framebuffer *old_fb);

     /* Move the crtc on the current fb to the given position *optional* */
-    void (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
-                  struct drm_framebuffer *old_fb);
+    int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+                 struct drm_framebuffer *old_fb);
 };

 struct drm_encoder_helper_funcs {