Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next

Quite a few patches, but not much code changed:
- Fixes a regression from the atomic conversion where plane updates that
  change only the source rect (e.g. xrandr --right-of) were not applied
- Fixes another case where the atomic conversion changed behaviour
  underneath us, potentially causing laggy cursor position updates
- Fixes a bunch of races in the thermal and timer alarm code, which led
  to random lockups for a lot of users (see the sketch after this list)
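
The thermal lockups all stem from one pattern, visible in the therm, fan,
fantog and temp hunks below: each caller guarded rescheduling with its own
list_empty(&alarm->head) check while the timer core manipulated that same
list under tmr->lock, so alarms could be lost or the pending list corrupted.
The fix moves the unlink (list_del_init()) into nvkm_timer_alarm() itself,
under the lock, making reschedule and cancel idempotent. Below is a minimal
userspace model of that discipline; all names and types here are simplified
stand-ins for the kernel's list and locking primitives, not the actual API:

  #include <pthread.h>
  #include <stdio.h>

  struct node { struct node *prev, *next; };

  /* Illustrative stand-ins for the kernel's list helpers. */
  static void list_del_init(struct node *n)
  {
          n->prev->next = n->next;
          n->next->prev = n->prev;
          n->prev = n->next = n;  /* node reads as empty, safe to re-add */
  }

  static void list_add_tail(struct node *n, struct node *head)
  {
          n->prev = head->prev;
          n->next = head;
          head->prev->next = n;
          head->prev = n;
  }

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static struct node pending = { &pending, &pending };

  /* The fixed discipline: unlink under the lock before (re)inserting,
   * so callers never need their own racy list_empty() check. */
  static void alarm_schedule(struct node *alarm)
  {
          pthread_mutex_lock(&lock);
          list_del_init(alarm);   /* idempotent: also handles reschedule */
          list_add_tail(alarm, &pending);
          pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
          struct node a = { &a, &a };
          alarm_schedule(&a);     /* schedule */
          alarm_schedule(&a);     /* reschedule: no list corruption */
          printf("pending %s\n", pending.next == &a ? "has a" : "empty");
          return 0;
  }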

* 'linux-4.12' of git://github.com/skeggsb/linux:
drm/nouveau/therm: remove ineffective workarounds for alarm bugs
drm/nouveau/tmr: avoid processing completed alarms when adding a new one
drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
drm/nouveau/tmr: handle races with hw when updating the next alarm time
drm/nouveau/tmr: ack interrupt before processing alarms
drm/nouveau/core: fix static checker warning
drm/nouveau/fb/ram/gf100-: remove 0x10f200 read
drm/nouveau/kms/nv50: skip core channel cursor update on position-only changes
drm/nouveau/kms/nv50: fix source-rect-only plane updates
drm/nouveau/kms/nv50: remove pointless argument to window atomic_check_acquire()

9 files changed, 60 insertions(+), 41 deletions(-)
+15 -14
drivers/gpu/drm/nouveau/nv50_display.c
@@ -831,8 +831,7 @@
 static int
 nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
                                struct nv50_wndw_atom *asyw,
-                               struct nv50_head_atom *asyh,
-                               u32 pflip_flags)
+                               struct nv50_head_atom *asyh)
 {
         struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
         struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
@@ -847,7 +848,10 @@
         asyw->image.h = fb->base.height;
         asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
 
-        asyw->interval = pflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 0 : 1;
+        if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+                asyw->interval = 0;
+        else
+                asyw->interval = 1;
 
         if (asyw->image.kind) {
                 asyw->image.layout = 0;
@@ -889,7 +887,6 @@
         struct nv50_head_atom *harm = NULL, *asyh = NULL;
         bool varm = false, asyv = false, asym = false;
         int ret;
-        u32 pflip_flags = 0;
 
         NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
         if (asyw->state.crtc) {
@@ -897,7 +896,6 @@
                         return PTR_ERR(asyh);
                 asym = drm_atomic_crtc_needs_modeset(&asyh->state);
                 asyv = asyh->state.active;
-                pflip_flags = asyh->state.pageflip_flags;
         }
 
         if (armw->state.crtc) {
@@ -912,12 +912,9 @@
                 if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
                         asyw->set.point = true;
 
-                if (!varm || asym || armw->state.fb != asyw->state.fb) {
-                        ret = nv50_wndw_atomic_check_acquire(
-                                        wndw, asyw, asyh, pflip_flags);
-                        if (ret)
-                                return ret;
-                }
+                ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+                if (ret)
+                        return ret;
         } else
         if (varm) {
                 nv50_wndw_atomic_check_release(wndw, asyw, harm);
@@ -1119,9 +1122,13 @@
 nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
                   struct nv50_wndw_atom *asyw)
 {
-        asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
-        asyh->curs.offset = asyw->image.offset;
-        asyh->set.curs = asyh->curs.visible;
+        u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+        u32 offset = asyw->image.offset;
+        if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
+                asyh->curs.handle = handle;
+                asyh->curs.offset = offset;
+                asyh->set.curs = asyh->curs.visible;
+        }
 }
 
 static void
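
For context on the interval change in the first hunk: userspace requests a
tearing (immediate) flip by passing DRM_MODE_PAGE_FLIP_ASYNC to the atomic
commit; the DRM core stores it in drm_crtc_state, which nouveau reads above
as asyh->state.pageflip_flags. A rough libdrm sketch of such a request
(fd, plane_id, fb_prop_id and fb_id are placeholders assumed to have been
set up elsewhere):

  #include <errno.h>
  #include <stdint.h>
  #include <xf86drm.h>
  #include <xf86drmMode.h>

  int queue_async_flip(int fd, uint32_t plane_id,
                       uint32_t fb_prop_id, uint32_t fb_id)
  {
          drmModeAtomicReq *req = drmModeAtomicAlloc();
          int ret;

          if (!req)
                  return -ENOMEM;

          /* Point the plane's FB_ID property at the new framebuffer. */
          drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);

          /* PAGE_FLIP_ASYNC maps to interval 0 in the hunk above:
           * flip now instead of waiting for vblank. */
          ret = drmModeAtomicCommit(fd, req,
                                    DRM_MODE_ATOMIC_NONBLOCK |
                                    DRM_MODE_PAGE_FLIP_ASYNC, NULL);
          drmModeAtomicFree(req);
          return ret;
  }
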
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -295,7 +295,7 @@
         INIT_LIST_HEAD(&object->head);
         INIT_LIST_HEAD(&object->tree);
         RB_CLEAR_NODE(&object->node);
-        WARN_ON(oclass->engine && !object->engine);
+        WARN_ON(IS_ERR(object->engine));
 }
 
 int
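
The corrected assertion relies on the kernel's ERR_PTR convention: a
pointer field can carry a small negative errno encoded in the top few
pages of the address space, and IS_ERR() detects exactly that case, so
WARN_ON(IS_ERR(object->engine)) flags a failed engine lookup regardless
of whether the class uses an engine at all. A simplified userspace model
of the idiom:

  #include <stdio.h>

  #define MAX_ERRNO 4095

  /* Encode a -errno value in an (invalid) pointer. */
  static inline void *ERR_PTR(long error)
  {
          return (void *)error;
  }

  /* The top MAX_ERRNO addresses are never valid pointers. */
  static inline int IS_ERR(const void *ptr)
  {
          return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

  int main(void)
  {
          int x = 0;
          void *ok = &x;
          void *bad = ERR_PTR(-12);  /* -ENOMEM */

          printf("ok: %d, bad: %d\n", IS_ERR(ok), IS_ERR(bad));
          return 0;
  }
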
-1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -638,7 +638,6 @@
                 return ret;
         }
 
-        ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
         return 0;
 }
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -146,7 +146,7 @@
                 poll = false;
         }
 
-        if (list_empty(&therm->alarm.head) && poll)
+        if (poll)
                 nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
         spin_unlock_irqrestore(&therm->lock, flags);
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@
         spin_unlock_irqrestore(&fan->lock, flags);
 
         /* schedule next fan update, if not at target speed already */
-        if (list_empty(&fan->alarm.head) && target != duty) {
+        if (target != duty) {
                 u16 bump_period = fan->bios.bump_period;
                 u16 slow_down_period = fan->bios.slow_down_period;
                 u64 delay;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@
         duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
         nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-        if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+        if (percent != (duty * 100)) {
                 u64 next_change = (percent * fan->period_us) / 100;
                 if (!duty)
                         next_change = fan->period_us - next_change;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@
         spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
         /* schedule the next poll in one second */
-        if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+        if (therm->func->temp_get(therm) >= 0)
                 nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
+39 -20
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,23 +36,29 @@
         unsigned long flags;
         LIST_HEAD(exec);
 
-        /* move any due alarms off the pending list */
+        /* Process pending alarms. */
         spin_lock_irqsave(&tmr->lock, flags);
         list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
-                if (alarm->timestamp <= nvkm_timer_read(tmr))
-                        list_move_tail(&alarm->head, &exec);
+                /* Have we hit the earliest alarm that hasn't gone off? */
+                if (alarm->timestamp > nvkm_timer_read(tmr)) {
+                        /* Schedule it.  If we didn't race, we're done. */
+                        tmr->func->alarm_init(tmr, alarm->timestamp);
+                        if (alarm->timestamp > nvkm_timer_read(tmr))
+                                break;
+                }
+
+                /* Move to completed list.  We'll drop the lock before
+                 * executing the callback so it can reschedule itself.
+                 */
+                list_move_tail(&alarm->head, &exec);
         }
 
-        /* reschedule interrupt for next alarm time */
-        if (!list_empty(&tmr->alarms)) {
-                alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
-                tmr->func->alarm_init(tmr, alarm->timestamp);
-        } else {
+        /* Shut down interrupt if no more pending alarms. */
+        if (list_empty(&tmr->alarms))
                 tmr->func->alarm_fini(tmr);
-        }
         spin_unlock_irqrestore(&tmr->lock, flags);
 
-        /* execute any pending alarm handlers */
+        /* Execute completed callbacks. */
         list_for_each_entry_safe(alarm, atemp, &exec, head) {
                 list_del_init(&alarm->head);
                 alarm->func(alarm);
@@ -71,24 +65,37 @@
         struct nvkm_alarm *list;
         unsigned long flags;
 
-        alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-        /* append new alarm to list, in soonest-alarm-first order */
+        /* Remove alarm from pending list.
+         *
+         * This both protects against the corruption of the list,
+         * and implements alarm rescheduling/cancellation.
+         */
         spin_lock_irqsave(&tmr->lock, flags);
-        if (!nsec) {
-                if (!list_empty(&alarm->head))
-                        list_del(&alarm->head);
-        } else {
+        list_del_init(&alarm->head);
+
+        if (nsec) {
+                /* Insert into pending list, ordered earliest to latest. */
+                alarm->timestamp = nvkm_timer_read(tmr) + nsec;
                 list_for_each_entry(list, &tmr->alarms, head) {
                         if (list->timestamp > alarm->timestamp)
                                 break;
                 }
+
                 list_add_tail(&alarm->head, &list->head);
+
+                /* Update HW if this is now the earliest alarm. */
+                list = list_first_entry(&tmr->alarms, typeof(*list), head);
+                if (list == alarm) {
+                        tmr->func->alarm_init(tmr, alarm->timestamp);
+                        /* This shouldn't happen if callers aren't stupid.
+                         *
+                         * Worst case scenario is that it'll take roughly
+                         * 4 seconds for the next alarm to trigger.
+                         */
+                        WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+                }
         }
         spin_unlock_irqrestore(&tmr->lock, flags);
-
-        /* process pending alarms */
-        nvkm_timer_alarm_trigger(tmr);
 }
 
 void
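
With the unlink now performed inside nvkm_timer_alarm() under tmr->lock,
an alarm handler may simply re-arm itself, which is what the therm hunks
above were reduced to. A schematic caller (kernel-style sketch, not
compilable on its own; the handler body is hypothetical):

  static void
  example_therm_alarm(struct nvkm_alarm *alarm)
  {
          struct nvkm_therm *therm =
                  container_of(alarm, struct nvkm_therm, alarm);
          struct nvkm_timer *tmr = therm->subdev.device->timer;

          /* ... periodic work ... */

          /* Re-arm for one second from now.  No list_empty() guard is
           * needed, and nsec == 0 would instead cancel a pending alarm. */
          nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
  }
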
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@
         u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
         if (stat & 0x00000001) {
-                nvkm_timer_alarm_trigger(tmr);
                 nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+                nvkm_timer_alarm_trigger(tmr);
                 stat &= ~0x00000001;
         }
 