Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: vmwgfx: Use nsec based interfaces

No point in converting timespecs back and forth.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>

Authored by Thomas Gleixner and committed by John Stultz.
f166e6dc 5ed0bdf2

+16 -32
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 159 159 160 160 struct vmw_marker_queue { 161 161 struct list_head head; 162 - struct timespec lag; 163 - struct timespec lag_time; 162 + u64 lag; 163 + u64 lag_time; 164 164 spinlock_t lock; 165 165 }; 166 166
+14 -30
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
··· 31 31 struct vmw_marker { 32 32 struct list_head head; 33 33 uint32_t seqno; 34 - struct timespec submitted; 34 + u64 submitted; 35 35 }; 36 36 37 37 void vmw_marker_queue_init(struct vmw_marker_queue *queue) 38 38 { 39 39 INIT_LIST_HEAD(&queue->head); 40 - queue->lag = ns_to_timespec(0); 41 - getrawmonotonic(&queue->lag_time); 40 + queue->lag = 0; 41 + queue->lag_time = ktime_get_raw_ns(); 42 42 spin_lock_init(&queue->lock); 43 43 } 44 44 ··· 62 62 return -ENOMEM; 63 63 64 64 marker->seqno = seqno; 65 - getrawmonotonic(&marker->submitted); 65 + marker->submitted = ktime_get_raw_ns(); 66 66 spin_lock(&queue->lock); 67 67 list_add_tail(&marker->head, &queue->head); 68 68 spin_unlock(&queue->lock); ··· 74 74 uint32_t signaled_seqno) 75 75 { 76 76 struct vmw_marker *marker, *next; 77 - struct timespec now; 78 77 bool updated = false; 78 + u64 now; 79 79 80 80 spin_lock(&queue->lock); 81 - getrawmonotonic(&now); 81 + now = ktime_get_raw_ns(); 82 82 83 83 if (list_empty(&queue->head)) { 84 - queue->lag = ns_to_timespec(0); 84 + queue->lag = 0; 85 85 queue->lag_time = now; 86 86 updated = true; 87 87 goto out_unlock; ··· 91 91 if (signaled_seqno - marker->seqno > (1 << 30)) 92 92 continue; 93 93 94 - queue->lag = timespec_sub(now, marker->submitted); 94 + queue->lag = now - marker->submitted; 95 95 queue->lag_time = now; 96 96 updated = true; 97 97 list_del(&marker->head); ··· 104 104 return (updated) ? 
0 : -EBUSY; 105 105 } 106 106 107 - static struct timespec vmw_timespec_add(struct timespec t1, 108 - struct timespec t2) 107 + static u64 vmw_fifo_lag(struct vmw_marker_queue *queue) 109 108 { 110 - t1.tv_sec += t2.tv_sec; 111 - t1.tv_nsec += t2.tv_nsec; 112 - if (t1.tv_nsec >= 1000000000L) { 113 - t1.tv_sec += 1; 114 - t1.tv_nsec -= 1000000000L; 115 - } 116 - 117 - return t1; 118 - } 119 - 120 - static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue) 121 - { 122 - struct timespec now; 109 + u64 now; 123 110 124 111 spin_lock(&queue->lock); 125 - getrawmonotonic(&now); 126 - queue->lag = vmw_timespec_add(queue->lag, 127 - timespec_sub(now, queue->lag_time)); 112 + now = ktime_get_raw_ns(); 113 + queue->lag += now - queue->lag_time; 128 114 queue->lag_time = now; 129 115 spin_unlock(&queue->lock); 130 116 return queue->lag; ··· 120 134 static bool vmw_lag_lt(struct vmw_marker_queue *queue, 121 135 uint32_t us) 122 136 { 123 - struct timespec lag, cond; 137 + u64 cond = (u64) us * NSEC_PER_USEC; 124 138 125 - cond = ns_to_timespec((s64) us * 1000); 126 - lag = vmw_fifo_lag(queue); 127 - return (timespec_compare(&lag, &cond) < 1); 139 + return vmw_fifo_lag(queue) <= cond; 128 140 } 129 141 130 142 int vmw_wait_lag(struct vmw_private *dev_priv,