Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Avoid mixing integer types during batch copies

Be consistent and use unsigned long throughout the chunk copies to
avoid the inherent clumsiness of mixing integer types of different
widths and signs. Failing to take account of a wider unsigned type when
using min_t can lead to treating it as a negative, only for it to flip back
to a large unsigned value after passing a boundary check.

Fixes: ed13033f0287 ("drm/i915/cmdparser: Only cache the dst vmap")
Testcase: igt/gen9_exec_parse/bb-large
Reported-by: "Candelaria, Jared" <jared.candelaria@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: "Candelaria, Jared" <jared.candelaria@intel.com>
Cc: "Bloomfield, Jon" <jon.bloomfield@intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200928215942.31917-1-chris@chris-wilson.co.uk
(cherry picked from commit b7eeb2b4132ccf1a7d38f434cde7043913d1ed3c)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

authored by

Chris Wilson and committed by
Rodrigo Vivi
c60b93cd 651dabe2

+12 -9
+5 -2
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 2267 2267 struct i915_vma *batch; 2268 2268 struct i915_vma *shadow; 2269 2269 struct i915_vma *trampoline; 2270 - unsigned int batch_offset; 2271 - unsigned int batch_length; 2270 + unsigned long batch_offset; 2271 + unsigned long batch_length; 2272 2272 }; 2273 2273 2274 2274 static int __eb_parse(struct dma_fence_work *work) ··· 2337 2337 { 2338 2338 struct eb_parse_work *pw; 2339 2339 int err; 2340 + 2341 + GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset)); 2342 + GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length)); 2340 2343 2341 2344 pw = kzalloc(sizeof(*pw), GFP_KERNEL); 2342 2345 if (!pw)
+5 -5
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 1136 1136 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ 1137 1137 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, 1138 1138 struct drm_i915_gem_object *src_obj, 1139 - u32 offset, u32 length) 1139 + unsigned long offset, unsigned long length) 1140 1140 { 1141 1141 bool needs_clflush; 1142 1142 void *dst, *src; ··· 1166 1166 } 1167 1167 } 1168 1168 if (IS_ERR(src)) { 1169 + unsigned long x, n; 1169 1170 void *ptr; 1170 - int x, n; 1171 1171 1172 1172 /* 1173 1173 * We can avoid clflushing partial cachelines before the write ··· 1184 1184 ptr = dst; 1185 1185 x = offset_in_page(offset); 1186 1186 for (n = offset >> PAGE_SHIFT; length; n++) { 1187 - int len = min_t(int, length, PAGE_SIZE - x); 1187 + int len = min(length, PAGE_SIZE - x); 1188 1188 1189 1189 src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); 1190 1190 if (needs_clflush) ··· 1414 1414 */ 1415 1415 int intel_engine_cmd_parser(struct intel_engine_cs *engine, 1416 1416 struct i915_vma *batch, 1417 - u32 batch_offset, 1418 - u32 batch_length, 1417 + unsigned long batch_offset, 1418 + unsigned long batch_length, 1419 1419 struct i915_vma *shadow, 1420 1420 bool trampoline) 1421 1421 {
+2 -2
drivers/gpu/drm/i915/i915_drv.h
··· 1949 1949 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); 1950 1950 int intel_engine_cmd_parser(struct intel_engine_cs *engine, 1951 1951 struct i915_vma *batch, 1952 - u32 batch_offset, 1953 - u32 batch_length, 1952 + unsigned long batch_offset, 1953 + unsigned long batch_length, 1954 1954 struct i915_vma *shadow, 1955 1955 bool trampoline); 1956 1956 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8