Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/migrate: skip bounce buffer path on xe2

Now that we support MEM_COPY we should be able to use the PAGE_COPY
mode, otherwise falling back to BYTE_COPY mode when we have odd
sizing/alignment.

v2:
- Use info.has_mem_copy_instr
- Rebase on latest changes.
v3 (Matt Brost):
- Allow various pitches including 1-byte pitch for MEM_COPY

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251022163836.191405-8-matthew.auld@intel.com

+32 -11
+32 -11
drivers/gpu/drm/xe/xe_migrate.c
··· 1920 1920 #define XE_CACHELINE_BYTES 64ull 1921 1921 #define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1) 1922 1922 1923 + static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len) 1924 + { 1925 + u32 pitch; 1926 + 1927 + if (IS_ALIGNED(len, PAGE_SIZE)) 1928 + pitch = PAGE_SIZE; 1929 + else if (IS_ALIGNED(len, SZ_4K)) 1930 + pitch = SZ_4K; 1931 + else if (IS_ALIGNED(len, SZ_256)) 1932 + pitch = SZ_256; 1933 + else if (IS_ALIGNED(len, 4)) 1934 + pitch = 4; 1935 + else 1936 + pitch = 1; 1937 + 1938 + xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr); 1939 + return pitch; 1940 + } 1941 + 1923 1942 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, 1924 1943 unsigned long len, 1925 1944 unsigned long sram_offset, ··· 1956 1937 struct xe_bb *bb; 1957 1938 u32 update_idx, pt_slot = 0; 1958 1939 unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE); 1959 - unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ? 1960 - PAGE_SIZE : 4; 1940 + unsigned int pitch = xe_migrate_copy_pitch(xe, len); 1961 1941 int err; 1962 1942 unsigned long i, j; 1963 1943 bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset); 1964 1944 1965 - if (drm_WARN_ON(&xe->drm, (!IS_ALIGNED(len, pitch)) || 1966 - (sram_offset | vram_addr) & XE_CACHELINE_MASK)) 1945 + if (!xe->info.has_mem_copy_instr && 1946 + drm_WARN_ON(&xe->drm, 1947 + (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK)) 1967 1948 return ERR_PTR(-EOPNOTSUPP); 1968 1949 1969 1950 xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER); ··· 2182 2163 xe_bo_assert_held(bo); 2183 2164 2184 2165 /* Use bounce buffer for small access and unaligned access */ 2185 - if (!IS_ALIGNED(len, 4) || 2186 - !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) || 2187 - !IS_ALIGNED(offset, XE_CACHELINE_BYTES)) { 2166 + if (!xe->info.has_mem_copy_instr && 2167 + (!IS_ALIGNED(len, 4) || 2168 + !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) || 2169 + !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) { 2188 2170 int buf_offset = 0; 2189 2171 void *bounce; 2190 2172 int err; ··· 2247 2227 u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) + 2248 2228 cursor.start; 2249 2229 int current_bytes; 2230 + u32 pitch; 2250 2231 2251 2232 if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER) 2252 2233 current_bytes = min_t(int, bytes_left, ··· 2255 2234 else 2256 2235 current_bytes = min_t(int, bytes_left, cursor.size); 2257 2236 2258 - if (current_bytes & ~PAGE_MASK) { 2259 - int pitch = 4; 2260 - 2237 + pitch = xe_migrate_copy_pitch(xe, current_bytes); 2238 + if (xe->info.has_mem_copy_instr) 2239 + current_bytes = min_t(int, current_bytes, U16_MAX * pitch); 2240 + else 2261 2241 current_bytes = min_t(int, current_bytes, 2262 2242 round_down(S16_MAX * pitch, 2263 2243 XE_CACHELINE_BYTES)); 2264 - } 2265 2244 2266 2245 __fence = xe_migrate_vram(m, current_bytes, 2267 2246 (unsigned long)buf & ~PAGE_MASK,