Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf/sync-file: fix warning about fence containers

The dma_fence_chain containers can show up in sync_files as well, resulting in
warnings that those can't be added to dma_fence_array containers when merging
multiple sync_files together.

Solve this by using the dma_fence_unwrap iterator to deep-dive into the
contained fences and then add those, flattened out, into a dma_fence_array.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220311110244.1245-2-christian.koenig@amd.com

+73 -68
+73 -68
drivers/dma-buf/sync_file.c
··· 5 5 * Copyright (C) 2012 Google, Inc. 6 6 */ 7 7 8 + #include <linux/dma-fence-unwrap.h> 8 9 #include <linux/export.h> 9 10 #include <linux/file.h> 10 11 #include <linux/fs.h> ··· 173 172 return 0; 174 173 } 175 174 176 - static struct dma_fence **get_fences(struct sync_file *sync_file, 177 - int *num_fences) 178 - { 179 - if (dma_fence_is_array(sync_file->fence)) { 180 - struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); 181 - 182 - *num_fences = array->num_fences; 183 - return array->fences; 184 - } 185 - 186 - *num_fences = 1; 187 - return &sync_file->fence; 188 - } 189 - 190 175 static void add_fence(struct dma_fence **fences, 191 176 int *i, struct dma_fence *fence) 192 177 { ··· 197 210 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, 198 211 struct sync_file *b) 199 212 { 213 + struct dma_fence *a_fence, *b_fence, **fences; 214 + struct dma_fence_unwrap a_iter, b_iter; 215 + unsigned int index, num_fences; 200 216 struct sync_file *sync_file; 201 - struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; 202 - int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; 203 217 204 218 sync_file = sync_file_alloc(); 205 219 if (!sync_file) 206 220 return NULL; 207 221 208 - a_fences = get_fences(a, &a_num_fences); 209 - b_fences = get_fences(b, &b_num_fences); 210 - if (a_num_fences > INT_MAX - b_num_fences) 211 - goto err; 222 + num_fences = 0; 223 + dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence) 224 + ++num_fences; 225 + dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence) 226 + ++num_fences; 212 227 213 - num_fences = a_num_fences + b_num_fences; 228 + if (num_fences > INT_MAX) 229 + goto err_free_sync_file; 214 230 215 231 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); 216 232 if (!fences) 217 - goto err; 233 + goto err_free_sync_file; 218 234 219 235 /* 220 - * Assume sync_file a and b are both ordered and have no 221 - * duplicates with the same context. 
236 + * We can't guarantee that fences in both a and b are ordered, but it is 237 + * still quite likely. 222 238 * 223 - * If a sync_file can only be created with sync_file_merge 224 - * and sync_file_create, this is a reasonable assumption. 239 + * So attempt to order the fences as we pass over them and merge fences 240 + * with the same context. 225 241 */ 226 - for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { 227 - struct dma_fence *pt_a = a_fences[i_a]; 228 - struct dma_fence *pt_b = b_fences[i_b]; 229 242 230 - if (pt_a->context < pt_b->context) { 231 - add_fence(fences, &i, pt_a); 243 + index = 0; 244 + for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter), 245 + b_fence = dma_fence_unwrap_first(b->fence, &b_iter); 246 + a_fence || b_fence; ) { 232 247 233 - i_a++; 234 - } else if (pt_a->context > pt_b->context) { 235 - add_fence(fences, &i, pt_b); 248 + if (!b_fence) { 249 + add_fence(fences, &index, a_fence); 250 + a_fence = dma_fence_unwrap_next(&a_iter); 236 251 237 - i_b++; 252 + } else if (!a_fence) { 253 + add_fence(fences, &index, b_fence); 254 + b_fence = dma_fence_unwrap_next(&b_iter); 255 + 256 + } else if (a_fence->context < b_fence->context) { 257 + add_fence(fences, &index, a_fence); 258 + a_fence = dma_fence_unwrap_next(&a_iter); 259 + 260 + } else if (b_fence->context < a_fence->context) { 261 + add_fence(fences, &index, b_fence); 262 + b_fence = dma_fence_unwrap_next(&b_iter); 263 + 264 + } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno, 265 + a_fence->ops)) { 266 + add_fence(fences, &index, a_fence); 267 + a_fence = dma_fence_unwrap_next(&a_iter); 268 + b_fence = dma_fence_unwrap_next(&b_iter); 269 + 238 270 } else { 239 - if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno, 240 - pt_a->ops)) 241 - add_fence(fences, &i, pt_a); 242 - else 243 - add_fence(fences, &i, pt_b); 244 - 245 - i_a++; 246 - i_b++; 271 + add_fence(fences, &index, b_fence); 272 + a_fence = dma_fence_unwrap_next(&a_iter); 273 + b_fence = 
dma_fence_unwrap_next(&b_iter); 247 274 } 248 275 } 249 276 250 - for (; i_a < a_num_fences; i_a++) 251 - add_fence(fences, &i, a_fences[i_a]); 277 + if (index == 0) 278 + add_fence(fences, &index, dma_fence_get_stub()); 252 279 253 - for (; i_b < b_num_fences; i_b++) 254 - add_fence(fences, &i, b_fences[i_b]); 280 + if (num_fences > index) { 281 + struct dma_fence **tmp; 255 282 256 - if (i == 0) 257 - fences[i++] = dma_fence_get(a_fences[0]); 258 - 259 - if (num_fences > i) { 260 - nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL); 261 - if (!nfences) 262 - goto err; 263 - 264 - fences = nfences; 283 + /* Keep going even when reducing the size failed */ 284 + tmp = krealloc_array(fences, index, sizeof(*fences), 285 + GFP_KERNEL); 286 + if (tmp) 287 + fences = tmp; 265 288 } 266 289 267 - if (sync_file_set_fence(sync_file, fences, i) < 0) 268 - goto err; 290 + if (sync_file_set_fence(sync_file, fences, index) < 0) 291 + goto err_put_fences; 269 292 270 293 strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); 271 294 return sync_file; 272 295 273 - err: 274 - while (i) 275 - dma_fence_put(fences[--i]); 296 + err_put_fences: 297 + while (index) 298 + dma_fence_put(fences[--index]); 276 299 kfree(fences); 300 + 301 + err_free_sync_file: 277 302 fput(sync_file->file); 278 303 return NULL; 279 - 280 304 } 281 305 282 306 static int sync_file_release(struct inode *inode, struct file *file) ··· 396 398 static long sync_file_ioctl_fence_info(struct sync_file *sync_file, 397 399 unsigned long arg) 398 400 { 399 - struct sync_file_info info; 400 401 struct sync_fence_info *fence_info = NULL; 401 - struct dma_fence **fences; 402 + struct dma_fence_unwrap iter; 403 + struct sync_file_info info; 404 + unsigned int num_fences; 405 + struct dma_fence *fence; 406 + int ret; 402 407 __u32 size; 403 - int num_fences, ret, i; 404 408 405 409 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 406 410 return -EFAULT; ··· 410 410 if (info.flags || 
info.pad) 411 411 return -EINVAL; 412 412 413 - fences = get_fences(sync_file, &num_fences); 413 + num_fences = 0; 414 + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) 415 + ++num_fences; 414 416 415 417 /* 416 418 * Passing num_fences = 0 means that userspace doesn't want to ··· 435 433 if (!fence_info) 436 434 return -ENOMEM; 437 435 438 - for (i = 0; i < num_fences; i++) { 439 - int status = sync_fill_fence_info(fences[i], &fence_info[i]); 436 + num_fences = 0; 437 + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) { 438 + int status; 439 + 440 + status = sync_fill_fence_info(fence, &fence_info[num_fences++]); 440 441 info.status = info.status <= 0 ? info.status : status; 441 442 } 442 443