Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-fixes-2024-01-04' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

One fix for drm/plane to avoid a use-after-free and some additional
warnings to prevent more of these occurrences, a lock inversion
dependency fix and an indentation fix for drm/rockchip, and some doc
warning fixes for imagination and gpuvm.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/enhl33v2oeihktta2yfyc4exvezdvm3eexcuwxkethc5ommrjo@lkidkv2kwakq

+79 -65
+4 -1
drivers/gpu/drm/drm_framebuffer.c
··· 461 461 462 462 INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); 463 463 INIT_LIST_HEAD(&arg.fbs); 464 + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); 464 465 list_add_tail(&fb->filp_head, &arg.fbs); 465 466 466 467 schedule_work(&arg.work); ··· 828 827 container_of(kref, struct drm_framebuffer, base.refcount); 829 828 struct drm_device *dev = fb->dev; 830 829 830 + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); 831 + 831 832 /* 832 833 * The lookup idr holds a weak reference, which has not necessarily been 833 834 * removed at this point. Check for that. ··· 1122 1119 1123 1120 dev = fb->dev; 1124 1121 1125 - WARN_ON(!list_empty(&fb->filp_head)); 1122 + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); 1126 1123 1127 1124 /* 1128 1125 * drm ABI mandates that we remove any deleted framebuffers from active
+1
drivers/gpu/drm/drm_plane.c
··· 1503 1503 out: 1504 1504 if (fb) 1505 1505 drm_framebuffer_put(fb); 1506 + fb = NULL; 1506 1507 if (plane->old_fb) 1507 1508 drm_framebuffer_put(plane->old_fb); 1508 1509 plane->old_fb = NULL;
+25 -21
drivers/gpu/drm/imagination/pvr_device.h
··· 193 193 * @queues: Queue-related fields. 194 194 */ 195 195 struct { 196 - /** @active: Active queue list. */ 196 + /** @queues.active: Active queue list. */ 197 197 struct list_head active; 198 198 199 - /** @idle: Idle queue list. */ 199 + /** @queues.idle: Idle queue list. */ 200 200 struct list_head idle; 201 201 202 - /** @lock: Lock protecting access to the active/idle lists. */ 202 + /** @queues.lock: Lock protecting access to the active/idle 203 + * lists. */ 203 204 struct mutex lock; 204 205 } queues; 205 206 ··· 208 207 * @watchdog: Watchdog for communications with firmware. 209 208 */ 210 209 struct { 211 - /** @work: Work item for watchdog callback. */ 210 + /** @watchdog.work: Work item for watchdog callback. */ 212 211 struct delayed_work work; 213 212 214 213 /** 215 - * @old_kccb_cmds_executed: KCCB command execution count at last 216 - * watchdog poll. 214 + * @watchdog.old_kccb_cmds_executed: KCCB command execution 215 + * count at last watchdog poll. 217 216 */ 218 217 u32 old_kccb_cmds_executed; 219 218 220 219 /** 221 - * @kccb_stall_count: Number of watchdog polls KCCB has been 222 - * stalled for. 220 + * @watchdog.kccb_stall_count: Number of watchdog polls 221 + * KCCB has been stalled for. 223 222 */ 224 223 u32 kccb_stall_count; 225 224 } watchdog; ··· 228 227 * @kccb: Circular buffer for communications with firmware. 229 228 */ 230 229 struct { 231 - /** @ccb: Kernel CCB. */ 230 + /** @kccb.ccb: Kernel CCB. */ 232 231 struct pvr_ccb ccb; 233 232 234 - /** @rtn_q: Waitqueue for KCCB command return waiters. */ 233 + /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */ 235 234 wait_queue_head_t rtn_q; 236 235 237 - /** @rtn_obj: Object representing KCCB return slots. */ 236 + /** @kccb.rtn_obj: Object representing KCCB return slots. */ 238 237 struct pvr_fw_object *rtn_obj; 239 238 240 239 /** 241 - * @rtn: Pointer to CPU mapping of KCCB return slots. Must be 242 - * accessed by READ_ONCE()/WRITE_ONCE(). 
240 + * @kccb.rtn: Pointer to CPU mapping of KCCB return slots. 241 + * Must be accessed by READ_ONCE()/WRITE_ONCE(). 243 242 */ 244 243 u32 *rtn; 245 244 246 - /** @slot_count: Total number of KCCB slots available. */ 245 + /** @kccb.slot_count: Total number of KCCB slots available. */ 247 246 u32 slot_count; 248 247 249 - /** @reserved_count: Number of KCCB slots reserved for future use. */ 248 + /** @kccb.reserved_count: Number of KCCB slots reserved for 249 + * future use. */ 250 250 u32 reserved_count; 251 251 252 252 /** 253 - * @waiters: List of KCCB slot waiters. 253 + * @kccb.waiters: List of KCCB slot waiters. 254 254 */ 255 255 struct list_head waiters; 256 256 257 - /** @fence_ctx: KCCB fence context. */ 257 + /** @kccb.fence_ctx: KCCB fence context. */ 258 258 struct { 259 - /** @id: KCCB fence context ID allocated with dma_fence_context_alloc(). */ 259 + /** @kccb.fence_ctx.id: KCCB fence context ID 260 + * allocated with dma_fence_context_alloc(). */ 260 261 u64 id; 261 262 262 - /** @seqno: Sequence number incremented each time a fence is created. */ 263 + /** @kccb.fence_ctx.seqno: Sequence number incremented 264 + * each time a fence is created. */ 263 265 atomic_t seqno; 264 266 265 267 /** 266 - * @lock: Lock used to synchronize access to fences allocated by this 267 - * context. 268 + * @kccb.fence_ctx.lock: Lock used to synchronize 269 + * access to fences allocated by this context. 268 270 */ 269 271 spinlock_t lock; 270 272 } fence_ctx;
+4 -8
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 959 959 return; 960 960 } 961 961 962 - ret = regmap_reinit_cache(vop2->map, &vop2_regmap_config); 963 - if (ret) { 964 - drm_err(vop2->drm, "failed to reinit cache: %d\n", ret); 965 - return; 966 - } 967 - 968 962 if (vop2->data->soc_id == 3566) 969 963 vop2_writel(vop2, RK3568_OTP_WIN_EN, 1); 970 964 ··· 989 995 rockchip_drm_dma_detach_device(vop2->drm, vop2->dev); 990 996 991 997 pm_runtime_put_sync(vop2->dev); 998 + 999 + regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register); 992 1000 993 1001 clk_disable_unprepare(vop2->pclk); 994 1002 clk_disable_unprepare(vop2->aclk); ··· 1701 1705 * *if_pixclk_div = dclk_rate / if_pixclk_rate; 1702 1706 * *if_dclk_div = dclk_rate / if_dclk_rate; 1703 1707 */ 1704 - *if_pixclk_div = 2; 1705 - *if_dclk_div = 4; 1708 + *if_pixclk_div = 2; 1709 + *if_dclk_div = 4; 1706 1710 } else if (vop2_output_if_is_edp(id)) { 1707 1711 /* 1708 1712 * edp_pixclk = edp_dclk > dclk_core
+45 -35
include/drm/drm_gpuvm.h
··· 92 92 */ 93 93 struct { 94 94 /** 95 - * @addr: the start address 95 + * @va.addr: the start address 96 96 */ 97 97 u64 addr; 98 98 ··· 107 107 */ 108 108 struct { 109 109 /** 110 - * @offset: the offset within the &drm_gem_object 110 + * @gem.offset: the offset within the &drm_gem_object 111 111 */ 112 112 u64 offset; 113 113 114 114 /** 115 - * @obj: the mapped &drm_gem_object 115 + * @gem.obj: the mapped &drm_gem_object 116 116 */ 117 117 struct drm_gem_object *obj; 118 118 119 119 /** 120 - * @entry: the &list_head to attach this object to a &drm_gpuvm_bo 120 + * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo 121 121 */ 122 122 struct list_head entry; 123 123 } gem; ··· 127 127 */ 128 128 struct { 129 129 /** 130 - * @rb: the rb-tree node 130 + * @rb.node: the rb-tree node 131 131 */ 132 132 struct rb_node node; 133 133 134 134 /** 135 - * @entry: The &list_head to additionally connect &drm_gpuvas 135 + * @rb.entry: The &list_head to additionally connect &drm_gpuvas 136 136 * in the same order they appear in the interval tree. 
This is 137 137 * useful to keep iterating &drm_gpuvas from a start node found 138 138 * through the rb-tree while doing modifications on the rb-tree ··· 141 141 struct list_head entry; 142 142 143 143 /** 144 - * @__subtree_last: needed by the interval tree, holding last-in-subtree 144 + * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree 145 145 */ 146 146 u64 __subtree_last; 147 147 } rb; ··· 187 187 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva 188 188 * is invalidated 189 189 * @va: the &drm_gpuva to check 190 + * 191 + * Returns: %true if the GPU VA is invalidated, %false otherwise 190 192 */ 191 193 static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) 192 194 { ··· 254 252 */ 255 253 struct { 256 254 /** 257 - * @tree: the rb-tree to track GPU VA mappings 255 + * @rb.tree: the rb-tree to track GPU VA mappings 258 256 */ 259 257 struct rb_root_cached tree; 260 258 261 259 /** 262 - * @list: the &list_head to track GPU VA mappings 260 + * @rb.list: the &list_head to track GPU VA mappings 263 261 */ 264 262 struct list_head list; 265 263 } rb; ··· 292 290 */ 293 291 struct { 294 292 /** 295 - * @list: &list_head storing &drm_gpuvm_bos serving as 293 + * @extobj.list: &list_head storing &drm_gpuvm_bos serving as 296 294 * external object 297 295 */ 298 296 struct list_head list; 299 297 300 298 /** 301 - * @local_list: pointer to the local list temporarily storing 302 - * entries from the external object list 299 + * @extobj.local_list: pointer to the local list temporarily 300 + * storing entries from the external object list 303 301 */ 304 302 struct list_head *local_list; 305 303 306 304 /** 307 - * @lock: spinlock to protect the extobj list 305 + * @extobj.lock: spinlock to protect the extobj list 308 306 */ 309 307 spinlock_t lock; 310 308 } extobj; ··· 314 312 */ 315 313 struct { 316 314 /** 317 - * @list: &list_head storing &drm_gpuvm_bos currently being 318 - * evicted 315 + * 
@evict.list: &list_head storing &drm_gpuvm_bos currently 316 + * being evicted 319 317 */ 320 318 struct list_head list; 321 319 322 320 /** 323 - * @local_list: pointer to the local list temporarily storing 324 - * entries from the evicted object list 321 + * @evict.local_list: pointer to the local list temporarily 322 + * storing entries from the evicted object list 325 323 */ 326 324 struct list_head *local_list; 327 325 328 326 /** 329 - * @lock: spinlock to protect the evict list 327 + * @evict.lock: spinlock to protect the evict list 330 328 */ 331 329 spinlock_t lock; 332 330 } evict; ··· 346 344 * 347 345 * This function acquires an additional reference to @gpuvm. It is illegal to 348 346 * call this without already holding a reference. No locks required. 347 + * 348 + * Returns: the &struct drm_gpuvm pointer 349 349 */ 350 350 static inline struct drm_gpuvm * 351 351 drm_gpuvm_get(struct drm_gpuvm *gpuvm) ··· 537 533 */ 538 534 struct { 539 535 /** 540 - * @fn: The driver callback to lock additional &drm_gem_objects. 536 + * @extra.fn: The driver callback to lock additional 537 + * &drm_gem_objects. 541 538 */ 542 539 int (*fn)(struct drm_gpuvm_exec *vm_exec); 543 540 544 541 /** 545 - * @priv: driver private data for the @fn callback 542 + * @extra.priv: driver private data for the @fn callback 546 543 */ 547 544 void *priv; 548 545 } extra; ··· 594 589 enum dma_resv_usage extobj_usage); 595 590 596 591 /** 597 - * drm_gpuvm_exec_resv_add_fence() 592 + * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj 598 593 * @vm_exec: the &drm_gpuvm_exec wrapper 599 594 * @fence: fence to add 600 595 * @private_usage: private dma-resv usage ··· 613 608 } 614 609 615 610 /** 616 - * drm_gpuvm_exec_validate() 611 + * drm_gpuvm_exec_validate() - validate all BOs marked as evicted 617 612 * @vm_exec: the &drm_gpuvm_exec wrapper 618 613 * 619 614 * See drm_gpuvm_validate(). 615 + * 616 + * Returns: 0 on success, negative error code on failure. 
620 617 */ 621 618 static inline int 622 619 drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec) ··· 671 664 */ 672 665 struct { 673 666 /** 674 - * @gpuva: The list of linked &drm_gpuvas. 667 + * @list.gpuva: The list of linked &drm_gpuvas. 675 668 * 676 669 * It is safe to access entries from this list as long as the 677 670 * GEM's gpuva lock is held. See also struct drm_gem_object. ··· 679 672 struct list_head gpuva; 680 673 681 674 /** 682 - * @entry: Structure containing all &list_heads serving as 675 + * @list.entry: Structure containing all &list_heads serving as 683 676 * entry. 684 677 */ 685 678 struct { 686 679 /** 687 - * @gem: List entry to attach to the &drm_gem_objects 688 - * gpuva list. 680 + * @list.entry.gem: List entry to attach to the 681 + * &drm_gem_objects gpuva list. 689 682 */ 690 683 struct list_head gem; 691 684 692 685 /** 693 - * @evict: List entry to attach to the &drm_gpuvms 694 - * extobj list. 686 + * @list.entry.evict: List entry to attach to the 687 + * &drm_gpuvms extobj list. 695 688 */ 696 689 struct list_head extobj; 697 690 698 691 /** 699 - * @evict: List entry to attach to the &drm_gpuvms evict 700 - * list. 692 + * @list.entry.evict: List entry to attach to the 693 + * &drm_gpuvms evict list. 701 694 */ 702 695 struct list_head evict; 703 696 } entry; ··· 720 713 * 721 714 * This function acquires an additional reference to @vm_bo. It is illegal to 722 715 * call this without already holding a reference. No locks required. 
716 + * 717 + * Returns: the &struct vm_bo pointer 723 718 */ 724 719 static inline struct drm_gpuvm_bo * 725 720 drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo) ··· 739 730 void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict); 740 731 741 732 /** 742 - * drm_gpuvm_bo_gem_evict() 733 + * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list 734 + * to/from the &drm_gpuvms evicted list 743 735 * @obj: the &drm_gem_object 744 736 * @evict: indicates whether @obj is evicted 745 737 * ··· 827 817 */ 828 818 struct { 829 819 /** 830 - * @addr: the base address of the new mapping 820 + * @va.addr: the base address of the new mapping 831 821 */ 832 822 u64 addr; 833 823 834 824 /** 835 - * @range: the range of the new mapping 825 + * @va.range: the range of the new mapping 836 826 */ 837 827 u64 range; 838 828 } va; ··· 842 832 */ 843 833 struct { 844 834 /** 845 - * @offset: the offset within the &drm_gem_object 835 + * @gem.offset: the offset within the &drm_gem_object 846 836 */ 847 837 u64 offset; 848 838 849 839 /** 850 - * @obj: the &drm_gem_object to map 840 + * @gem.obj: the &drm_gem_object to map 851 841 */ 852 842 struct drm_gem_object *obj; 853 843 } gem;