Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/sharp-memory: Do not access GEM-DMA vaddr directly

Use DRM's shadow-plane helper to map and access the GEM object's buffer
within kernel address space. This encapsulates the vmap logic in the
GEM-DMA helpers.

The sharp-memory driver currently reads the vaddr field from the GEM
buffer object directly. This only works because GEM code 'automagically'
sets vaddr.

Shadow-plane helpers perform the same steps, but with correct abstraction
behind drm_gem_vmap(). The shadow-plane state provides the buffer address
in kernel address space and the format-conversion state.

v2:
- fix typo in commit description

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://lore.kernel.org/r/20250627152327.8244-1-tzimmermann@suse.de

+13 -14
+13 -14
drivers/gpu/drm/tiny/sharp-memory.c
··· 126 126 127 127 static void sharp_memory_set_tx_buffer_data(u8 *buffer, 128 128 struct drm_framebuffer *fb, 129 + const struct iosys_map *vmap, 129 130 struct drm_rect clip, 130 131 u32 pitch, 131 132 struct drm_format_conv_state *fmtcnv_state) 132 133 { 133 134 int ret; 134 - struct iosys_map dst, vmap; 135 - struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); 135 + struct iosys_map dst; 136 136 137 137 ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 138 138 if (ret) 139 139 return; 140 140 141 141 iosys_map_set_vaddr(&dst, buffer); 142 - iosys_map_set_vaddr(&vmap, dma_obj->vaddr); 143 142 144 - drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state); 143 + drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state); 145 144 146 145 drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 147 146 } 148 147 149 148 static int sharp_memory_update_display(struct sharp_memory_device *smd, 150 149 struct drm_framebuffer *fb, 150 + const struct iosys_map *vmap, 151 151 struct drm_rect clip, 152 152 struct drm_format_conv_state *fmtcnv_state) 153 153 { ··· 163 163 sharp_memory_set_tx_buffer_mode(&tx_buffer[0], 164 164 SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom); 165 165 sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch); 166 - sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state); 166 + sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state); 167 167 168 168 ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size); 169 169 ··· 206 206 return ret; 207 207 } 208 208 209 - static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect, 209 + static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap, 210 + struct drm_rect *rect, 210 211 struct drm_format_conv_state *fmtconv_state) 211 212 { 212 213 struct drm_rect clip; ··· 219 218 clip.y1 = rect->y1; 220 219 clip.y2 = rect->y2; 221 220 222 - 
sharp_memory_update_display(smd, fb, clip, fmtconv_state); 221 + sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state); 223 222 } 224 223 225 224 static int sharp_memory_plane_atomic_check(struct drm_plane *plane, ··· 243 242 { 244 243 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); 245 244 struct drm_plane_state *plane_state = plane->state; 246 - struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; 245 + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 247 246 struct sharp_memory_device *smd; 248 247 struct drm_rect rect; 249 248 ··· 252 251 return; 253 252 254 253 if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect)) 255 - sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state); 256 - 257 - drm_format_conv_state_release(&fmtcnv_state); 254 + sharp_memory_fb_dirty(plane_state->fb, shadow_plane_state->data, 255 + &rect, &shadow_plane_state->fmtcnv_state); 258 256 } 259 257 260 258 static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = { 261 259 .prepare_fb = drm_gem_plane_helper_prepare_fb, 262 260 .atomic_check = sharp_memory_plane_atomic_check, 263 261 .atomic_update = sharp_memory_plane_atomic_update, 262 + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, 264 263 }; 265 264 266 265 static bool sharp_memory_format_mod_supported(struct drm_plane *plane, ··· 274 273 .update_plane = drm_atomic_helper_update_plane, 275 274 .disable_plane = drm_atomic_helper_disable_plane, 276 275 .destroy = drm_plane_cleanup, 277 - .reset = drm_atomic_helper_plane_reset, 278 - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 279 - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 276 + DRM_GEM_SHADOW_PLANE_FUNCS, 280 277 .format_mod_supported = sharp_memory_format_mod_supported, 281 278 }; 282 279