Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: MIT
2
3#include <linux/fb.h>
4
5#include <drm/drm_crtc_helper.h>
6#include <drm/drm_drv.h>
7#include <drm/drm_fb_dma_helper.h>
8#include <drm/drm_fb_helper.h>
9#include <drm/drm_framebuffer.h>
10#include <drm/drm_gem_dma_helper.h>
11
12#include <drm/drm_fbdev_dma.h>
13
14/*
15 * struct fb_ops
16 */
17
18static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19{
20 struct drm_fb_helper *fb_helper = info->par;
21
22 /* No need to take a ref for fbcon because it unbinds on unregister */
23 if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 return -ENODEV;
25
26 return 0;
27}
28
29static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30{
31 struct drm_fb_helper *fb_helper = info->par;
32
33 if (user)
34 module_put(fb_helper->dev->driver->fops->owner);
35
36 return 0;
37}
38
39static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
40{
41 struct drm_fb_helper *fb_helper = info->par;
42
43 return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
44}
45
/*
 * Final teardown of the fbdev emulation; called when the last
 * fb_info reference is dropped. Releases the deferred-I/O state,
 * the fb-helper, the client buffer mapping/framebuffer, the DRM
 * client, and finally the fb_helper allocation itself.
 */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* Nothing to do if the helper was never bound to a device. */
	if (!fb_helper->dev)
		return;

	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Unmap before deleting the framebuffer that backs the mapping. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
62
/* fb_ops for directly mmap'able (non-deferred-I/O) framebuffers */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
73
/*
 * Generates the drm_fbdev_dma_defio_* read/write/draw helpers that
 * route damage through the fb-helper damage handlers for deferred I/O.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
77
78static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
79{
80 struct drm_fb_helper *fb_helper = info->par;
81 struct drm_framebuffer *fb = fb_helper->fb;
82 struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
83
84 if (!dma->map_noncoherent)
85 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
86
87 return fb_deferred_io_mmap(info, vma);
88}
89
/* fb_ops for framebuffers that require deferred I/O (->dirty callback) */
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
100
101/*
102 * struct drm_fb_helper
103 */
104
105static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
106 struct drm_fb_helper_surface_size *sizes)
107{
108 struct drm_client_dev *client = &fb_helper->client;
109 struct drm_device *dev = fb_helper->dev;
110 bool use_deferred_io = false;
111 struct drm_client_buffer *buffer;
112 struct drm_gem_dma_object *dma_obj;
113 struct drm_framebuffer *fb;
114 struct fb_info *info;
115 u32 format;
116 struct iosys_map map;
117 int ret;
118
119 drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
120 sizes->surface_width, sizes->surface_height,
121 sizes->surface_bpp);
122
123 format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
124 sizes->surface_depth);
125 buffer = drm_client_framebuffer_create(client, sizes->surface_width,
126 sizes->surface_height, format);
127 if (IS_ERR(buffer))
128 return PTR_ERR(buffer);
129 dma_obj = to_drm_gem_dma_obj(buffer->gem);
130
131 fb = buffer->fb;
132
133 /*
134 * Deferred I/O requires struct page for framebuffer memory,
135 * which is not guaranteed for all DMA ranges. We thus only
136 * install deferred I/O if we have a framebuffer that requires
137 * it.
138 */
139 if (fb->funcs->dirty)
140 use_deferred_io = true;
141
142 ret = drm_client_buffer_vmap(buffer, &map);
143 if (ret) {
144 goto err_drm_client_buffer_delete;
145 } else if (drm_WARN_ON(dev, map.is_iomem)) {
146 ret = -ENODEV; /* I/O memory not supported; use generic emulation */
147 goto err_drm_client_buffer_delete;
148 }
149
150 fb_helper->buffer = buffer;
151 fb_helper->fb = fb;
152
153 info = drm_fb_helper_alloc_info(fb_helper);
154 if (IS_ERR(info)) {
155 ret = PTR_ERR(info);
156 goto err_drm_client_buffer_vunmap;
157 }
158
159 drm_fb_helper_fill_info(info, fb_helper, sizes);
160
161 if (use_deferred_io)
162 info->fbops = &drm_fbdev_dma_deferred_fb_ops;
163 else
164 info->fbops = &drm_fbdev_dma_fb_ops;
165
166 /* screen */
167 info->flags |= FBINFO_VIRTFB; /* system memory */
168 if (dma_obj->map_noncoherent)
169 info->flags |= FBINFO_READS_FAST; /* signal caching */
170 info->screen_size = sizes->surface_height * fb->pitches[0];
171 info->screen_buffer = map.vaddr;
172 if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
173 if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
174 info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
175 }
176 info->fix.smem_len = info->screen_size;
177
178 /*
179 * Only set up deferred I/O if the screen buffer supports
180 * it. If this disagrees with the previous test for ->dirty,
181 * mmap on the /dev/fb file might not work correctly.
182 */
183 if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
184 unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
185
186 if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
187 use_deferred_io = false;
188 }
189
190 /* deferred I/O */
191 if (use_deferred_io) {
192 fb_helper->fbdefio.delay = HZ / 20;
193 fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
194
195 info->fbdefio = &fb_helper->fbdefio;
196 ret = fb_deferred_io_init(info);
197 if (ret)
198 goto err_drm_fb_helper_release_info;
199 }
200
201 return 0;
202
203err_drm_fb_helper_release_info:
204 drm_fb_helper_release_info(fb_helper);
205err_drm_client_buffer_vunmap:
206 fb_helper->fb = NULL;
207 fb_helper->buffer = NULL;
208 drm_client_buffer_vunmap(buffer);
209err_drm_client_buffer_delete:
210 drm_client_framebuffer_delete(buffer);
211 return ret;
212}
213
214static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
215 struct drm_clip_rect *clip)
216{
217 struct drm_device *dev = helper->dev;
218 int ret;
219
220 /* Call damage handlers only if necessary */
221 if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
222 return 0;
223
224 if (helper->fb->funcs->dirty) {
225 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
226 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
227 return ret;
228 }
229
230 return 0;
231}
232
/* fb-helper callbacks wiring probe and damage handling into this emulation */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
237
238/*
239 * struct drm_client_funcs
240 */
241
242static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
243{
244 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
245
246 if (fb_helper->info) {
247 drm_fb_helper_unregister_info(fb_helper);
248 } else {
249 drm_client_release(&fb_helper->client);
250 drm_fb_helper_unprepare(fb_helper);
251 kfree(fb_helper);
252 }
253}
254
/* Restore the fbdev configuration when the last DRM client closes. */
static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}
261
/*
 * Hotplug handler: forwards the event if the fbdev is already set up,
 * otherwise performs the initial fb-helper setup. Returns 0 on success
 * or a negative errno on failure.
 */
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* Already initialized: just propagate the hotplug event. */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	/* Legacy (non-atomic) drivers need unused functions disabled. */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}
290
/* DRM client callbacks driving the fbdev emulation lifecycle */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};
297
298/**
299 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
300 * @dev: DRM device
301 * @preferred_bpp: Preferred bits per pixel for the device.
302 * 32 is used if this is zero.
303 *
304 * This function sets up fbdev emulation for GEM DMA drivers that support
305 * dumb buffers with a virtual address and that can be mmap'ed.
306 * drm_fbdev_dma_setup() shall be called after the DRM driver registered
307 * the new DRM device with drm_dev_register().
308 *
309 * Restore, hotplug events and teardown are all taken care of. Drivers that do
310 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
311 * Simple drivers might use drm_mode_config_helper_suspend().
312 *
313 * This function is safe to call even when there are no connectors present.
314 * Setup will be retried on the next hotplug event.
315 *
316 * The fbdev is destroyed by drm_dev_unregister().
317 */
318void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
319{
320 struct drm_fb_helper *fb_helper;
321 int ret;
322
323 drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
324 drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
325
326 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
327 if (!fb_helper)
328 return;
329 drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
330
331 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
332 if (ret) {
333 drm_err(dev, "Failed to register client: %d\n", ret);
334 goto err_drm_client_init;
335 }
336
337 drm_client_register(&fb_helper->client);
338
339 return;
340
341err_drm_client_init:
342 drm_fb_helper_unprepare(fb_helper);
343 kfree(fb_helper);
344}
345EXPORT_SYMBOL(drm_fbdev_dma_setup);