Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>

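/*
 * udmabuf: export memfd-backed userspace memory as a dma-buf.
 *
 * A minimal usage sketch (not part of this file; it assumes the uapi
 * definitions of struct udmabuf_create, UDMABUF_FLAGS_CLOEXEC and
 * UDMABUF_CREATE from <linux/udmabuf.h>, plus a device node at
 * /dev/udmabuf):
 *
 *	int memfd = memfd_create("frame", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);                    // size page-aligned
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);  // required, see SEALS_WANTED
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,                       // page-aligned
 *		.size   = size,                    // page-aligned
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */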
static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

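/*
 * One exported buffer: a flat array of pinned pages plus a scatter-gather
 * table that is built lazily on first CPU access (see begin_cpu_udmabuf())
 * and the misc device whose struct device is used for DMA mapping.
 */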
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

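/*
 * Fault handler for userspace mmap()s of the dma-buf: resolve the faulting
 * offset directly to the already-pinned backing page. Note that vmf->pgoff
 * is used unchecked here.
 */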
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

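/* dma-buf mmap hook: only shared mappings make sense for a shared buffer. */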
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

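/*
 * Wrap the page array in a freshly allocated sg_table and map it for DMA
 * on behalf of the given device. The caller owns the returned table and
 * must release it with put_sg_table().
 */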
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

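/*
 * dma-buf release: drop the cached sg_table (if a CPU access ever created
 * one) and the page references taken at creation time.
 */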
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

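/*
 * begin_cpu_access: build and cache the sg_table on first use; on later
 * calls just sync the pages so the CPU sees any device writes. The matching
 * end_cpu_access syncs the other way before handing the buffer back to the
 * device.
 */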
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg))
			return PTR_ERR(ubuf->sg);
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return 0;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

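/*
 * A memfd must be sealed against shrinking, so its pages cannot vanish
 * underneath the dma-buf, and must not already be write-sealed, since a
 * mapping of the dma-buf would still allow writes and break that seal's
 * promise.
 */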
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

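/*
 * Core creation path: validate each item, pin the backing pages of the
 * memfds (shmem or hugetlbfs), and export the whole set as one dma-buf fd.
 */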
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* first pass: validate alignment and enforce the total size limit */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* second pass: check seals and pin the backing pages of each memfd */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = file_inode(memfd)->i_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			/* hugetlb: index by huge page, then walk its PAGE_SIZE subpages */
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	/* export the pinned pages as one dma-buf */
	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	/* unwind: release every page pinned so far */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

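/* UDMABUF_CREATE: the single-buffer ioctl is handled as a one-item list. */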
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

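/*
 * UDMABUF_CREATE_LIST: copy in the header, bound the item count by the
 * list_limit module parameter, then pull in the item array that follows
 * the header in userspace memory.
 */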
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

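/*
 * The ioctl structures use only fixed-size __u32/__u64 fields, so the
 * native handler can double as the compat_ioctl handler.
 */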
static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

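/* Registered as a misc device with a dynamic minor, typically /dev/udmabuf. */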
static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");