Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: vme: mmap() support for vme_user

We also make sure that the user won't be able to reconfigure the window while
it is mmapped.

Signed-off-by: Dmitry Kalinkin <dmitry.kalinkin@gmail.com>
Cc: Martyn Welch <martyn.welch@ge.com>
Cc: Igor Alekseev <igor.alekseev@itep.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Dmitry Kalinkin and committed by Greg Kroah-Hartman.
c74a804f 0cd189a4

+112
+85
drivers/staging/vme/devices/vme_user.c
··· 17 17 18 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 19 20 + #include <linux/atomic.h> 20 21 #include <linux/cdev.h> 21 22 #include <linux/delay.h> 22 23 #include <linux/device.h> ··· 100 99 struct device *device; /* Sysfs device */ 101 100 struct vme_resource *resource; /* VME resource */ 102 101 int users; /* Number of current users */ 102 + int mmap_count; /* Number of current mmap's */ 103 103 }; 104 104 static struct image_desc image[VME_DEVS]; 105 105 ··· 136 134 loff_t *); 137 135 static loff_t vme_user_llseek(struct file *, loff_t, int); 138 136 static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long); 137 + static int vme_user_mmap(struct file *file, struct vm_area_struct *vma); 138 + 139 + static void vme_user_vm_open(struct vm_area_struct *vma); 140 + static void vme_user_vm_close(struct vm_area_struct *vma); 139 141 140 142 static int vme_user_match(struct vme_dev *); 141 143 static int vme_user_probe(struct vme_dev *); ··· 153 147 .llseek = vme_user_llseek, 154 148 .unlocked_ioctl = vme_user_unlocked_ioctl, 155 149 .compat_ioctl = vme_user_unlocked_ioctl, 150 + .mmap = vme_user_mmap, 151 + }; 152 + 153 + struct vme_user_vma_priv { 154 + unsigned int minor; 155 + atomic_t refcnt; 156 + }; 157 + 158 + static const struct vm_operations_struct vme_user_vm_ops = { 159 + .open = vme_user_vm_open, 160 + .close = vme_user_vm_close, 156 161 }; 157 162 158 163 ··· 505 488 506 489 case VME_SET_MASTER: 507 490 491 + if (image[minor].mmap_count != 0) { 492 + pr_warn("Can't adjust mapped window\n"); 493 + return -EPERM; 494 + } 495 + 508 496 copied = copy_from_user(&master, argp, sizeof(master)); 509 497 if (copied != 0) { 510 498 pr_warn("Partial copy from userspace\n"); ··· 584 562 mutex_unlock(&image[minor].mutex); 585 563 586 564 return ret; 565 + } 566 + 567 + static void vme_user_vm_open(struct vm_area_struct *vma) 568 + { 569 + struct vme_user_vma_priv *vma_priv = vma->vm_private_data; 570 + 571 + 
atomic_inc(&vma_priv->refcnt); 572 + } 573 + 574 + static void vme_user_vm_close(struct vm_area_struct *vma) 575 + { 576 + struct vme_user_vma_priv *vma_priv = vma->vm_private_data; 577 + unsigned int minor = vma_priv->minor; 578 + 579 + if (!atomic_dec_and_test(&vma_priv->refcnt)) 580 + return; 581 + 582 + mutex_lock(&image[minor].mutex); 583 + image[minor].mmap_count--; 584 + mutex_unlock(&image[minor].mutex); 585 + 586 + kfree(vma_priv); 587 + } 588 + 589 + static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma) 590 + { 591 + int err; 592 + struct vme_user_vma_priv *vma_priv; 593 + 594 + mutex_lock(&image[minor].mutex); 595 + 596 + err = vme_master_mmap(image[minor].resource, vma); 597 + if (err) { 598 + mutex_unlock(&image[minor].mutex); 599 + return err; 600 + } 601 + 602 + vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL); 603 + if (vma_priv == NULL) { 604 + mutex_unlock(&image[minor].mutex); 605 + return -ENOMEM; 606 + } 607 + 608 + vma_priv->minor = minor; 609 + atomic_set(&vma_priv->refcnt, 1); 610 + vma->vm_ops = &vme_user_vm_ops; 611 + vma->vm_private_data = vma_priv; 612 + 613 + image[minor].mmap_count++; 614 + 615 + mutex_unlock(&image[minor].mutex); 616 + 617 + return 0; 618 + } 619 + 620 + static int vme_user_mmap(struct file *file, struct vm_area_struct *vma) 621 + { 622 + unsigned int minor = MINOR(file_inode(file)->i_rdev); 623 + 624 + if (type[minor] == MASTER_MINOR) 625 + return vme_user_master_mmap(minor, vma); 626 + 627 + return -ENODEV; 587 628 } 588 629 589 630
+26
drivers/vme/vme.c
··· 609 609 } 610 610 EXPORT_SYMBOL(vme_master_rmw); 611 611 612 + int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma) 613 + { 614 + struct vme_master_resource *image; 615 + phys_addr_t phys_addr; 616 + unsigned long vma_size; 617 + 618 + if (resource->type != VME_MASTER) { 619 + pr_err("Not a master resource\n"); 620 + return -EINVAL; 621 + } 622 + 623 + image = list_entry(resource->entry, struct vme_master_resource, list); 624 + phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT); 625 + vma_size = vma->vm_end - vma->vm_start; 626 + 627 + if (phys_addr + vma_size > image->bus_resource.end + 1) { 628 + pr_err("Map size cannot exceed the window size\n"); 629 + return -EFAULT; 630 + } 631 + 632 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 633 + 634 + return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start); 635 + } 636 + EXPORT_SYMBOL(vme_master_mmap); 637 + 612 638 void vme_master_free(struct vme_resource *resource) 613 639 { 614 640 struct vme_master_resource *master_image;
+1
include/linux/vme.h
··· 137 137 ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t); 138 138 unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int, 139 139 unsigned int, loff_t); 140 + int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma); 140 141 void vme_master_free(struct vme_resource *); 141 142 142 143 struct vme_resource *vme_dma_request(struct vme_dev *, u32);