// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

struct idxd_cdev_context {
        const char *name;
        dev_t devt;
        struct ida minor_ida;
};

/*
 * ictx is an array indexed by accelerator type, using enum idxd_type
 * as the index.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
        { .name = "dsa" },
};

struct idxd_user_context {
        struct idxd_wq *wq;
        struct task_struct *task;
        unsigned int flags;
};

enum idxd_cdev_cleanup {
        CDEV_NORMAL = 0,
        CDEV_FAILED,
};

static void idxd_cdev_dev_release(struct device *dev)
{
        dev_dbg(dev, "releasing cdev device\n");
        kfree(dev);
}

static struct device_type idxd_cdev_device_type = {
        .name = "idxd_cdev",
        .release = idxd_cdev_dev_release,
};

static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
        struct cdev *cdev = inode->i_cdev;

        return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
{
        return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
        return idxd_cdev_wq(inode_idxd_cdev(inode));
}

static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
        struct idxd_user_context *ctx;
        struct idxd_device *idxd;
        struct idxd_wq *wq;
        struct device *dev;
        struct idxd_cdev *idxd_cdev;

        wq = inode_wq(inode);
        idxd = wq->idxd;
        dev = &idxd->pdev->dev;
        idxd_cdev = &wq->idxd_cdev;

        dev_dbg(dev, "%s called\n", __func__);

        /* A dedicated wq only allows a single opener. */
        if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
                return -EBUSY;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->wq = wq;
        filp->private_data = ctx;
        idxd_wq_get(wq);
        return 0;
}

static int idxd_cdev_release(struct inode *node, struct file *filep)
{
        struct idxd_user_context *ctx = filep->private_data;
        struct idxd_wq *wq = ctx->wq;
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;

        dev_dbg(dev, "%s called\n", __func__);
        filep->private_data = NULL;

        kfree(ctx);
        idxd_wq_put(wq);
        return 0;
}

static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
                     const char *func)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
                dev_info_ratelimited(dev,
                                     "%s: %s: mapping too large: %lu\n",
                                     current->comm, func,
                                     vma->vm_end - vma->vm_start);
                return -EINVAL;
        }

        return 0;
}

static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct idxd_user_context *ctx = filp->private_data;
        struct idxd_wq *wq = ctx->wq;
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
        unsigned long pfn;
        int rc;

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        rc = check_vma(wq, vma, __func__);
        if (rc < 0)
                return rc;

        vma->vm_flags |= VM_DONTCOPY;
        pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
                                IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_private_data = ctx;

        return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                        vma->vm_page_prot);
}

static __poll_t idxd_cdev_poll(struct file *filp,
                               struct poll_table_struct *wait)
{
        struct idxd_user_context *ctx = filp->private_data;
        struct idxd_wq *wq = ctx->wq;
        struct idxd_device *idxd = wq->idxd;
        struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
        unsigned long flags;
        __poll_t out = 0;

        poll_wait(filp, &idxd_cdev->err_queue, wait);
        spin_lock_irqsave(&idxd->dev_lock, flags);
        if (idxd->sw_err.valid)
                out = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&idxd->dev_lock, flags);

        return out;
}

static const struct file_operations idxd_cdev_fops = {
        .owner = THIS_MODULE,
        .open = idxd_cdev_open,
        .release = idxd_cdev_release,
        .mmap = idxd_cdev_mmap,
        .poll = idxd_cdev_poll,
};

int idxd_cdev_get_major(struct idxd_device *idxd)
{
        return MAJOR(ictx[idxd->type].devt);
}

static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
        struct idxd_cdev_context *cdev_ctx;
        struct device *dev;
        int minor, rc;

        idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
        if (!idxd_cdev->dev)
                return -ENOMEM;

        dev = idxd_cdev->dev;
        dev->parent = &idxd->pdev->dev;
        dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
                     idxd->id, wq->id);
        dev->bus = idxd_get_bus_type(idxd);

        cdev_ctx = &ictx[wq->idxd->type];
        minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto ida_err;
        }

        dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
        dev->type = &idxd_cdev_device_type;
        rc = device_register(dev);
        if (rc < 0) {
                dev_err(&idxd->pdev->dev, "device register failed\n");
                put_device(dev);
                goto dev_reg_err;
        }
        idxd_cdev->minor = minor;

        return 0;

 dev_reg_err:
        ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
 ida_err:
        kfree(dev);
        idxd_cdev->dev = NULL;
        return rc;
}

static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
                                 enum idxd_cdev_cleanup cdev_state)
{
        struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
        struct idxd_cdev_context *cdev_ctx;

        cdev_ctx = &ictx[wq->idxd->type];
        if (cdev_state == CDEV_NORMAL)
                cdev_del(&idxd_cdev->cdev);
        device_unregister(idxd_cdev->dev);
        /*
         * The device_type->release() will be called on the device and free
         * the allocated struct device. We can just forget it.
         */
        ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
        idxd_cdev->dev = NULL;
        idxd_cdev->minor = -1;
}

int idxd_wq_add_cdev(struct idxd_wq *wq)
{
        struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
        struct cdev *cdev = &idxd_cdev->cdev;
        struct device *dev;
        int rc;

        rc = idxd_wq_cdev_dev_setup(wq);
        if (rc < 0)
                return rc;

        dev = idxd_cdev->dev;
        cdev_init(cdev, &idxd_cdev_fops);
        cdev_set_parent(cdev, &dev->kobj);
        rc = cdev_add(cdev, dev->devt, 1);
        if (rc) {
                dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
                idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
                return rc;
        }

        init_waitqueue_head(&idxd_cdev->err_queue);
        return 0;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
        idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
}

int idxd_cdev_register(void)
{
        int rc, i;

        for (i = 0; i < IDXD_TYPE_MAX; i++) {
                ida_init(&ictx[i].minor_ida);
                rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
                                         ictx[i].name);
                if (rc)
                        return rc;
        }

        return 0;
}

void idxd_cdev_remove(void)
{
        int i;

        for (i = 0; i < IDXD_TYPE_MAX; i++) {
                unregister_chrdev_region(ictx[i].devt, MINORMASK);
                ida_destroy(&ictx[i].minor_ida);
        }
}
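
/*
 * A minimal userspace sketch (a separate program, not part of this driver
 * file) of how the char dev interface above might be exercised as of
 * v5.6-rc4. The device node path is an assumption based on the
 * "%s/wq%u.%u" name set in idxd_wq_cdev_dev_setup() and a typical udev
 * setup; adjust for your system. Actual work submission, which writes
 * 64-byte descriptors to the mapped portal with MOVDIR64B (dedicated wq)
 * or ENQCMD (shared wq), is out of scope here.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        struct pollfd pfd;
        void *portal;
        int fd;

        /* Hypothetical node name; depends on udev configuration. */
        fd = open("/dev/dsa/wq0.0", O_RDWR);
        if (fd < 0) {
                perror("open");         /* e.g. EBUSY on a busy dedicated wq */
                return 1;
        }

        /* Map the wq's limited portal; check_vma() rejects more than a page. */
        portal = mmap(NULL, pg, PROT_WRITE, MAP_SHARED, fd, 0);
        if (portal == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        /* Block until idxd_cdev_poll() reports a valid device software error. */
        pfd.fd = fd;
        pfd.events = POLLIN;
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
                fprintf(stderr, "idxd recorded a software error\n");

        munmap(portal, pg);
        close(fd);
        return 0;
}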