Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fpga: dfl: afu: add DFL_FPGA_PORT_DMA_MAP/UNMAP ioctls support

DMA memory regions are required for Accelerated Function Unit (AFU) usage.
These two ioctls allow user space applications to map user memory regions
for DMA, and unmap them after use. The IOVA is returned from the driver to the user
space application via DFL_FPGA_PORT_DMA_MAP ioctl. Application needs to
unmap it after use, otherwise, driver will unmap them in device file
release operation.

Each AFU has its own rb tree to keep track of its mapped DMA regions.

Ioctl interfaces:
* DFL_FPGA_PORT_DMA_MAP
Do the dma mapping per user_addr and length provided by user.
Return iova in provided struct dfl_fpga_port_dma_map.

* DFL_FPGA_PORT_DMA_UNMAP
Unmap the dma region per iova provided by user.

Signed-off-by: Tim Whisonant <tim.whisonant@intel.com>
Signed-off-by: Enno Luebbers <enno.luebbers@intel.com>
Signed-off-by: Shiva Rao <shiva.rao@intel.com>
Signed-off-by: Christopher Rauer <christopher.rauer@intel.com>
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Wu Hao <hao.wu@intel.com>
Acked-by: Alan Tull <atull@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Wu Hao and committed by
Greg Kroah-Hartman
fa8dda1e 857a2622

+591 -3
+1 -1
drivers/fpga/Makefile
··· 38 38 obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o 39 39 40 40 dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o 41 - dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o 41 + dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o 42 42 43 43 # Drivers for FPGAs which implement DFL 44 44 obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
+463
drivers/fpga/dfl-afu-dma-region.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management 4 + * 5 + * Copyright (C) 2017-2018 Intel Corporation, Inc. 6 + * 7 + * Authors: 8 + * Wu Hao <hao.wu@intel.com> 9 + * Xiao Guangrong <guangrong.xiao@linux.intel.com> 10 + */ 11 + 12 + #include <linux/dma-mapping.h> 13 + #include <linux/sched/signal.h> 14 + #include <linux/uaccess.h> 15 + 16 + #include "dfl-afu.h" 17 + 18 + static void put_all_pages(struct page **pages, int npages) 19 + { 20 + int i; 21 + 22 + for (i = 0; i < npages; i++) 23 + if (pages[i]) 24 + put_page(pages[i]); 25 + } 26 + 27 + void afu_dma_region_init(struct dfl_feature_platform_data *pdata) 28 + { 29 + struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); 30 + 31 + afu->dma_regions = RB_ROOT; 32 + } 33 + 34 + /** 35 + * afu_dma_adjust_locked_vm - adjust locked memory 36 + * @dev: port device 37 + * @npages: number of pages 38 + * @incr: increase or decrease locked memory 39 + * 40 + * Increase or decrease the locked memory size with npages input. 41 + * 42 + * Return 0 on success. 43 + * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK. 44 + */ 45 + static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) 46 + { 47 + unsigned long locked, lock_limit; 48 + int ret = 0; 49 + 50 + /* the task is exiting. */ 51 + if (!current->mm) 52 + return 0; 53 + 54 + down_write(&current->mm->mmap_sem); 55 + 56 + if (incr) { 57 + locked = current->mm->locked_vm + npages; 58 + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 59 + 60 + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 61 + ret = -ENOMEM; 62 + else 63 + current->mm->locked_vm += npages; 64 + } else { 65 + if (WARN_ON_ONCE(npages > current->mm->locked_vm)) 66 + npages = current->mm->locked_vm; 67 + current->mm->locked_vm -= npages; 68 + } 69 + 70 + dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid, 71 + incr ? 
'+' : '-', npages << PAGE_SHIFT, 72 + current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), 73 + ret ? "- execeeded" : ""); 74 + 75 + up_write(&current->mm->mmap_sem); 76 + 77 + return ret; 78 + } 79 + 80 + /** 81 + * afu_dma_pin_pages - pin pages of given dma memory region 82 + * @pdata: feature device platform data 83 + * @region: dma memory region to be pinned 84 + * 85 + * Pin all the pages of given dfl_afu_dma_region. 86 + * Return 0 for success or negative error code. 87 + */ 88 + static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, 89 + struct dfl_afu_dma_region *region) 90 + { 91 + int npages = region->length >> PAGE_SHIFT; 92 + struct device *dev = &pdata->dev->dev; 93 + int ret, pinned; 94 + 95 + ret = afu_dma_adjust_locked_vm(dev, npages, true); 96 + if (ret) 97 + return ret; 98 + 99 + region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL); 100 + if (!region->pages) { 101 + ret = -ENOMEM; 102 + goto unlock_vm; 103 + } 104 + 105 + pinned = get_user_pages_fast(region->user_addr, npages, 1, 106 + region->pages); 107 + if (pinned < 0) { 108 + ret = pinned; 109 + goto put_pages; 110 + } else if (pinned != npages) { 111 + ret = -EFAULT; 112 + goto free_pages; 113 + } 114 + 115 + dev_dbg(dev, "%d pages pinned\n", pinned); 116 + 117 + return 0; 118 + 119 + put_pages: 120 + put_all_pages(region->pages, pinned); 121 + free_pages: 122 + kfree(region->pages); 123 + unlock_vm: 124 + afu_dma_adjust_locked_vm(dev, npages, false); 125 + return ret; 126 + } 127 + 128 + /** 129 + * afu_dma_unpin_pages - unpin pages of given dma memory region 130 + * @pdata: feature device platform data 131 + * @region: dma memory region to be unpinned 132 + * 133 + * Unpin all the pages of given dfl_afu_dma_region. 134 + * Return 0 for success or negative error code. 
135 + */ 136 + static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata, 137 + struct dfl_afu_dma_region *region) 138 + { 139 + long npages = region->length >> PAGE_SHIFT; 140 + struct device *dev = &pdata->dev->dev; 141 + 142 + put_all_pages(region->pages, npages); 143 + kfree(region->pages); 144 + afu_dma_adjust_locked_vm(dev, npages, false); 145 + 146 + dev_dbg(dev, "%ld pages unpinned\n", npages); 147 + } 148 + 149 + /** 150 + * afu_dma_check_continuous_pages - check if pages are continuous 151 + * @region: dma memory region 152 + * 153 + * Return true if pages of given dma memory region have continuous physical 154 + * address, otherwise return false. 155 + */ 156 + static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region) 157 + { 158 + int npages = region->length >> PAGE_SHIFT; 159 + int i; 160 + 161 + for (i = 0; i < npages - 1; i++) 162 + if (page_to_pfn(region->pages[i]) + 1 != 163 + page_to_pfn(region->pages[i + 1])) 164 + return false; 165 + 166 + return true; 167 + } 168 + 169 + /** 170 + * dma_region_check_iova - check if memory area is fully contained in the region 171 + * @region: dma memory region 172 + * @iova: address of the dma memory area 173 + * @size: size of the dma memory area 174 + * 175 + * Compare the dma memory area defined by @iova and @size with given dma region. 176 + * Return true if memory area is fully contained in the region, otherwise false. 177 + */ 178 + static bool dma_region_check_iova(struct dfl_afu_dma_region *region, 179 + u64 iova, u64 size) 180 + { 181 + if (!size && region->iova != iova) 182 + return false; 183 + 184 + return (region->iova <= iova) && 185 + (region->length + region->iova >= iova + size); 186 + } 187 + 188 + /** 189 + * afu_dma_region_add - add given dma region to rbtree 190 + * @pdata: feature device platform data 191 + * @region: dma region to be added 192 + * 193 + * Return 0 for success, -EEXIST if dma region has already been added. 
194 + * 195 + * Needs to be called with pdata->lock heold. 196 + */ 197 + static int afu_dma_region_add(struct dfl_feature_platform_data *pdata, 198 + struct dfl_afu_dma_region *region) 199 + { 200 + struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); 201 + struct rb_node **new, *parent = NULL; 202 + 203 + dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n", 204 + (unsigned long long)region->iova); 205 + 206 + new = &afu->dma_regions.rb_node; 207 + 208 + while (*new) { 209 + struct dfl_afu_dma_region *this; 210 + 211 + this = container_of(*new, struct dfl_afu_dma_region, node); 212 + 213 + parent = *new; 214 + 215 + if (dma_region_check_iova(this, region->iova, region->length)) 216 + return -EEXIST; 217 + 218 + if (region->iova < this->iova) 219 + new = &((*new)->rb_left); 220 + else if (region->iova > this->iova) 221 + new = &((*new)->rb_right); 222 + else 223 + return -EEXIST; 224 + } 225 + 226 + rb_link_node(&region->node, parent, new); 227 + rb_insert_color(&region->node, &afu->dma_regions); 228 + 229 + return 0; 230 + } 231 + 232 + /** 233 + * afu_dma_region_remove - remove given dma region from rbtree 234 + * @pdata: feature device platform data 235 + * @region: dma region to be removed 236 + * 237 + * Needs to be called with pdata->lock heold. 238 + */ 239 + static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata, 240 + struct dfl_afu_dma_region *region) 241 + { 242 + struct dfl_afu *afu; 243 + 244 + dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n", 245 + (unsigned long long)region->iova); 246 + 247 + afu = dfl_fpga_pdata_get_private(pdata); 248 + rb_erase(&region->node, &afu->dma_regions); 249 + } 250 + 251 + /** 252 + * afu_dma_region_destroy - destroy all regions in rbtree 253 + * @pdata: feature device platform data 254 + * 255 + * Needs to be called with pdata->lock heold. 
256 + */ 257 + void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata) 258 + { 259 + struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); 260 + struct rb_node *node = rb_first(&afu->dma_regions); 261 + struct dfl_afu_dma_region *region; 262 + 263 + while (node) { 264 + region = container_of(node, struct dfl_afu_dma_region, node); 265 + 266 + dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n", 267 + (unsigned long long)region->iova); 268 + 269 + rb_erase(node, &afu->dma_regions); 270 + 271 + if (region->iova) 272 + dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), 273 + region->iova, region->length, 274 + DMA_BIDIRECTIONAL); 275 + 276 + if (region->pages) 277 + afu_dma_unpin_pages(pdata, region); 278 + 279 + node = rb_next(node); 280 + kfree(region); 281 + } 282 + } 283 + 284 + /** 285 + * afu_dma_region_find - find the dma region from rbtree based on iova and size 286 + * @pdata: feature device platform data 287 + * @iova: address of the dma memory area 288 + * @size: size of the dma memory area 289 + * 290 + * It finds the dma region from the rbtree based on @iova and @size: 291 + * - if @size == 0, it finds the dma region which starts from @iova 292 + * - otherwise, it finds the dma region which fully contains 293 + * [@iova, @iova+size) 294 + * If nothing is matched returns NULL. 295 + * 296 + * Needs to be called with pdata->lock held. 
297 + */ 298 + struct dfl_afu_dma_region * 299 + afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size) 300 + { 301 + struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata); 302 + struct rb_node *node = afu->dma_regions.rb_node; 303 + struct device *dev = &pdata->dev->dev; 304 + 305 + while (node) { 306 + struct dfl_afu_dma_region *region; 307 + 308 + region = container_of(node, struct dfl_afu_dma_region, node); 309 + 310 + if (dma_region_check_iova(region, iova, size)) { 311 + dev_dbg(dev, "find region (iova = %llx)\n", 312 + (unsigned long long)region->iova); 313 + return region; 314 + } 315 + 316 + if (iova < region->iova) 317 + node = node->rb_left; 318 + else if (iova > region->iova) 319 + node = node->rb_right; 320 + else 321 + /* the iova region is not fully covered. */ 322 + break; 323 + } 324 + 325 + dev_dbg(dev, "region with iova %llx and size %llx is not found\n", 326 + (unsigned long long)iova, (unsigned long long)size); 327 + 328 + return NULL; 329 + } 330 + 331 + /** 332 + * afu_dma_region_find_iova - find the dma region from rbtree by iova 333 + * @pdata: feature device platform data 334 + * @iova: address of the dma region 335 + * 336 + * Needs to be called with pdata->lock held. 337 + */ 338 + static struct dfl_afu_dma_region * 339 + afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova) 340 + { 341 + return afu_dma_region_find(pdata, iova, 0); 342 + } 343 + 344 + /** 345 + * afu_dma_map_region - map memory region for dma 346 + * @pdata: feature device platform data 347 + * @user_addr: address of the memory region 348 + * @length: size of the memory region 349 + * @iova: pointer of iova address 350 + * 351 + * Map memory region defined by @user_addr and @length, and return dma address 352 + * of the memory region via @iova. 353 + * Return 0 for success, otherwise error code. 
354 + */ 355 + int afu_dma_map_region(struct dfl_feature_platform_data *pdata, 356 + u64 user_addr, u64 length, u64 *iova) 357 + { 358 + struct dfl_afu_dma_region *region; 359 + int ret; 360 + 361 + /* 362 + * Check Inputs, only accept page-aligned user memory region with 363 + * valid length. 364 + */ 365 + if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length) 366 + return -EINVAL; 367 + 368 + /* Check overflow */ 369 + if (user_addr + length < user_addr) 370 + return -EINVAL; 371 + 372 + if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr, 373 + length)) 374 + return -EINVAL; 375 + 376 + region = kzalloc(sizeof(*region), GFP_KERNEL); 377 + if (!region) 378 + return -ENOMEM; 379 + 380 + region->user_addr = user_addr; 381 + region->length = length; 382 + 383 + /* Pin the user memory region */ 384 + ret = afu_dma_pin_pages(pdata, region); 385 + if (ret) { 386 + dev_err(&pdata->dev->dev, "failed to pin memory region\n"); 387 + goto free_region; 388 + } 389 + 390 + /* Only accept continuous pages, return error else */ 391 + if (!afu_dma_check_continuous_pages(region)) { 392 + dev_err(&pdata->dev->dev, "pages are not continuous\n"); 393 + ret = -EINVAL; 394 + goto unpin_pages; 395 + } 396 + 397 + /* As pages are continuous then start to do DMA mapping */ 398 + region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata), 399 + region->pages[0], 0, 400 + region->length, 401 + DMA_BIDIRECTIONAL); 402 + if (dma_mapping_error(&pdata->dev->dev, region->iova)) { 403 + dev_err(&pdata->dev->dev, "failed to map for dma\n"); 404 + ret = -EFAULT; 405 + goto unpin_pages; 406 + } 407 + 408 + *iova = region->iova; 409 + 410 + mutex_lock(&pdata->lock); 411 + ret = afu_dma_region_add(pdata, region); 412 + mutex_unlock(&pdata->lock); 413 + if (ret) { 414 + dev_err(&pdata->dev->dev, "failed to add dma region\n"); 415 + goto unmap_dma; 416 + } 417 + 418 + return 0; 419 + 420 + unmap_dma: 421 + dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), 422 + region->iova, 
region->length, DMA_BIDIRECTIONAL); 423 + unpin_pages: 424 + afu_dma_unpin_pages(pdata, region); 425 + free_region: 426 + kfree(region); 427 + return ret; 428 + } 429 + 430 + /** 431 + * afu_dma_unmap_region - unmap dma memory region 432 + * @pdata: feature device platform data 433 + * @iova: dma address of the region 434 + * 435 + * Unmap dma memory region based on @iova. 436 + * Return 0 for success, otherwise error code. 437 + */ 438 + int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova) 439 + { 440 + struct dfl_afu_dma_region *region; 441 + 442 + mutex_lock(&pdata->lock); 443 + region = afu_dma_region_find_iova(pdata, iova); 444 + if (!region) { 445 + mutex_unlock(&pdata->lock); 446 + return -EINVAL; 447 + } 448 + 449 + if (region->in_use) { 450 + mutex_unlock(&pdata->lock); 451 + return -EBUSY; 452 + } 453 + 454 + afu_dma_region_remove(pdata, region); 455 + mutex_unlock(&pdata->lock); 456 + 457 + dma_unmap_page(dfl_fpga_pdata_to_parent(pdata), 458 + region->iova, region->length, DMA_BIDIRECTIONAL); 459 + afu_dma_unpin_pages(pdata, region); 460 + kfree(region); 461 + 462 + return 0; 463 + }
+60 -1
drivers/fpga/dfl-afu-main.c
··· 293 293 294 294 pdata = dev_get_platdata(&pdev->dev); 295 295 296 - port_reset(pdev); 296 + mutex_lock(&pdata->lock); 297 + __port_reset(pdev); 298 + afu_dma_region_destroy(pdata); 299 + mutex_unlock(&pdata->lock); 300 + 297 301 dfl_feature_dev_use_end(pdata); 298 302 299 303 return 0; ··· 368 364 return 0; 369 365 } 370 366 367 + static long 368 + afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg) 369 + { 370 + struct dfl_fpga_port_dma_map map; 371 + unsigned long minsz; 372 + long ret; 373 + 374 + minsz = offsetofend(struct dfl_fpga_port_dma_map, iova); 375 + 376 + if (copy_from_user(&map, arg, minsz)) 377 + return -EFAULT; 378 + 379 + if (map.argsz < minsz || map.flags) 380 + return -EINVAL; 381 + 382 + ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova); 383 + if (ret) 384 + return ret; 385 + 386 + if (copy_to_user(arg, &map, sizeof(map))) { 387 + afu_dma_unmap_region(pdata, map.iova); 388 + return -EFAULT; 389 + } 390 + 391 + dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n", 392 + (unsigned long long)map.user_addr, 393 + (unsigned long long)map.length, 394 + (unsigned long long)map.iova); 395 + 396 + return 0; 397 + } 398 + 399 + static long 400 + afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg) 401 + { 402 + struct dfl_fpga_port_dma_unmap unmap; 403 + unsigned long minsz; 404 + 405 + minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova); 406 + 407 + if (copy_from_user(&unmap, arg, minsz)) 408 + return -EFAULT; 409 + 410 + if (unmap.argsz < minsz || unmap.flags) 411 + return -EINVAL; 412 + 413 + return afu_dma_unmap_region(pdata, unmap.iova); 414 + } 415 + 371 416 static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 372 417 { 373 418 struct platform_device *pdev = filp->private_data; ··· 437 384 return afu_ioctl_get_info(pdata, (void __user *)arg); 438 385 case DFL_FPGA_PORT_GET_REGION_INFO: 439 386 return 
afu_ioctl_get_region_info(pdata, (void __user *)arg); 387 + case DFL_FPGA_PORT_DMA_MAP: 388 + return afu_ioctl_dma_map(pdata, (void __user *)arg); 389 + case DFL_FPGA_PORT_DMA_UNMAP: 390 + return afu_ioctl_dma_unmap(pdata, (void __user *)arg); 440 391 default: 441 392 /* 442 393 * Let sub-feature's ioctl function to handle the cmd ··· 517 460 mutex_lock(&pdata->lock); 518 461 dfl_fpga_pdata_set_private(pdata, afu); 519 462 afu_mmio_region_init(pdata); 463 + afu_dma_region_init(pdata); 520 464 mutex_unlock(&pdata->lock); 521 465 522 466 return 0; ··· 531 473 mutex_lock(&pdata->lock); 532 474 afu = dfl_fpga_pdata_get_private(pdata); 533 475 afu_mmio_region_destroy(pdata); 476 + afu_dma_region_destroy(pdata); 534 477 dfl_fpga_pdata_set_private(pdata, NULL); 535 478 mutex_unlock(&pdata->lock); 536 479
+30 -1
drivers/fpga/dfl-afu.h
··· 41 41 }; 42 42 43 43 /** 44 + * struct fpga_afu_dma_region - afu DMA region data structure 45 + * 46 + * @user_addr: region userspace virtual address. 47 + * @length: region length. 48 + * @iova: region IO virtual address. 49 + * @pages: ptr to pages of this region. 50 + * @node: rb tree node. 51 + * @in_use: flag to indicate if this region is in_use. 52 + */ 53 + struct dfl_afu_dma_region { 54 + u64 user_addr; 55 + u64 length; 56 + u64 iova; 57 + struct page **pages; 58 + struct rb_node node; 59 + bool in_use; 60 + }; 61 + 62 + /** 44 63 * struct dfl_afu - afu device data structure 45 64 * 46 65 * @region_cur_offset: current region offset from start to the device fd. 47 66 * @num_regions: num of mmio regions. 48 67 * @regions: the mmio region linked list of this afu feature device. 68 + * @dma_regions: root of dma regions rb tree. 49 69 * @num_umsgs: num of umsgs. 50 70 * @pdata: afu platform device's pdata. 51 71 */ ··· 74 54 int num_regions; 75 55 u8 num_umsgs; 76 56 struct list_head regions; 57 + struct rb_root dma_regions; 77 58 78 59 struct dfl_feature_platform_data *pdata; 79 60 }; ··· 89 68 int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, 90 69 u64 offset, u64 size, 91 70 struct dfl_afu_mmio_region *pregion); 92 - #endif 71 + void afu_dma_region_init(struct dfl_feature_platform_data *pdata); 72 + void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata); 73 + int afu_dma_map_region(struct dfl_feature_platform_data *pdata, 74 + u64 user_addr, u64 length, u64 *iova); 75 + int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova); 76 + struct dfl_afu_dma_region * 77 + afu_dma_region_find(struct dfl_feature_platform_data *pdata, 78 + u64 iova, u64 size); 79 + #endif /* __DFL_AFU_H */
+37
include/uapi/linux/fpga-dfl.h
··· 114 114 115 115 #define DFL_FPGA_PORT_GET_REGION_INFO _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 2) 116 116 117 + /** 118 + * DFL_FPGA_PORT_DMA_MAP - _IOWR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 3, 119 + * struct dfl_fpga_port_dma_map) 120 + * 121 + * Map the dma memory per user_addr and length which are provided by caller. 122 + * Driver fills the iova in provided struct afu_port_dma_map. 123 + * This interface only accepts page-size aligned user memory for dma mapping. 124 + * Return: 0 on success, -errno on failure. 125 + */ 126 + struct dfl_fpga_port_dma_map { 127 + /* Input */ 128 + __u32 argsz; /* Structure length */ 129 + __u32 flags; /* Zero for now */ 130 + __u64 user_addr; /* Process virtual address */ 131 + __u64 length; /* Length of mapping (bytes)*/ 132 + /* Output */ 133 + __u64 iova; /* IO virtual address */ 134 + }; 135 + 136 + #define DFL_FPGA_PORT_DMA_MAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 3) 137 + 138 + /** 139 + * DFL_FPGA_PORT_DMA_UNMAP - _IOW(FPGA_MAGIC, PORT_BASE + 4, 140 + * struct dfl_fpga_port_dma_unmap) 141 + * 142 + * Unmap the dma memory per iova provided by caller. 143 + * Return: 0 on success, -errno on failure. 144 + */ 145 + struct dfl_fpga_port_dma_unmap { 146 + /* Input */ 147 + __u32 argsz; /* Structure length */ 148 + __u32 flags; /* Zero for now */ 149 + __u64 iova; /* IO virtual address */ 150 + }; 151 + 152 + #define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4) 153 + 117 154 /* IOCTLs for FME file descriptor */ 118 155 119 156 /**