#ifndef IO_URING_MEMMAP_H
#define IO_URING_MEMMAP_H

/*
 * Declarations for the io_uring memory-mapping helpers: pinning user
 * pages, kernel-mapping page arrays, servicing mmap() on the io_uring
 * fd, and managing mapped regions (struct io_mapped_region).
 *
 * NOTE(review): prototypes only — per-function semantics below are
 * inferred from naming and should be confirmed against the definitions
 * in the corresponding .c file.
 */

/* Pin the user pages backing [ubuf, ubuf + len); presumably returns the
 * page array and stores its length in *npages — confirm error
 * convention (NULL vs ERR_PTR) in the definition. */
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

/* Release a page array previously produced by the helpers above;
 * takes struct page *** so it can also clear the caller's pointer. */
void io_pages_free(struct page ***pages, int npages);

/* Map an array of npages pages into the given user VMA for mmap(). */
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages);

/* Allocate and kernel-map size bytes; returns the mapping and hands the
 * backing page array back via *out_pages / *npages. */
void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size);

/* Undo io_pages_map(); put_pages selects whether the page references
 * are dropped as well. */
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages);

/* Pin and kernel-map size bytes of user memory at uaddr (used when the
 * application provides the ring memory itself). */
void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
		     unsigned long uaddr, size_t size);

/* On !CONFIG_MMU kernels, report the mmap capabilities of the fd. */
#ifndef CONFIG_MMU
unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
#endif

/* ->get_unmapped_area() backend for the io_uring file. */
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);

/* ->mmap() backend for the io_uring file. */
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);

/* Tear down a mapped region created by io_create_region(). */
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr);

/* Create a mapped region from a userspace-supplied region descriptor. */
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg);

/*
 * Return the kernel address of a mapped region.  NULL if the region has
 * not been set up (vmap_ptr unset).
 */
static inline void *io_region_get_ptr(struct io_mapped_region *mr)
{
	return mr->vmap_ptr;
}

/*
 * True if the region has been populated: a set-up region has a non-zero
 * page count, so nr_pages doubles as the "initialized" flag.
 */
static inline bool io_region_is_set(struct io_mapped_region *mr)
{
	return !!mr->nr_pages;
}

#endif