/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
        struct sg_page_iter sg_iter;
        struct page *page;

        if (umem->nmap > 0)
                ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
                                DMA_BIDIRECTIONAL);

        for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
                page = sg_page_iter_page(&sg_iter);
                unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
        }

        sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers that always support PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                                     unsigned long pgsz_bitmap,
                                     unsigned long virt)
{
        struct scatterlist *sg;
        unsigned long va, pgoff;
        dma_addr_t mask;
        int i;

        if (umem->is_odp) {
                unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

                /* ODP must always be self consistent. */
                if (!(pgsz_bitmap & page_size))
                        return 0;
                return page_size;
        }

        /* rdma_for_each_block() has a bug if the page size is smaller than the
         * page size used to build the umem. For now prevent smaller page sizes
         * from being returned.
         */
        pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);

        /* At minimum, drivers must support PAGE_SIZE or smaller */
        if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
                return 0;

        umem->iova = va = virt;
        /* The best result is the smallest page size that results in the minimum
         * number of required pages. Compute the largest page size that could
         * work based on VA address bits that don't change.
         */
        mask = pgsz_bitmap &
               GENMASK(BITS_PER_LONG - 1,
                       bits_per((umem->length - 1 + virt) ^ virt));
        /* offset into first SGL */
        pgoff = umem->address & ~PAGE_MASK;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                /* Walk SGL and reduce max page size if VA/PA bits differ
                 * for any address.
                 */
                mask |= (sg_dma_address(sg) + pgoff) ^ va;
                va += sg_dma_len(sg) - pgoff;
                /* Except for the last entry, the ending iova alignment sets
                 * the maximum possible page size as the low bits of the iova
                 * must be zero when starting the next chunk.
                 */
                if (i != (umem->nmap - 1))
                        mask |= va;
                pgoff = 0;
        }

        /* The mask accumulates 1's in each position where the VA and physical
         * address differ, thus the length of trailing 0 is the largest page
         * size that can pass the VA through to the physical.
         */
        if (mask)
                pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
        return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
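
/*
 * Illustrative sketch (not compiled into this file): how a driver might
 * combine ib_umem_find_best_pgsz() with rdma_umem_for_each_dma_block()
 * to program a single HW page size for a whole MR. The names
 * MYDEV_SUPPORTED_PGSZ, mydev_populate_mr() and mydev_write_pte() are
 * hypothetical; the iterator helpers are the real rdma/ib_verbs.h APIs.
 */
#if 0
static int mydev_populate_mr(struct ib_umem *umem, u64 iova)
{
        struct ib_block_iter biter;
        unsigned long pgsz;

        /* Pick the largest page size usable for the entire umem. */
        pgsz = ib_umem_find_best_pgsz(umem, MYDEV_SUPPORTED_PGSZ, iova);
        if (!pgsz)
                return -EOPNOTSUPP; /* umem not expressible in one HW page size */

        /* Walk the DMA-mapped SGL in aligned blocks of the chosen size. */
        rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                mydev_write_pte(rdma_block_iter_dma_address(&biter));
        return 0;
}
#endif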

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access)
{
        struct ib_umem *umem;
        struct page **page_list;
        unsigned long lock_limit;
        unsigned long new_pinned;
        unsigned long cur_base;
        unsigned long dma_attr = 0;
        struct mm_struct *mm;
        unsigned long npages;
        int ret;
        struct scatterlist *sg = NULL;
        unsigned int gup_flags = FOLL_WRITE;

        /*
         * If the combination of the addr and size requested for this memory
         * region causes an integer overflow, return error.
         */
        if (((addr + size) < addr) ||
            PAGE_ALIGN(addr + size) < (addr + size))
                return ERR_PTR(-EINVAL);

        if (!can_do_mlock())
                return ERR_PTR(-EPERM);

        if (access & IB_ACCESS_ON_DEMAND)
                return ERR_PTR(-EOPNOTSUPP);

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);
        umem->ibdev = device;
        umem->length = size;
        umem->address = addr;
        /*
         * Drivers should call ib_umem_find_best_pgsz() to set the iova
         * correctly.
         */
        umem->iova = addr;
        umem->writable = ib_access_writable(access);
        umem->owning_mm = mm = current->mm;
        mmgrab(mm);

        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
                ret = -ENOMEM;
                goto umem_kfree;
        }

        npages = ib_umem_num_pages(umem);
        if (npages == 0 || npages > UINT_MAX) {
                ret = -EINVAL;
                goto out;
        }

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
                atomic64_sub(npages, &mm->pinned_vm);
                ret = -ENOMEM;
                goto out;
        }

        cur_base = addr & PAGE_MASK;

        if (!umem->writable)
                gup_flags |= FOLL_FORCE;

        while (npages) {
                cond_resched();
                ret = pin_user_pages_fast(cur_base,
                                          min_t(unsigned long, npages,
                                                PAGE_SIZE /
                                                sizeof(struct page *)),
                                          gup_flags | FOLL_LONGTERM, page_list);
                if (ret < 0)
                        goto umem_release;

                cur_base += ret * PAGE_SIZE;
                npages -= ret;
                sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
                                0, ret << PAGE_SHIFT,
                                ib_dma_max_seg_size(device), sg, npages,
                                GFP_KERNEL);
                umem->sg_nents = umem->sg_head.nents;
                if (IS_ERR(sg)) {
                        unpin_user_pages_dirty_lock(page_list, ret, 0);
                        ret = PTR_ERR(sg);
                        goto umem_release;
                }
        }

        if (access & IB_ACCESS_RELAXED_ORDERING)
                dma_attr |= DMA_ATTR_WEAK_ORDERING;

        umem->nmap =
                ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
                                    DMA_BIDIRECTIONAL, dma_attr);

        if (!umem->nmap) {
                ret = -ENOMEM;
                goto umem_release;
        }

        ret = 0;
        goto out;

umem_release:
        __ib_umem_release(device, umem, 0);
        atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
        free_page((unsigned long) page_list);
umem_kfree:
        if (ret) {
                mmdrop(umem->owning_mm);
                kfree(umem);
        }
        return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
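
/*
 * Illustrative sketch (not compiled into this file): the usual pairing of
 * ib_umem_get() in a driver's reg_user_mr path with ib_umem_release() on
 * the failure path (and later on MR destroy). "struct mydev_mr", its
 * "ibmr" member and mydev_alloc_and_program_mr() are hypothetical names.
 */
#if 0
static struct ib_mr *mydev_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt_addr, int access)
{
        struct ib_umem *umem;
        struct mydev_mr *mr;

        /* Pins the pages and DMA maps them; may fail with -EPERM/-ENOMEM. */
        umem = ib_umem_get(pd->device, start, length, access);
        if (IS_ERR(umem))
                return ERR_CAST(umem);

        mr = mydev_alloc_and_program_mr(pd, umem, virt_addr);
        if (IS_ERR(mr)) {
                ib_umem_release(umem); /* undoes the pin and the DMA mapping */
                return ERR_CAST(mr);
        }
        return &mr->ibmr;
}
#endif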

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
        if (!umem)
                return;
        if (umem->is_dmabuf)
                return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
        if (umem->is_odp)
                return ib_umem_odp_release(to_ib_umem_odp(umem));

        __ib_umem_release(umem->ibdev, umem, 1);

        atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
        mmdrop(umem->owning_mm);
        kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                      size_t length)
{
        size_t end = offset + length;
        int ret;

        if (offset > umem->length || length > umem->length - offset) {
                pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
                       offset, umem->length, end);
                return -EINVAL;
        }

        ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
                                 offset + ib_umem_offset(umem));

        if (ret < 0)
                return ret;
        else if (ret != length)
                return -EINVAL;
        else
                return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
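
/*
 * Illustrative sketch (not compiled into this file): reading the first
 * bytes of a pinned region into a kernel buffer, e.g. to inspect a
 * header userspace placed at the start of the MR. The wrapper name and
 * the 64-byte header size are assumptions made for the example.
 */
#if 0
static int mydev_peek_header(struct ib_umem *umem, void *hdr)
{
        /* Returns -EINVAL if the requested range exceeds umem->length. */
        return ib_umem_copy_from(hdr, umem, 0, 64);
}
#endif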