/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

/*
 * Undo the work done in ib_umem_get(): unmap the scatterlist from the
 * device, mark pages dirty when the umem was writable and @dirty is set,
 * drop the page references taken by get_user_pages, and free the table.
 */
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->npages,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {

		page = sg_page(sg);
		if (!PageDirty(page) && umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If the access flags indicate ODP memory, pinning is avoided; instead,
 * the mm is stored for future page fault handling in conjunction with
 * MMU notifiers.
 *
 * @udata: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_ucontext *context;
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg, *sg_list_start;
	unsigned int gup_flags = FOLL_WRITE;

	if (!udata)
		return ERR_PTR(-EIO);

	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
			  ->context;
	if (!context)
		return ERR_PTR(-EIO);

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND) {
		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
		umem->is_odp = 1;
	} else {
		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
	}

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	if (access & IB_ACCESS_ON_DEMAND) {
		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
		if (ret)
			goto umem_kfree;
		return umem;
	}

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		down_read(&mm->mmap_sem);
		ret = get_user_pages_longterm(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);
		if (ret < 0) {
			up_read(&mm->mmap_sem);
			goto umem_release;
		}

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		/* Continue to hold the mmap_sem as vma_list access
		 * needs to be protected.
		 */
		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}
		up_read(&mm->mmap_sem);

		/* preparing for next loop */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
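
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * reg_user_mr path pins the user buffer with ib_umem_get() and must call
 * ib_umem_release() if later hardware setup fails. The mr structure and
 * the my_write_hw_pas() helper below are hypothetical:
 *
 *	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
 *	if (IS_ERR(mr->umem))
 *		return PTR_ERR(mr->umem);
 *
 *	err = my_write_hw_pas(dev, mr);	// hypothetical HW page-table setup
 *	if (err) {
 *		ib_umem_release(mr->umem);
 *		return err;
 *	}
 */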

static void __ib_umem_release_tail(struct ib_umem *umem)
{
	mmdrop(umem->owning_mm);
	if (umem->is_odp)
		kfree(to_ib_umem_odp(umem));
	else
		kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (umem->is_odp) {
		ib_umem_odp_release(to_ib_umem_odp(umem));
		__ib_umem_release_tail(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	__ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->is_odp)
		return ib_umem_num_pages(umem);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> umem->page_shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
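
/*
 * Example (hedged sketch): drivers commonly use ib_umem_page_count() to
 * size the array of page addresses handed to hardware. The pas array and
 * the my_fill_pas() helper here are hypothetical:
 *
 *	int n = ib_umem_page_count(umem);
 *	u64 *pas = kcalloc(n, sizeof(*pas), GFP_KERNEL);
 *
 *	if (!pas)
 *		return -ENOMEM;
 *	my_fill_pas(umem, pas);	// hypothetical: walk the DMA-mapped SG list
 */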

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zu umem length: %zu end: %zu\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
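
/*
 * Example (illustrative, not from this file): copying a fixed-size header
 * that userspace placed at the start of a registered region. The
 * struct my_hdr type is hypothetical:
 *
 *	struct my_hdr hdr;
 *	int err;
 *
 *	err = ib_umem_copy_from(&hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */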