// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

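/* Final kref release: tear down the drm_mm, destroy the MMU, and free */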
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}
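/* Drop a reference; the address space is freed once the last ref is gone */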
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

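/* Take a reference; tolerates NULL and ERR_PTR values */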
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

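/* Return true while the vma is pinned or a recorded GPU fence is unsignaled */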
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	bool ret = true;

	spin_lock(&vma->lock);

	if (vma->inuse > 0)
		goto out;

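	/*
	 * Scan the per-ring fence slots; a slot whose fence has signaled
	 * can be cleared, but any unsignaled fence means still in use.
	 */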
	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			goto out;

		vma->fence_mask &= ~BIT(idx);
	}

	ret = false;

out:
	spin_unlock(&vma->lock);

	return ret;
}

/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	unsigned size = vma->node.size;

	/* Warn if we try to purge a vma that is still in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

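/* Drop one pin count; caller must hold vma->lock */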
static void vma_unpin_locked(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Remove reference counts for the mapping */
void msm_gem_vma_unpin(struct msm_gem_vma *vma)
{
	spin_lock(&vma->lock);
	vma_unpin_locked(vma);
	spin_unlock(&vma->lock);
}

/* Replace pin reference with fence: */
void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	spin_lock(&vma->lock);
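	/*
	 * Record the most recent fence on this ring so msm_gem_vma_inuse()
	 * can later tell when the GPU has finished with the vma.
	 */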
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	vma_unpin_locked(vma);
	spin_unlock(&vma->lock);
}

/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	spin_lock(&vma->lock);
	vma->inuse++;
	spin_unlock(&vma->lock);

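	/* Mapping is idempotent: only the first call programs the MMU */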
	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (!aspace)
		return 0;

	/*
	 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
	 * a lock across map/unmap which is also used in the job_run()
	 * path, as this can cause deadlock in job_run() vs shrinker/
	 * reclaim.
	 *
	 * Revisit this if we can come up with a scheme to pre-alloc pages
	 * for the pgtable in map/unmap ops.
	 */
	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

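	/* On failure, undo both the mapped flag and the pin taken above */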
	if (ret) {
		vma->mapped = false;
		spin_lock(&vma->lock);
		vma->inuse--;
		spin_unlock(&vma->lock);
	}

	return ret;
}

/* Close an iova. Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

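/* Allocate a vma; no iova is assigned until msm_gem_vma_init() */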
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;

	spin_lock_init(&vma->lock);
	vma->aspace = aspace;

	return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

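	/* Hold a reference on the address space until msm_gem_vma_close() */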
	kref_get(&aspace->kref);

	return 0;
}

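/*
 * Create an address space: wraps the given MMU with a drm_mm VA
 * allocator covering [va_start, va_start + size).
 */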
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}