Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 */
5#include <linux/memremap.h>
6#include <linux/rculist.h>
7#include <linux/export.h>
8#include <linux/ioport.h>
9#include <linux/module.h>
10#include <linux/types.h>
11#include <linux/pfn_t.h>
12#include <linux/acpi.h>
13#include <linux/io.h>
14#include <linux/mm.h>
15#include "nfit_test.h"
16
/* List of registered test-override callback sets, published via RCU */
static LIST_HEAD(iomap_head);

static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* maps a phys address to an emulated resource */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* intercepts ACPI _DSM evaluation */
	struct list_head list;			/* membership on iomap_head */
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
26
/*
 * Install the lookup and _DSM callbacks and publish them on iomap_head.
 * The callback assignments must complete before the RCU publication, so
 * list_add_rcu() stays last.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
35
/*
 * Unpublish the callbacks and wait for in-flight RCU readers
 * (get_nfit_res() et al) to drain before the caller tears down the
 * state those callbacks reference.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
42
43static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
44{
45 struct iomap_ops *ops;
46
47 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
48 if (ops)
49 return ops->nfit_test_lookup(resource);
50 return NULL;
51}
52
53struct nfit_test_resource *get_nfit_res(resource_size_t resource)
54{
55 struct nfit_test_resource *res;
56
57 rcu_read_lock();
58 res = __get_nfit_res(resource);
59 rcu_read_unlock();
60
61 return res;
62}
63EXPORT_SYMBOL(get_nfit_res);
64
65void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
66 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
67{
68 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
69
70 if (nfit_res)
71 return (void __iomem *) nfit_res->buf + offset
72 - nfit_res->res.start;
73 return fallback_fn(offset, size);
74}
75
76void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
77 resource_size_t offset, unsigned long size)
78{
79 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
80
81 if (nfit_res)
82 return (void __iomem *) nfit_res->buf + offset
83 - nfit_res->res.start;
84 return devm_ioremap_nocache(dev, offset, size);
85}
86EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
87
88void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
89 size_t size, unsigned long flags)
90{
91 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
92
93 if (nfit_res)
94 return nfit_res->buf + offset - nfit_res->res.start;
95 return devm_memremap(dev, offset, size, flags);
96}
97EXPORT_SYMBOL(__wrap_devm_memremap);
98
99static void nfit_test_kill(void *_pgmap)
100{
101 struct dev_pagemap *pgmap = _pgmap;
102
103 WARN_ON(!pgmap || !pgmap->ref);
104
105 if (pgmap->ops && pgmap->ops->kill)
106 pgmap->ops->kill(pgmap);
107 else
108 percpu_ref_kill(pgmap->ref);
109
110 if (pgmap->ops && pgmap->ops->cleanup) {
111 pgmap->ops->cleanup(pgmap);
112 } else {
113 wait_for_completion(&pgmap->done);
114 percpu_ref_exit(pgmap->ref);
115 }
116}
117
118static void dev_pagemap_percpu_release(struct percpu_ref *ref)
119{
120 struct dev_pagemap *pgmap =
121 container_of(ref, struct dev_pagemap, internal_ref);
122
123 complete(&pgmap->done);
124}
125
/*
 * devm_memremap_pages() interposer: for emulated ranges, skip the real
 * page-map setup and hand back the test buffer while still honoring
 * the dev_pagemap reference-count contract.
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->res.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	if (!pgmap->ref) {
		/* internal-ref mode: caller must not supply teardown ops */
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		/* caller-provided ref requires both kill and cleanup */
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* tear the ref down automatically when @dev is unbound */
	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
158
159pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
160{
161 struct nfit_test_resource *nfit_res = get_nfit_res(addr);
162
163 if (nfit_res)
164 flags &= ~PFN_MAP;
165 return phys_to_pfn_t(addr, flags);
166}
167EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
168
169void *__wrap_memremap(resource_size_t offset, size_t size,
170 unsigned long flags)
171{
172 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
173
174 if (nfit_res)
175 return nfit_res->buf + offset - nfit_res->res.start;
176 return memremap(offset, size, flags);
177}
178EXPORT_SYMBOL(__wrap_memremap);
179
/*
 * devm_memunmap() interposer: pointers into a test buffer were never
 * really mapped, so there is nothing to undo for them.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	if (get_nfit_res((long) addr))
		return;
	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
189
/* ioremap_nocache() interposer, see __nfit_test_ioremap() */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
195
/* ioremap() interposer, see __nfit_test_ioremap() */
void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);
201
/* ioremap_wc() interposer, see __nfit_test_ioremap() */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
207
208void __wrap_iounmap(volatile void __iomem *addr)
209{
210 struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
211 if (nfit_res)
212 return;
213 return iounmap(addr);
214}
215EXPORT_SYMBOL(__wrap_iounmap);
216
/*
 * memunmap() interposer: test-buffer pointers were never remapped,
 * so skip the real unmap for them.
 */
void __wrap_memunmap(void *addr)
{
	if (get_nfit_res((long) addr))
		return;
	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
226
/* Forward declaration: needed by nfit_devres_release() below */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);
230
231static void nfit_devres_release(struct device *dev, void *data)
232{
233 struct resource *res = *((struct resource **) data);
234
235 WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
236 resource_size(res)));
237}
238
239static int match(struct device *dev, void *__res, void *match_data)
240{
241 struct resource *res = *((struct resource **) __res);
242 resource_size_t start = *((resource_size_t *) match_data);
243
244 return res->start == start;
245}
246
/*
 * Release a region previously handed out by nfit_test_request_region().
 * Returns true when @start falls inside an emulated range (a size or
 * lookup mismatch only WARNs), false when the caller should fall back
 * to the real __release_region().
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/* devm path: devres invokes nfit_devres_release() */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* find and unlink the request matching @start */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
					__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
283
/*
 * __request_region() interposer: requests against an emulated range are
 * tracked on the nfit_test_resource's own request list rather than the
 * real resource tree.  Non-emulated ranges fall through to the stock
 * request helpers.
 *
 * NOTE(review): the busy check and the list_add() below take
 * nfit_res->lock separately, so two racing requesters could both pass
 * the check -- presumably request callers are serialized; confirm.
 */
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			/* reject requests running past the emulated range */
			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			/* refuse a second request at the same start address */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			/* mimic what __request_region() would have set up */
			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				/* arrange automatic release on device teardown */
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d)
					return NULL;
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}
352
/* __request_region() entry point for the linker --wrap machinery */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
360
361int __wrap_insert_resource(struct resource *parent, struct resource *res)
362{
363 if (get_nfit_res(res->start))
364 return 0;
365 return insert_resource(parent, res);
366}
367EXPORT_SYMBOL(__wrap_insert_resource);
368
369int __wrap_remove_resource(struct resource *res)
370{
371 if (get_nfit_res(res->start))
372 return 0;
373 return remove_resource(res);
374}
375EXPORT_SYMBOL(__wrap_remove_resource);
376
377struct resource *__wrap___devm_request_region(struct device *dev,
378 struct resource *parent, resource_size_t start,
379 resource_size_t n, const char *name)
380{
381 if (!dev)
382 return NULL;
383 return nfit_test_request_region(dev, parent, start, n, name, 0);
384}
385EXPORT_SYMBOL(__wrap___devm_request_region);
386
387void __wrap___release_region(struct resource *parent, resource_size_t start,
388 resource_size_t n)
389{
390 if (!nfit_test_release_region(NULL, parent, start, n))
391 __release_region(parent, start, n);
392}
393EXPORT_SYMBOL(__wrap___release_region);
394
395void __wrap___devm_release_region(struct device *dev, struct resource *parent,
396 resource_size_t start, resource_size_t n)
397{
398 if (!nfit_test_release_region(dev, parent, start, n))
399 __devm_release_region(dev, parent, start, n);
400}
401EXPORT_SYMBOL(__wrap___devm_release_region);
402
/*
 * acpi_evaluate_object() interposer: for emulated handles, answer _FIT
 * from the test resource's buffer; any other path, a non-test handle,
 * or a missing @buf goes to the real ACPI interpreter.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	/* nfit_res->buf stores a union acpi_object * -- presumably set up
	 * by nfit_test; confirm against the test harness */
	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
418
419union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
420 u64 rev, u64 func, union acpi_object *argv4)
421{
422 union acpi_object *obj = ERR_PTR(-ENXIO);
423 struct iomap_ops *ops;
424
425 rcu_read_lock();
426 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
427 if (ops)
428 obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
429 rcu_read_unlock();
430
431 if (IS_ERR(obj))
432 return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
433 return obj;
434}
435EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
436
437MODULE_LICENSE("GPL v2");