// SPDX-License-Identifier: GPL-2.0
/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

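/*
 * Translate a device tree DMA specifier into a channel. The specifier is
 * expected to carry three cells, which are copied into a dw_dma_slave
 * below: the request line (used for both src_id and dst_id), the
 * memory-side master interface and the peripheral-side master interface.
 */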
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

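/*
 * ACPI glue: dw_dma_acpi_filter() matches a candidate channel against the
 * slave parameters taken from an acpi_dma_spec, and the registration helper
 * below exposes the controller through the generic ACPI DMA helpers using
 * acpi_dma_simple_xlate() as the translation callback.
 */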
#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

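/*
 * Build a dw_dma_platform_data structure from device tree properties when
 * no board-file platform data was supplied. An illustrative node (addresses,
 * phandles and values are examples only, not taken from a real board) using
 * the properties parsed below might look like:
 *
 *	dma@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xfc000000 0x1000>;
 *		interrupts = <12>;
 *		clocks = <&ahb_clk>;
 *		clock-names = "hclk";
 *		#dma-cells = <3>;
 *		dma-channels = <8>;
 *		dma-masters = <2>;
 *		data-width = <4 4>;
 *	};
 */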
#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

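/*
 * Probe: map the controller registers, set up the interrupt, clock and DMA
 * mask, pick up platform data (board file or device tree), hand the chip to
 * the common dw_dma_probe() core, and finally register the controller with
 * the OF and/or ACPI DMA helpers so clients can request channels.
 */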
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

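/*
 * Remove: unwind probe in reverse order. The OF DMA controller is
 * unregistered first so that no new channel requests can reach the
 * hardware while it is being torn down.
 */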
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

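/* Quiesce the controller and gate its clock on system shutdown or reboot. */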
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call do_dw_dma_disable() to stop any ongoing transfer.
	 * On some platforms we can't do that since the DMA device is powered
	 * off. Moreover, we have no way to check whether the platform is
	 * affected or not. That's why we call pm_runtime_get_sync() /
	 * pm_runtime_put() unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because the runtime PM framework is not
	 * fully used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	do_dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ "80862286", 0 },
	{ "808622C0", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

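/*
 * System sleep: the controller is stopped and its clock gated in the "late"
 * suspend phase, after client drivers have quiesced their channels in the
 * normal phase, and is brought back in the "early" resume phase before
 * clients resume.
 */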
#ifdef CONFIG_PM_SLEEP

static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	do_dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return do_dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

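/*
 * Register at subsys_initcall time rather than with the usual
 * module_platform_driver() so that the DMA controller is already available
 * when client drivers, typically registered at device_initcall time, start
 * requesting channels.
 */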
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);