Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/dma-mapping.h>
21#include <linux/io.h>
22#include <linux/list.h>
23#include <linux/module.h>
24#include <linux/of_device.h>
25#include <linux/of.h>
26#include <linux/slab.h>
27
28#define CREATE_TRACE_POINTS
29#include <trace/events/host1x.h>
30#undef CREATE_TRACE_POINTS
31
32#include "bus.h"
33#include "channel.h"
34#include "debug.h"
35#include "dev.h"
36#include "intr.h"
37
38#include "hw/host1x01.h"
39#include "hw/host1x02.h"
40#include "hw/host1x04.h"
41#include "hw/host1x05.h"
42#include "hw/host1x06.h"
43
44void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
45{
46 writel(v, host1x->hv_regs + r);
47}
48
49u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
50{
51 return readl(host1x->hv_regs + r);
52}
53
54void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
55{
56 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
57
58 writel(v, sync_regs + r);
59}
60
61u32 host1x_sync_readl(struct host1x *host1x, u32 r)
62{
63 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
64
65 return readl(sync_regs + r);
66}
67
68void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
69{
70 writel(v, ch->regs + r);
71}
72
73u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
74{
75 return readl(ch->regs + r);
76}
77
/* Hardware parameters for host1x v01, used by Tegra20 and Tegra30. */
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};
87
/* Hardware parameters for host1x v02, used by Tegra114. */
static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};
97
/* Hardware parameters for host1x v04, used by Tegra124. */
static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};
107
/* Hardware parameters for host1x v05, used by Tegra210. */
static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};
117
/*
 * Hardware parameters for host1x v06, used by Tegra186. This generation
 * splits the register space into separate "hypervisor" and "vm" apertures
 * (see host1x_probe()), hence has_hypervisor and a zero sync_offset.
 */
static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(34),
	.has_hypervisor = true,
};
128
/* Device-tree match table, newest SoC generation listed first. */
static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
139
140static int host1x_probe(struct platform_device *pdev)
141{
142 struct host1x *host;
143 struct resource *regs, *hv_regs = NULL;
144 int syncpt_irq;
145 int err;
146
147 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
148 if (!host)
149 return -ENOMEM;
150
151 host->info = of_device_get_match_data(&pdev->dev);
152
153 if (host->info->has_hypervisor) {
154 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
155 if (!regs) {
156 dev_err(&pdev->dev, "failed to get vm registers\n");
157 return -ENXIO;
158 }
159
160 hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
161 "hypervisor");
162 if (!hv_regs) {
163 dev_err(&pdev->dev,
164 "failed to get hypervisor registers\n");
165 return -ENXIO;
166 }
167 } else {
168 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
169 if (!regs) {
170 dev_err(&pdev->dev, "failed to get registers\n");
171 return -ENXIO;
172 }
173 }
174
175 syncpt_irq = platform_get_irq(pdev, 0);
176 if (syncpt_irq < 0) {
177 dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
178 return syncpt_irq;
179 }
180
181 mutex_init(&host->devices_lock);
182 INIT_LIST_HEAD(&host->devices);
183 INIT_LIST_HEAD(&host->list);
184 host->dev = &pdev->dev;
185
186 /* set common host1x device data */
187 platform_set_drvdata(pdev, host);
188
189 host->regs = devm_ioremap_resource(&pdev->dev, regs);
190 if (IS_ERR(host->regs))
191 return PTR_ERR(host->regs);
192
193 if (host->info->has_hypervisor) {
194 host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
195 if (IS_ERR(host->hv_regs))
196 return PTR_ERR(host->hv_regs);
197 }
198
199 dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
200
201 if (host->info->init) {
202 err = host->info->init(host);
203 if (err)
204 return err;
205 }
206
207 host->clk = devm_clk_get(&pdev->dev, NULL);
208 if (IS_ERR(host->clk)) {
209 dev_err(&pdev->dev, "failed to get clock\n");
210 err = PTR_ERR(host->clk);
211 return err;
212 }
213
214 host->rst = devm_reset_control_get(&pdev->dev, "host1x");
215 if (IS_ERR(host->rst)) {
216 err = PTR_ERR(host->rst);
217 dev_err(&pdev->dev, "failed to get reset: %d\n", err);
218 return err;
219 }
220
221 host->group = iommu_group_get(&pdev->dev);
222 if (host->group) {
223 struct iommu_domain_geometry *geometry;
224 unsigned long order;
225
226 err = iova_cache_get();
227 if (err < 0)
228 goto put_group;
229
230 host->domain = iommu_domain_alloc(&platform_bus_type);
231 if (!host->domain) {
232 err = -ENOMEM;
233 goto put_cache;
234 }
235
236 err = iommu_attach_group(host->domain, host->group);
237 if (err) {
238 if (err == -ENODEV) {
239 iommu_domain_free(host->domain);
240 host->domain = NULL;
241 iova_cache_put();
242 iommu_group_put(host->group);
243 host->group = NULL;
244 goto skip_iommu;
245 }
246
247 goto fail_free_domain;
248 }
249
250 geometry = &host->domain->geometry;
251
252 order = __ffs(host->domain->pgsize_bitmap);
253 init_iova_domain(&host->iova, 1UL << order,
254 geometry->aperture_start >> order);
255 host->iova_end = geometry->aperture_end;
256 }
257
258skip_iommu:
259 err = host1x_channel_list_init(&host->channel_list,
260 host->info->nb_channels);
261 if (err) {
262 dev_err(&pdev->dev, "failed to initialize channel list\n");
263 goto fail_detach_device;
264 }
265
266 err = clk_prepare_enable(host->clk);
267 if (err < 0) {
268 dev_err(&pdev->dev, "failed to enable clock\n");
269 goto fail_free_channels;
270 }
271
272 err = reset_control_deassert(host->rst);
273 if (err < 0) {
274 dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
275 goto fail_unprepare_disable;
276 }
277
278 err = host1x_syncpt_init(host);
279 if (err) {
280 dev_err(&pdev->dev, "failed to initialize syncpts\n");
281 goto fail_reset_assert;
282 }
283
284 err = host1x_intr_init(host, syncpt_irq);
285 if (err) {
286 dev_err(&pdev->dev, "failed to initialize interrupts\n");
287 goto fail_deinit_syncpt;
288 }
289
290 host1x_debug_init(host);
291
292 err = host1x_register(host);
293 if (err < 0)
294 goto fail_deinit_intr;
295
296 return 0;
297
298fail_deinit_intr:
299 host1x_intr_deinit(host);
300fail_deinit_syncpt:
301 host1x_syncpt_deinit(host);
302fail_reset_assert:
303 reset_control_assert(host->rst);
304fail_unprepare_disable:
305 clk_disable_unprepare(host->clk);
306fail_free_channels:
307 host1x_channel_list_free(&host->channel_list);
308fail_detach_device:
309 if (host->group && host->domain) {
310 put_iova_domain(&host->iova);
311 iommu_detach_group(host->domain, host->group);
312 }
313fail_free_domain:
314 if (host->domain)
315 iommu_domain_free(host->domain);
316put_cache:
317 if (host->group)
318 iova_cache_put();
319put_group:
320 iommu_group_put(host->group);
321
322 return err;
323}
324
325static int host1x_remove(struct platform_device *pdev)
326{
327 struct host1x *host = platform_get_drvdata(pdev);
328
329 host1x_unregister(host);
330 host1x_intr_deinit(host);
331 host1x_syncpt_deinit(host);
332 reset_control_assert(host->rst);
333 clk_disable_unprepare(host->clk);
334
335 if (host->domain) {
336 put_iova_domain(&host->iova);
337 iommu_detach_group(host->domain, host->group);
338 iommu_domain_free(host->domain);
339 iova_cache_put();
340 iommu_group_put(host->group);
341 }
342
343 return 0;
344}
345
/* Platform driver glue; matching is done via the OF table above. */
static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};
354
/* Drivers registered and unregistered together at module init/exit. */
static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};
359
360static int __init tegra_host1x_init(void)
361{
362 int err;
363
364 err = bus_register(&host1x_bus_type);
365 if (err < 0)
366 return err;
367
368 err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
369 if (err < 0)
370 bus_unregister(&host1x_bus_type);
371
372 return err;
373}
374module_init(tegra_host1x_init);
375
/* Module exit: unregister drivers before the bus type they depend on. */
static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);
382
/* Module metadata. */
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");