/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/mem_encrypt.h>
#include <xen/xen.h>

#include <drm/drm_cache.h>

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = boot_cpu_data.x86_clflush_size;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

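/*
 * Flush every cache line of the given pages with clflushopt, bracketed by
 * full memory barriers so that the unordered flushes become globally visible.
 */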
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb(); /* Full memory barrier used before so that CLFLUSH is ordered. */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb(); /* Also used after CLFLUSH so that all caches are flushed. */
}
#endif

/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
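
/*
 * Illustrative (hypothetical) caller: a driver flushing the CPU caches for
 * the pages backing one of its buffer objects before non-coherent device
 * access, e.g.
 *
 *	drm_clflush_pages(bo->pages, bo->num_pages);
 *
 * where bo->pages and bo->num_pages are assumed fields of the driver's own
 * buffer-object type, not anything defined in this file.
 */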

/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /* CLFLUSH is ordered only by using memory barriers. */
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /* Make sure that every cache line entry is flushed. */

		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
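
/*
 * Illustrative (hypothetical) use: flushing an object whose backing store is
 * described by an sg_table, e.g.
 *
 *	drm_clflush_sg(obj->sgt);
 *
 * where obj->sgt is assumed to be a struct sg_table * owned by the caller.
 */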

/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier. */
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /* Ensure that every data cache line entry is flushed. */
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);

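/**
 * drm_need_swiotlb - Check whether DMA with the given address width needs swiotlb
 * @dma_bits: Number of address bits the device's DMA engine can drive.
 *
 * Returns true when running as a Xen PV domain or with memory encryption
 * active (both of which force the dma_alloc_coherent() path), or when some
 * I/O memory resource ends above the range addressable with @dma_bits.
 */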
bool drm_need_swiotlb(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 * allocator used in ttm_dma_populate() instead of
	 * ttm_populate_and_map_pages(), which bounce buffers so much in
	 * Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (mem_encrypt_active())
		return true;

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
		max_iomem = max(max_iomem, tmp->end);

	return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);