+30 -10 arch/xtensa/kernel/pci-dma.c
···
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
···
                               unsigned long attrs)
 {
         unsigned long ret;
-        unsigned long uncached = 0;
+        unsigned long uncached;
         unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
         struct page *page = NULL;
 
···
         if (!page)
                 return NULL;
 
+        *handle = phys_to_dma(dev, page_to_phys(page));
+
+#ifdef CONFIG_MMU
+        if (PageHighMem(page)) {
+                void *p;
+
+                p = dma_common_contiguous_remap(page, size, VM_MAP,
+                                                pgprot_noncached(PAGE_KERNEL),
+                                                __builtin_return_address(0));
+                if (!p) {
+                        if (!dma_release_from_contiguous(dev, page, count))
+                                __free_pages(page, get_order(size));
+                }
+                return p;
+        }
+#endif
         ret = (unsigned long)page_address(page);
-
-        /* We currently don't support coherent memory outside KSEG */
-
         BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
                ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
         uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-        *handle = virt_to_bus((void *)ret);
         __invalidate_dcache_range(ret, size);
 
         return (void *)uncached;
···
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                             dma_addr_t dma_handle, unsigned long attrs)
 {
-        unsigned long addr = (unsigned long)vaddr +
-                XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-        struct page *page = virt_to_page(addr);
         unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        unsigned long addr = (unsigned long)vaddr;
+        struct page *page;
 
-        BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-               addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+        if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+            addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+                addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+                page = virt_to_page(addr);
+        } else {
+#ifdef CONFIG_MMU
+                dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+                page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+        }
 
         if (!dma_release_from_contiguous(dev, page, count))
                 __free_pages(page, get_order(size));
+63 -7 arch/xtensa/mm/init.c
···
         free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+        for (; pfn < end; pfn++)
+                free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+        unsigned long max_low = max_low_pfn;
+        struct memblock_region *mem, *res;
+
+        reset_all_zones_managed_pages();
+        /* set highmem page free */
+        for_each_memblock(memory, mem) {
+                unsigned long start = memblock_region_memory_base_pfn(mem);
+                unsigned long end = memblock_region_memory_end_pfn(mem);
+
+                /* Ignore complete lowmem entries */
+                if (end <= max_low)
+                        continue;
+
+                if (memblock_is_nomap(mem))
+                        continue;
+
+                /* Truncate partial highmem entries */
+                if (start < max_low)
+                        start = max_low;
+
+                /* Find and exclude any reserved regions */
+                for_each_memblock(reserved, res) {
+                        unsigned long res_start, res_end;
+
+                        res_start = memblock_region_reserved_base_pfn(res);
+                        res_end = memblock_region_reserved_end_pfn(res);
+
+                        if (res_end < start)
+                                continue;
+                        if (res_start < start)
+                                res_start = start;
+                        if (res_start > end)
+                                res_start = end;
+                        if (res_end > end)
+                                res_end = end;
+                        if (res_start != start)
+                                free_area_high(start, res_start);
+                        start = res_end;
+                        if (start == end)
+                                break;
+                }
+
+                /* And now free anything which remains */
+                if (start < end)
+                        free_area_high(start, end);
+        }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-        unsigned long tmp;
-
-        reset_all_zones_managed_pages();
-        for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-                free_highmem_page(pfn_to_page(tmp));
-#endif
+        free_highpages();
 
         max_mapnr = max_pfn - ARCH_PFN_OFFSET;
         high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);