Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ion: add helper to zero contiguous region of pages

Add ion_heap_pages_zero for ion heaps to use to zero pages
during initialization or allocation, when a struct ion_buffer
may not be available. Use it from the chunk and carveout
heaps.

Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Colin Cross and committed by
Greg Kroah-Hartman
df6cf5c8 ed5bf01a

+58 -48
+13
drivers/staging/android/ion/ion_carveout_heap.c
··· 150 150 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) 151 151 { 152 152 struct ion_carveout_heap *carveout_heap; 153 + int ret; 154 + 155 + struct page *page; 156 + size_t size; 157 + 158 + page = pfn_to_page(PFN_DOWN(heap_data->base)); 159 + size = heap_data->size; 160 + 161 + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); 162 + 163 + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); 164 + if (ret) 165 + return ERR_PTR(ret); 153 166 154 167 carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); 155 168 if (!carveout_heap)
+12 -27
drivers/staging/android/ion/ion_chunk_heap.c
··· 140 140 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) 141 141 { 142 142 struct ion_chunk_heap *chunk_heap; 143 - struct vm_struct *vm_struct; 144 - pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); 145 - int i, ret; 143 + int ret; 144 + struct page *page; 145 + size_t size; 146 + 147 + page = pfn_to_page(PFN_DOWN(heap_data->base)); 148 + size = heap_data->size; 149 + 150 + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); 151 + 152 + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); 153 + if (ret) 154 + return ERR_PTR(ret); 146 155 147 156 chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); 148 157 if (!chunk_heap) ··· 168 159 chunk_heap->size = heap_data->size; 169 160 chunk_heap->allocated = 0; 170 161 171 - vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC); 172 - if (!vm_struct) { 173 - ret = -ENOMEM; 174 - goto error; 175 - } 176 - for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) { 177 - struct page *page = pfn_to_page(PFN_DOWN(chunk_heap->base + i)); 178 - struct page **pages = &page; 179 - 180 - ret = map_vm_area(vm_struct, pgprot, &pages); 181 - if (ret) 182 - goto error_map_vm_area; 183 - memset(vm_struct->addr, 0, PAGE_SIZE); 184 - unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE); 185 - } 186 - free_vm_area(vm_struct); 187 - 188 - ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)), 189 - heap_data->size, DMA_BIDIRECTIONAL); 190 - 191 162 gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); 192 163 chunk_heap->heap.ops = &chunk_heap_ops; 193 164 chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; ··· 177 188 178 189 return &chunk_heap->heap; 179 190 180 - error_map_vm_area: 181 - free_vm_area(vm_struct); 182 - error: 183 - gen_pool_destroy(chunk_heap->pool); 184 191 error_gen_pool_create: 185 192 kfree(chunk_heap); 186 193 return ERR_PTR(ret);
+32 -21
drivers/staging/android/ion/ion_heap.c
··· 114 114 return 0; 115 115 } 116 116 117 + static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, 118 + pgprot_t pgprot) 119 + { 120 + int p = 0; 121 + int ret = 0; 122 + struct sg_page_iter piter; 123 + struct page *pages[32]; 124 + 125 + for_each_sg_page(sgl, &piter, nents, 0) { 126 + pages[p++] = sg_page_iter_page(&piter); 127 + if (p == ARRAY_SIZE(pages)) { 128 + ret = ion_heap_clear_pages(pages, p, pgprot); 129 + if (ret) 130 + return ret; 131 + p = 0; 132 + } 133 + } 134 + if (p) 135 + ret = ion_heap_clear_pages(pages, p, pgprot); 136 + 137 + return ret; 138 + } 139 + 117 140 int ion_heap_buffer_zero(struct ion_buffer *buffer) 118 141 { 119 142 struct sg_table *table = buffer->sg_table; 120 143 pgprot_t pgprot; 121 - struct scatterlist *sg; 122 - int i, j, ret = 0; 123 - struct page *pages[32]; 124 - int k = 0; 125 144 126 145 if (buffer->flags & ION_FLAG_CACHED) 127 146 pgprot = PAGE_KERNEL; 128 147 else 129 148 pgprot = pgprot_writecombine(PAGE_KERNEL); 130 149 131 - for_each_sg(table->sgl, sg, table->nents, i) { 132 - struct page *page = sg_page(sg); 133 - unsigned long len = sg->length; 150 + return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); 151 + } 134 152 135 - for (j = 0; j < len / PAGE_SIZE; j++) { 136 - pages[k++] = page + j; 137 - if (k == ARRAY_SIZE(pages)) { 138 - ret = ion_heap_clear_pages(pages, k, pgprot); 139 - if (ret) 140 - goto end; 141 - k = 0; 142 - } 143 - } 144 - if (k) 145 - ret = ion_heap_clear_pages(pages, k, pgprot); 146 - } 147 - end: 148 - return ret; 153 + int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) 154 + { 155 + struct scatterlist sg; 156 + 157 + sg_init_table(&sg, 1); 158 + sg_set_page(&sg, page, size, 0); 159 + return ion_heap_sglist_zero(&sg, 1, pgprot); 149 160 } 150 161 151 162 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+1
drivers/staging/android/ion/ion_priv.h
··· 215 215 int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, 216 216 struct vm_area_struct *); 217 217 int ion_heap_buffer_zero(struct ion_buffer *buffer); 218 + int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); 218 219 219 220 /** 220 221 * ion_heap_init_deferred_free -- initialize deferred free functionality