Merge branch 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6

* 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
swiotlb: Use page alignment for early buffer allocation
swiotlb: make io_tlb_overflow static

+9 -9
lib/swiotlb.c
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
@@ -182,7 +182,7 @@
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 
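The common thread in both changes is that the early (bootmem) SWIOTLB buffers are now sized in whole pages, so the free_bootmem_late() calls in the last hunk release exactly the page-aligned sizes that were allocated. As a reference for the arithmetic only, a minimal userspace sketch of what PAGE_ALIGN() does, assuming a 4 KiB page size (the kernel's macro uses the real PAGE_SIZE), with a hypothetical slab count for the second example:

#include <stdio.h>

/* Illustration only: a fixed 4 KiB page size is assumed here;
 * the kernel's PAGE_ALIGN() is defined against PAGE_SIZE. */
#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long overflow = 32 * 1024;            /* io_tlb_overflow from the diff */
	unsigned long list_sz  = 100001 * sizeof(int); /* hypothetical io_tlb_nslabs    */

	/* Sizes that are already page-aligned are unchanged; everything
	 * else is rounded up to the next page boundary. */
	printf("%lu -> %lu\n", overflow, PAGE_ALIGN(overflow));
	printf("%lu -> %lu\n", list_sz,  PAGE_ALIGN(list_sz));
	return 0;
}

Since the *_pages bootmem allocators hand back page-aligned memory, passing the same PAGE_ALIGN()ed sizes to free_bootmem_late() keeps the allocate and free paths symmetric.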