Merge branch 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6

* 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
swiotlb: Use page alignment for early buffer allocation
swiotlb: make io_tlb_overflow static

+9 -9
lib/swiotlb.c
··· 70 */ 71 static unsigned long io_tlb_overflow = 32*1024; 72 73 - void *io_tlb_overflow_buffer; 74 75 /* 76 * This is a free list describing the number of free entries available from ··· 147 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE 148 * between io_tlb_start and io_tlb_end. 149 */ 150 - io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 151 for (i = 0; i < io_tlb_nslabs; i++) 152 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 153 io_tlb_index = 0; 154 - io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t)); 155 156 /* 157 * Get the overflow emergency buffer 158 */ 159 - io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 160 if (!io_tlb_overflow_buffer) 161 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 162 if (verbose) ··· 182 /* 183 * Get IO TLB memory from the low pages 184 */ 185 - io_tlb_start = alloc_bootmem_low_pages(bytes); 186 if (!io_tlb_start) 187 panic("Cannot allocate SWIOTLB buffer"); 188 ··· 308 get_order(io_tlb_nslabs << IO_TLB_SHIFT)); 309 } else { 310 free_bootmem_late(__pa(io_tlb_overflow_buffer), 311 - io_tlb_overflow); 312 free_bootmem_late(__pa(io_tlb_orig_addr), 313 - io_tlb_nslabs * sizeof(phys_addr_t)); 314 free_bootmem_late(__pa(io_tlb_list), 315 - io_tlb_nslabs * sizeof(int)); 316 free_bootmem_late(__pa(io_tlb_start), 317 - io_tlb_nslabs << IO_TLB_SHIFT); 318 } 319 } 320
··· 70 */ 71 static unsigned long io_tlb_overflow = 32*1024; 72 73 + static void *io_tlb_overflow_buffer; 74 75 /* 76 * This is a free list describing the number of free entries available from ··· 147 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE 148 * between io_tlb_start and io_tlb_end. 149 */ 150 + io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); 151 for (i = 0; i < io_tlb_nslabs; i++) 152 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 153 io_tlb_index = 0; 154 + io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); 155 156 /* 157 * Get the overflow emergency buffer 158 */ 159 + io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); 160 if (!io_tlb_overflow_buffer) 161 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 162 if (verbose) ··· 182 /* 183 * Get IO TLB memory from the low pages 184 */ 185 + io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); 186 if (!io_tlb_start) 187 panic("Cannot allocate SWIOTLB buffer"); 188 ··· 308 get_order(io_tlb_nslabs << IO_TLB_SHIFT)); 309 } else { 310 free_bootmem_late(__pa(io_tlb_overflow_buffer), 311 + PAGE_ALIGN(io_tlb_overflow)); 312 free_bootmem_late(__pa(io_tlb_orig_addr), 313 + PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); 314 free_bootmem_late(__pa(io_tlb_list), 315 + PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); 316 free_bootmem_late(__pa(io_tlb_start), 317 + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 318 } 319 } 320