Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl

No users left.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

+20 -59
-2
include/linux/swiotlb.h
··· 34 34 /* default to 64MB */ 35 35 #define IO_TLB_DEFAULT_SIZE (64UL<<20) 36 36 37 - int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags); 38 37 unsigned long swiotlb_size_or_default(void); 39 38 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, 40 39 int (*remap)(void *tlb, unsigned long nslabs)); 41 40 int swiotlb_init_late(size_t size, gfp_t gfp_mask, 42 41 int (*remap)(void *tlb, unsigned long nslabs)); 43 - extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); 44 42 extern void __init swiotlb_update_mem_attributes(void); 45 43 46 44 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+20 -57
kernel/dma/swiotlb.c
··· 225 225 return; 226 226 } 227 227 228 - int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, 229 - unsigned int flags) 230 - { 231 - struct io_tlb_mem *mem = &io_tlb_default_mem; 232 - size_t alloc_size; 233 - 234 - if (swiotlb_force_disable) 235 - return 0; 236 - 237 - /* protect against double initialization */ 238 - if (WARN_ON_ONCE(mem->nslabs)) 239 - return -ENOMEM; 240 - 241 - alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); 242 - mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); 243 - if (!mem->slots) 244 - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 245 - __func__, alloc_size, PAGE_SIZE); 246 - 247 - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); 248 - mem->force_bounce = flags & SWIOTLB_FORCE; 249 - 250 - if (flags & SWIOTLB_VERBOSE) 251 - swiotlb_print_info(); 252 - return 0; 253 - } 254 - 255 228 /* 256 229 * Statically reserve bounce buffer space and initialize bounce buffer data 257 230 * structures for the software IO TLB used to implement the DMA API. 
··· 232 259 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, 233 260 int (*remap)(void *tlb, unsigned long nslabs)) 234 261 { 262 + struct io_tlb_mem *mem = &io_tlb_default_mem; 235 263 unsigned long nslabs = default_nslabs; 264 + size_t alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); 236 265 size_t bytes; 237 266 void *tlb; 238 267 ··· 255 280 else 256 281 tlb = memblock_alloc_low(bytes, PAGE_SIZE); 257 282 if (!tlb) 258 - goto fail; 283 + panic("%s: failed to allocate tlb structure\n", __func__); 284 + 259 285 if (remap && remap(tlb, nslabs) < 0) { 260 286 memblock_free(tlb, PAGE_ALIGN(bytes)); 261 287 ··· 266 290 __func__, bytes); 267 291 goto retry; 268 292 } 269 - if (swiotlb_init_with_tbl(tlb, default_nslabs, flags)) 270 - goto fail_free_mem; 271 - return; 272 293 273 - fail_free_mem: 274 - memblock_free(tlb, bytes); 275 - fail: 276 - pr_warn("Cannot allocate buffer"); 294 + mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); 295 + if (!mem->slots) 296 + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 297 + __func__, alloc_size, PAGE_SIZE); 298 + 299 + swiotlb_init_io_tlb_mem(mem, __pa(tlb), default_nslabs, false); 300 + mem->force_bounce = flags & SWIOTLB_FORCE; 301 + 302 + if (flags & SWIOTLB_VERBOSE) 303 + swiotlb_print_info(); 277 304 } 278 305 279 306 void __init swiotlb_init(bool addressing_limit, unsigned int flags) ··· 292 313 int swiotlb_init_late(size_t size, gfp_t gfp_mask, 293 314 int (*remap)(void *tlb, unsigned long nslabs)) 294 315 { 316 + struct io_tlb_mem *mem = &io_tlb_default_mem; 295 317 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); 296 318 unsigned long bytes; 297 319 unsigned char *vstart = NULL; ··· 333 353 return rc; 334 354 goto retry; 335 355 } 336 - rc = swiotlb_late_init_with_tbl(vstart, nslabs); 337 - if (rc) 338 - free_pages((unsigned long)vstart, order); 339 - 340 - return rc; 341 - } 342 - 343 - int 344 - swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) 345 - { 346 - struct io_tlb_mem *mem = &io_tlb_default_mem; 347 - unsigned long bytes = nslabs << IO_TLB_SHIFT; 348 - 349 - if (swiotlb_force_disable) 350 - return 0; 351 - 352 - /* protect against double initialization */ 353 - if (WARN_ON_ONCE(mem->nslabs)) 354 - return -ENOMEM; 355 356 356 357 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 357 358 get_order(array_size(sizeof(*mem->slots), nslabs))); 358 - if (!mem->slots) 359 + if (!mem->slots) { 360 + free_pages((unsigned long)vstart, order); 359 361 return -ENOMEM; 362 + } 360 363 361 - set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); 362 - swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); 364 + set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT); 365 + swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true); 363 366 364 367 swiotlb_print_info(); 365 368 return 0;