at v2.6.30

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

#define err_printk(dev, entry, format, arg...) do {                    \
                error_count += 1;                                       \
                if (show_all_errors || show_num_errors > 0) {           \
                        WARN(1, "%s %s: " format,                       \
                             dev_driver_string(dev),                    \
                             dev_name(dev) , ## arg);                   \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 here as the index into the hash
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

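/*
 * Worked example for the hash above (the address is made up purely for
 * illustration): a mapping with dev_addr == 0x12345678 hashes to
 * (0x12345678 >> 13) & (1024 - 1) == 0x91a2 & 0x3ff == 0x1a2, so its entry
 * is kept on dma_entry_hash[0x1a2]. All mappings whose bus addresses share
 * bits 13-22 land in the same bucket and are told apart by
 * hash_bucket_find() comparing both dev_addr and dev.
 */
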
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif
        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

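/*
 * All of the mapping hooks further down follow the same allocate-fill-add
 * pattern when they record a new mapping; roughly (a condensed sketch of
 * what e.g. debug_dma_map_page() does below, not a separate code path):
 *
 *      struct dma_debug_entry *entry = dma_entry_alloc();
 *
 *      if (!entry)
 *              return;         // free list was empty, debugging got disabled
 *
 *      entry->dev      = dev;
 *      entry->type     = dma_debug_page;
 *      entry->dev_addr = dma_addr;
 *      entry->size     = size;
 *      add_dma_entry(entry);
 *
 * dma_entry_free() is only called from check_unmap(), once the matching
 * unmap/free hook has found the entry again.
 */
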
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        /* FIXME: register notifier */
}

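/*
 * With debugfs mounted (typically at /sys/kernel/debug), dma_debug_fs_init()
 * above creates the following files:
 *
 *      dma-api/disabled                read-only, mirrors global_disable
 *      dma-api/error_count             read-only, mirrors error_count
 *      dma-api/all_errors              writeable, report every error if != 0
 *      dma-api/num_errors              writeable, errors still to be reported
 *      dma-api/num_free_entries        read-only, current free-list size
 *      dma-api/min_free_entries        read-only, free-list low-water mark
 */
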
/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

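/*
 * Both parameters are given on the kernel command line, for example
 *
 *      dma_debug=off dma_debug_entries=8192
 *
 * (the value 8192 is only an example). "dma_debug=off" disables the checks
 * completely; "dma_debug_entries=" overrides the number of preallocated
 * dma_debug_entry structs that the architecture passes to dma_debug_init().
 */
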
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free an invalid DMA memory address\n");
                return;
        }

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                           "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                           "memory from kernel text or rodata "
                           "[addr=%p] [size=%llu]\n", addr, size);
}

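/*
 * overlap() above reports a hit in three cases: the buffer starts inside
 * [start, end), the buffer ends inside [start, end), or the buffer starts
 * before start and ends at or after end, i.e. it spans the whole region.
 * As a purely illustrative example, a 16-byte buffer starting at _etext - 8
 * begins inside the kernel text section, so check_for_illegal_area() flags
 * it via the first condition.
 */
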
static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] [sync offset=%llu] "
                           "[sync size=%llu]\n", entry->dev_addr, entry->size,
                           offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);

}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = ((char *)page_address(page)) + offset;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_page,
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), s->length);
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = s->dma_address,
                        .size         = s->length,
                        .direction    = dir,
                        .sg_call_ents = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

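/*
 * Drivers do not call these hooks themselves; the architecture's DMA-API
 * implementation invokes them next to the real mapping operation.  A
 * simplified sketch (not the actual wrapper of any particular architecture;
 * "ops" stands for the device's struct dma_map_ops) of how a
 * dma_map_single() wrapper would drive the hook:
 *
 *      static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
 *                                              size_t size, int dir)
 *      {
 *              dma_addr_t addr;
 *
 *              addr = ops->map_page(dev, virt_to_page(ptr),
 *                                   offset_in_page(ptr), size, dir);
 *              debug_dma_map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
 *                                 size, dir, addr, true);
 *              return addr;
 *      }
 *
 * The matching dma_unmap_single() wrapper calls debug_dma_unmap_page() with
 * map_single == true, which is how check_unmap() can catch a buffer that was
 * mapped with dma_map_single() but unmapped with dma_unmap_page().
 */
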
void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_coherent,
                .dev       = dev,
                .paddr     = virt_to_phys(virt),
                .dev_addr  = addr,
                .size      = size,
                .direction = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

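/*
 * Taken together, the two direction checks in check_sync() encode this rule
 * for non-bidirectional mappings: memory mapped DMA_TO_DEVICE is read-only
 * for the device, so syncing it for the cpu is suspicious unless the sync
 * direction is DMA_TO_DEVICE itself; memory mapped DMA_FROM_DEVICE is
 * write-only for the device, so syncing it for the device is suspicious
 * unless the sync direction is DMA_FROM_DEVICE.  For example, a buffer
 * mapped with DMA_FROM_DEVICE that is later passed to
 * debug_dma_sync_single_for_device() with direction DMA_TO_DEVICE triggers
 * the "syncs device write-only DMA memory to device" warning above (in
 * addition to the plain direction-mismatch warning).
 */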