x86, pat: Preparatory changes in pat.c for bigger rbtree change

Minor changes in pat.c to clean up the code and make it smoother to introduce
the bigger rbtree-only change in the following patch. The changes are cleanup
only and should not have any functional impact.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20100210195909.792781000@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

Authored by venkatesh.pallipadi@intel.com and committed by H. Peter Anvin · be5a0c12 17d9ddc7

+117 -83 total

arch/x86/mm/pat.c (+89 -83)
···
 #include <asm/pat.h>
 #include <asm/io.h>
 
+#include "pat_internal.h"
+
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
···
 #endif
 
 
-static int debug_enable;
+int pat_debug_enable;
 
 static int __init pat_debug_setup(char *str)
 {
-	debug_enable = 1;
+	pat_debug_enable = 1;
 	return 0;
 }
 __setup("debugpat", pat_debug_setup);
-
-#define dprintk(fmt, arg...) \
-	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
 
 static u64 __read_mostly boot_pat_state;
 
···
 
 #undef PAT
 
-static char *cattr_name(unsigned long flags)
-{
-	switch (flags & _PAGE_CACHE_MASK) {
-	case _PAGE_CACHE_UC:		return "uncached";
-	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
-	case _PAGE_CACHE_WB:		return "write-back";
-	case _PAGE_CACHE_WC:		return "write-combining";
-	default:			return "broken";
-	}
-}
-
 /*
  * The global memtype list keeps track of memory type for specific
  * physical memory areas. Conflicting memory types in different
···
  *
  * memtype_lock protects both the linear list and rbtree.
  */
-
-struct memtype {
-	u64			start;
-	u64			end;
-	unsigned long		type;
-	struct list_head	nd;
-	struct rb_node		rb;
-};
 
 static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
···
 	return 0;
 }
 
+static int memtype_check_insert(struct memtype *new, unsigned long *new_type)
+{
+	struct memtype *entry;
+	u64 start, end;
+	unsigned long actual_type;
+	struct list_head *where;
+	int err = 0;
+
+	start = new->start;
+	end = new->end;
+	actual_type = new->type;
+
+	/* Search for existing mapping that overlaps the current range */
+	where = NULL;
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (end <= entry->start) {
+			where = entry->nd.prev;
+			break;
+		} else if (start <= entry->start) { /* end > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = entry->nd.prev;
+			}
+			break;
+		} else if (start < entry->end) { /* start > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+						&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
+			}
+			break;
+		}
+	}
+	if (!err) {
+		if (where)
+			list_add(&new->nd, where);
+		else
+			list_add_tail(&new->nd, &memtype_list);
+
+		memtype_rb_insert(&memtype_rbroot, new);
+	}
+	return err;
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
···
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		    unsigned long *new_type)
 {
-	struct memtype *new, *entry;
+	struct memtype *new;
 	unsigned long actual_type;
-	struct list_head *where;
 	int is_range_ram;
 	int err = 0;
 
···
 
 	spin_lock(&memtype_lock);
 
-	/* Search for existing mapping that overlaps the current range */
-	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
-		if (end <= entry->start) {
-			where = entry->nd.prev;
-			break;
-		} else if (start <= entry->start) { /* end > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-				where = entry->nd.prev;
-			}
-			break;
-		} else if (start < entry->end) { /* start > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-
-				/*
-				 * Move to right position in the linked
-				 * list to add this new entry
-				 */
-				list_for_each_entry_continue(entry,
-						&memtype_list, nd) {
-					if (start <= entry->start) {
-						where = entry->nd.prev;
-						break;
-					}
-				}
-			}
-			break;
-		}
-	}
-
+	err = memtype_check_insert(new, new_type);
 	if (err) {
 		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
 			"track %s, req %s\n",
···
 
 		return err;
 	}
-
-	if (where)
-		list_add(&new->nd, where);
-	else
-		list_add_tail(&new->nd, &memtype_list);
-
-	memtype_rb_insert(&memtype_rbroot, new);
 
 	spin_unlock(&memtype_lock);
 
···
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
-static struct memtype *memtype_get_idx(loff_t pos)
+static int copy_memtype_nth_element(struct memtype *out, loff_t pos)
 {
-	struct memtype *list_node, *print_entry;
+	struct memtype *list_node;
 	int i = 1;
 
-	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	list_for_each_entry(list_node, &memtype_list, nd) {
+		if (pos == i) {
+			*out = *list_node;
+			return 0;
+		}
+		++i;
+	}
+	return 1;
+}
+
+static struct memtype *memtype_get_idx(loff_t pos)
+{
+	struct memtype *print_entry;
+	int ret;
+
+	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!print_entry)
 		return NULL;
 
 	spin_lock(&memtype_lock);
-	list_for_each_entry(list_node, &memtype_list, nd) {
-		if (pos == i) {
-			*print_entry = *list_node;
-			spin_unlock(&memtype_lock);
-			return print_entry;
-		}
-		++i;
-	}
+	ret = copy_memtype_nth_element(print_entry, pos);
 	spin_unlock(&memtype_lock);
-	kfree(print_entry);
 
-	return NULL;
+	if (!ret) {
+		return print_entry;
+	} else {
+		kfree(print_entry);
+		return NULL;
+	}
 }
 
 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
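
The heart of the moved code is the three-way overlap classification that
memtype_check_insert() performs while walking the start-sorted memtype list.
Below is a minimal userspace sketch of that same walk; struct range,
classify() and the sample addresses are illustrative stand-ins, not kernel
code:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct memtype: a half-open
 * physical range [start, end) with a cache attribute name. */
struct range {
	unsigned long long start;
	unsigned long long end;
	const char *type;
};

/*
 * Walk entries sorted by start address and classify a new range against
 * each one, mirroring the three cases in memtype_check_insert():
 *   1. end <= entry->start : new range lies entirely before entry
 *   2. start <= entry->start (so end > entry->start) : overlap, new starts first
 *   3. start < entry->end (so start > entry->start)  : overlap, entry starts first
 */
static void classify(const struct range *sorted, int n,
		     unsigned long long start, unsigned long long end)
{
	int i;

	for (i = 0; i < n; i++) {
		const struct range *entry = &sorted[i];

		if (end <= entry->start) {
			printf("no overlap: fits before [%llx-%llx]\n",
			       entry->start, entry->end);
			return;
		} else if (start <= entry->start) {
			printf("overlap at [%llx-%llx], new range starts first\n",
			       entry->start, entry->end);
			return;
		} else if (start < entry->end) {
			printf("overlap at [%llx-%llx], entry starts first\n",
			       entry->start, entry->end);
			return;
		}
		/* else: entry ends at or before start, keep walking */
	}
	printf("no overlap: new range goes at the tail\n");
}

int main(void)
{
	struct range sorted[] = {
		{ 0x1000, 0x2000, "write-back" },
		{ 0x3000, 0x4000, "uncached" },
	};

	classify(sorted, 2, 0x0000, 0x0800);	/* before everything */
	classify(sorted, 2, 0x1800, 0x2800);	/* overlaps first entry */
	classify(sorted, 2, 0x5000, 0x6000);	/* past the tail */
	return 0;
}

In the kernel, the two overlap cases additionally call chk_conflict() to
verify that the requested cache attribute is compatible with the existing
entry. Factoring the whole walk out of reserve_memtype() means the rbtree
patch that follows only has to replace this one helper, leaving its callers
untouched.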
arch/x86/mm/pat_internal.h (+28)
···
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+	do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+	u64			start;
+	u64			end;
+	unsigned long		type;
+	struct list_head	nd;
+	struct rb_node		rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+	switch (flags & _PAGE_CACHE_MASK) {
+	case _PAGE_CACHE_UC:		return "uncached";
+	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+	case _PAGE_CACHE_WB:		return "write-back";
+	case _PAGE_CACHE_WC:		return "write-combining";
+	default:			return "broken";
+	}
+}
+
+#endif /* __PAT_INTERNAL_H_ */
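
The new header only moves existing pieces so that pat.c and the upcoming
rbtree code can share them: the pat_debug_enable knob behind the dprintk()
macro, the struct memtype record itself, and the cattr_name() pretty-printer.
A small userspace sketch of how the macro and helper combine; printf stands
in for printk, and the _PAGE_CACHE_* values here are made-up stand-ins for
the real bits defined in pgtable_types.h:

#include <stdio.h>

/* Stand-ins for the kernel's page-cache attribute bits; the real values
 * live in <asm/pgtable_types.h> and differ from these. */
#define _PAGE_CACHE_MASK	0x3UL
#define _PAGE_CACHE_WB		0x0UL
#define _PAGE_CACHE_WC		0x1UL
#define _PAGE_CACHE_UC_MINUS	0x2UL
#define _PAGE_CACHE_UC		0x3UL

/* Same shape as the pat_internal.h macro: the debug knob is an ordinary
 * global, so a suppressed message costs only one branch. */
int pat_debug_enable;

#define dprintk(fmt, arg...) \
	do { if (pat_debug_enable) printf(fmt, ##arg); } while (0)

static inline const char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

int main(void)
{
	dprintk("suppressed: debugging is off\n");

	pat_debug_enable = 1;	/* the kernel sets this via the "debugpat" boot parameter */
	dprintk("range is %s\n", cattr_name(_PAGE_CACHE_WC));
	return 0;
}

Keeping cattr_name() as static inline in the header gives each translation
unit its own copy, which for a four-way switch is cheaper than exporting an
extra symbol.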