x86, pat: Preparatory changes in pat.c for bigger rbtree change

Minor changes in pat.c to clean up the code and make it smoother to introduce
the bigger rbtree-only change in the following patch. The changes are cleanup
only and should not have any functional impact.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20100210195909.792781000@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

authored by venkatesh.pallipadi@intel.com and committed by H. Peter Anvin be5a0c12 17d9ddc7

2 files changed, +117 -83

arch/x86/mm/pat.c (+89 -83)
···
 #include <asm/pat.h>
 #include <asm/io.h>
 
+#include "pat_internal.h"
+
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
···
 #endif
 
 
-static int debug_enable;
+int pat_debug_enable;
 
 static int __init pat_debug_setup(char *str)
 {
-        debug_enable = 1;
+        pat_debug_enable = 1;
         return 0;
 }
 __setup("debugpat", pat_debug_setup);
-
-#define dprintk(fmt, arg...) \
-        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
 
 static u64 __read_mostly boot_pat_state;
 
···
 
 #undef PAT
 
-static char *cattr_name(unsigned long flags)
-{
-        switch (flags & _PAGE_CACHE_MASK) {
-        case _PAGE_CACHE_UC: return "uncached";
-        case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
-        case _PAGE_CACHE_WB: return "write-back";
-        case _PAGE_CACHE_WC: return "write-combining";
-        default: return "broken";
-        }
-}
-
 /*
  * The global memtype list keeps track of memory type for specific
  * physical memory areas. Conflicting memory types in different
···
  *
  * memtype_lock protects both the linear list and rbtree.
  */
-
-struct memtype {
-        u64 start;
-        u64 end;
-        unsigned long type;
-        struct list_head nd;
-        struct rb_node rb;
-};
 
 static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
···
         return 0;
 }
 
+static int memtype_check_insert(struct memtype *new, unsigned long *new_type)
+{
+        struct memtype *entry;
+        u64 start, end;
+        unsigned long actual_type;
+        struct list_head *where;
+        int err = 0;
+
+        start = new->start;
+        end = new->end;
+        actual_type = new->type;
+
+        /* Search for existing mapping that overlaps the current range */
+        where = NULL;
+        list_for_each_entry(entry, &memtype_list, nd) {
+                if (end <= entry->start) {
+                        where = entry->nd.prev;
+                        break;
+                } else if (start <= entry->start) { /* end > entry->start */
+                        err = chk_conflict(new, entry, new_type);
+                        if (!err) {
+                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
+                                        entry->start, entry->end);
+                                where = entry->nd.prev;
+                        }
+                        break;
+                } else if (start < entry->end) { /* start > entry->start */
+                        err = chk_conflict(new, entry, new_type);
+                        if (!err) {
+                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
+                                        entry->start, entry->end);
+
+                                /*
+                                 * Move to right position in the linked
+                                 * list to add this new entry
+                                 */
+                                list_for_each_entry_continue(entry,
+                                                        &memtype_list, nd) {
+                                        if (start <= entry->start) {
+                                                where = entry->nd.prev;
+                                                break;
+                                        }
+                                }
+                        }
+                        break;
+                }
+        }
+        if (!err) {
+                if (where)
+                        list_add(&new->nd, where);
+                else
+                        list_add_tail(&new->nd, &memtype_list);
+
+                memtype_rb_insert(&memtype_rbroot, new);
+        }
+        return err;
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
···
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                     unsigned long *new_type)
 {
-        struct memtype *new, *entry;
+        struct memtype *new;
         unsigned long actual_type;
-        struct list_head *where;
         int is_range_ram;
         int err = 0;
 
···
 
         spin_lock(&memtype_lock);
 
-        /* Search for existing mapping that overlaps the current range */
-        where = NULL;
-        list_for_each_entry(entry, &memtype_list, nd) {
-                if (end <= entry->start) {
-                        where = entry->nd.prev;
-                        break;
-                } else if (start <= entry->start) { /* end > entry->start */
-                        err = chk_conflict(new, entry, new_type);
-                        if (!err) {
-                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
-                                        entry->start, entry->end);
-                                where = entry->nd.prev;
-                        }
-                        break;
-                } else if (start < entry->end) { /* start > entry->start */
-                        err = chk_conflict(new, entry, new_type);
-                        if (!err) {
-                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
-                                        entry->start, entry->end);
-
-                                /*
-                                 * Move to right position in the linked
-                                 * list to add this new entry
-                                 */
-                                list_for_each_entry_continue(entry,
-                                                        &memtype_list, nd) {
-                                        if (start <= entry->start) {
-                                                where = entry->nd.prev;
-                                                break;
-                                        }
-                                }
-                        }
-                        break;
-                }
-        }
-
+        err = memtype_check_insert(new, new_type);
         if (err) {
                 printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                        "track %s, req %s\n",
···
 
                 return err;
         }
-
-        if (where)
-                list_add(&new->nd, where);
-        else
-                list_add_tail(&new->nd, &memtype_list);
-
-        memtype_rb_insert(&memtype_rbroot, new);
 
         spin_unlock(&memtype_lock);
 
···
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
-static struct memtype *memtype_get_idx(loff_t pos)
+static int copy_memtype_nth_element(struct memtype *out, loff_t pos)
 {
-        struct memtype *list_node, *print_entry;
+        struct memtype *list_node;
         int i = 1;
 
-        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+        list_for_each_entry(list_node, &memtype_list, nd) {
+                if (pos == i) {
+                        *out = *list_node;
+                        return 0;
+                }
+                ++i;
+        }
+        return 1;
+}
+
+static struct memtype *memtype_get_idx(loff_t pos)
+{
+        struct memtype *print_entry;
+        int ret;
+
+        print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
         if (!print_entry)
                 return NULL;
 
         spin_lock(&memtype_lock);
-        list_for_each_entry(list_node, &memtype_list, nd) {
-                if (pos == i) {
-                        *print_entry = *list_node;
-                        spin_unlock(&memtype_lock);
-                        return print_entry;
-                }
-                ++i;
-        }
+        ret = copy_memtype_nth_element(print_entry, pos);
         spin_unlock(&memtype_lock);
-        kfree(print_entry);
 
-        return NULL;
+        if (!ret) {
+                return print_entry;
+        } else {
+                kfree(print_entry);
+                return NULL;
+        }
 }
 
 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
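For readers skimming the diff: the interval test that moves into memtype_check_insert()
is unchanged. The list is kept sorted by start address, and a new range is flagged only
if it intersects an existing entry. Below is a minimal user-space sketch of that overlap
check; the struct, the array-based "list", and the function names are invented for
illustration and are not part of the kernel code.

#include <stdio.h>

/* Illustrative stand-in for the kernel's memtype range: [start, end) */
struct range {
        unsigned long long start;
        unsigned long long end;
};

/*
 * Return 1 if [start, end) intersects any entry of a start-sorted list,
 * mirroring the three branches walked by memtype_check_insert().
 */
static int overlaps_existing(const struct range *list, int n,
                             unsigned long long start, unsigned long long end)
{
        for (int i = 0; i < n; i++) {
                if (end <= list[i].start)
                        return 0;  /* new range ends before this (and every later) entry */
                else if (start <= list[i].start)
                        return 1;  /* end > entry start and start <= entry start: overlap */
                else if (start < list[i].end)
                        return 1;  /* new range begins inside this entry: overlap */
                /* otherwise the new range starts after this entry; keep scanning */
        }
        return 0;
}

int main(void)
{
        struct range tracked[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };

        printf("%d\n", overlaps_existing(tracked, 2, 0x2000, 0x3000)); /* 0: fits in the gap */
        printf("%d\n", overlaps_existing(tracked, 2, 0x1800, 0x2800)); /* 1: overlaps the first entry */
        return 0;
}

In the kernel, a detected intersection additionally goes through chk_conflict() to decide
whether the requested and existing memory types are compatible; the sketch only reports
that an intersection exists.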
arch/x86/mm/pat_internal.h (+28)
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+        do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+        u64 start;
+        u64 end;
+        unsigned long type;
+        struct list_head nd;
+        struct rb_node rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+        switch (flags & _PAGE_CACHE_MASK) {
+        case _PAGE_CACHE_UC: return "uncached";
+        case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
+        case _PAGE_CACHE_WB: return "write-back";
+        case _PAGE_CACHE_WC: return "write-combining";
+        default: return "broken";
+        }
+}
+
+#endif /* __PAT_INTERNAL_H_ */
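The new header exists so that code outside pat.c, such as the rbtree-only backend promised
for the following patch, can share struct memtype, dprintk() and cattr_name(). As a rough
sketch of such a consumer (the function and the exact include list below are assumptions for
illustration, not part of this patch), another file in arch/x86/mm/ could do something like:

/* Hypothetical consumer of pat_internal.h; illustrative only, not in this patch. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <asm/pgtable_types.h>

#include "pat_internal.h"

/* Log a conflicting pair of ranges using the shared helpers from pat_internal.h. */
static void report_memtype_conflict(struct memtype *new, struct memtype *entry)
{
        dprintk("conflict at 0x%Lx-0x%Lx: req %s, existing %s\n",
                new->start, new->end,
                cattr_name(new->type), cattr_name(entry->type));
}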