[S390] Remove open-coded mem_map usage.

Use page_to_phys and pfn_to_page to avoid open-coded mem_map usage.
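
For readers unfamiliar with the helpers: on a flat memory model such as s390's, pfn_to_page(pfn) resolves to mem_map + pfn and page_to_pfn(page) to page - mem_map, so the conversions below are one-to-one rewrites of the removed pointer arithmetic. A minimal userspace sketch of that equivalence (the array size and PAGE_SHIFT value are arbitrary; the names mirror the kernel helpers, but this is an illustration, not kernel code):

/*
 * Standalone sketch of the FLATMEM-style helpers. mem_map is the flat
 * array of struct page, indexed by page frame number (pfn).
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct page { unsigned long flags; };

static struct page mem_map[1024];

#define pfn_to_page(pfn)   (mem_map + (pfn))
#define page_to_pfn(p)     ((unsigned long)((p) - mem_map))
#define page_to_phys(p)    (page_to_pfn(p) << PAGE_SHIFT)

int main(void)
{
	unsigned long pfn = 42;
	struct page *page = pfn_to_page(pfn);	/* was: mem_map + pfn */

	/* the open-coded form this patch removes ... */
	unsigned long old_phys = (unsigned long)(page - mem_map) << PAGE_SHIFT;

	/* ... computes the same value as the helper */
	assert(old_phys == page_to_phys(page));
	printf("pfn %lu -> phys 0x%lx\n", pfn, page_to_phys(page));
	return 0;
}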

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky (0b2b6e1d, 7676bef9)

5 files changed, 17 insertions(+), 19 deletions(-)

arch/s390/mm/init.c | +6 -4
@@ -62,19 +62,21 @@
 {
 	int i, total = 0, reserved = 0;
 	int shared = 0, cached = 0;
+	struct page *page;
 
 	printk("Mem-info:\n");
 	show_free_areas();
 	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
+		page = pfn_to_page(i);
 		total++;
-		if (PageReserved(mem_map+i))
+		if (PageReserved(page))
 			reserved++;
-		else if (PageSwapCache(mem_map+i))
+		else if (PageSwapCache(page))
 			cached++;
-		else if (page_count(mem_map+i))
-			shared += page_count(mem_map+i) - 1;
+		else if (page_count(page))
+			shared += page_count(page) - 1;
 	}
 	printk("%d pages of RAM\n",total);
 	printk("%d reserved pages\n",reserved);

include/asm-s390/io.h | -5
@@ -45,11 +45,6 @@
 	return __io_virt(address);
 }
 
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-
 extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
 static inline void * ioremap (unsigned long offset, unsigned long size)

include/asm-s390/page.h | +1
@@ -137,6 +137,7 @@
 #define __pa(x) (unsigned long)(x)
 #define __va(x) (void *)(unsigned long)(x)
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
 
 #define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
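
Note: with the flat-memory definition page_to_pfn(page) == page - mem_map, this new page_to_phys() computes exactly the value of the macro removed from include/asm-s390/io.h above; the arithmetic is unchanged, it is merely routed through the memory-model-aware pfn helpers instead of touching mem_map directly.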

include/asm-s390/pgalloc.h | +1 -1
@@ -116,7 +116,7 @@
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
-	pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
+	pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
 }
 
 /*

include/asm-s390/pgtable.h | +9 -9
@@ -599,7 +599,7 @@
  */
 static inline int page_test_and_clear_dirty(struct page *page)
 {
-	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	unsigned long physpage = page_to_phys(page);
 	int skey = page_get_storage_key(physpage);
 
 	if (skey & _PAGE_CHANGED)
@@ -612,13 +612,13 @@
  */
 static inline int page_test_and_clear_young(struct page *page)
 {
-	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	unsigned long physpage = page_to_phys(page);
 	int ccode;
 
-	asm volatile (
-		"rrbe 0,%1\n"
-		"ipm %0\n"
-		"srl %0,28\n"
+	asm volatile(
+		" rrbe 0,%1\n"
+		" ipm %0\n"
+		" srl %0,28\n"
 		: "=d" (ccode) : "a" (physpage) : "cc" );
 	return ccode & 2;
 }
@@ -636,7 +636,7 @@
 
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 {
-	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	unsigned long physpage = page_to_phys(page);
 
 	return mk_pte_phys(physpage, pgprot);
 }
@@ -664,11 +664,11 @@
 
 #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
 
-#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
 #define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
 
-#define pgd_page(pgd) (mem_map+(pgd_val(pgd) >> PAGE_SHIFT))
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))