Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: make FIRST_USER_ADDRESS unsigned long on all archs

LKP has triggered a compiler warning after my recent patch "mm: account
pmd page tables to the process":

mm/mmap.c: In function 'exit_mmap':
>> mm/mmap.c:2857:2: warning: right shift count >= width of type [enabled by default]

The code:

> 2857 WARN_ON(mm_nr_pmds(mm) >
2858 round_up(FIRST_USER_ADDRESS, PUD_SIZE) >> PUD_SHIFT);

In this, on tile, we have FIRST_USER_ADDRESS defined as a plain int 0, so
round_up() yields the same type -- int. Right-shifting that int result by
PUD_SHIFT (which on tile is >= the width of int) is what triggers the
"right shift count >= width of type" warning.

I think the best way to fix it is to define FIRST_USER_ADDRESS as unsigned
long. On every arch for consistency.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kirill A. Shutemov and committed by Linus Torvalds.
d016bf7e 3ae3ad4e

+29 -29
+1 -1
arch/alpha/include/asm/pgtable.h
··· 45 45 #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3)) 46 46 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3)) 47 47 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 48 - #define FIRST_USER_ADDRESS 0 48 + #define FIRST_USER_ADDRESS 0UL 49 49 50 50 /* Number of pointers that fit on a page: this will go away. */ 51 51 #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
+1 -1
arch/arc/include/asm/pgtable.h
··· 211 211 * No special requirements for lowest virtual address we permit any user space 212 212 * mapping to be mapped at. 213 213 */ 214 - #define FIRST_USER_ADDRESS 0 214 + #define FIRST_USER_ADDRESS 0UL 215 215 216 216 217 217 /****************************************************************
+1 -1
arch/arm/include/asm/pgtable-nommu.h
··· 85 85 #define VMALLOC_START 0UL 86 86 #define VMALLOC_END 0xffffffffUL 87 87 88 - #define FIRST_USER_ADDRESS (0) 88 + #define FIRST_USER_ADDRESS 0UL 89 89 90 90 #include <asm-generic/pgtable.h> 91 91
+1 -1
arch/arm64/include/asm/pgtable.h
··· 45 45 46 46 #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) 47 47 48 - #define FIRST_USER_ADDRESS 0 48 + #define FIRST_USER_ADDRESS 0UL 49 49 50 50 #ifndef __ASSEMBLY__ 51 51 extern void __pte_error(const char *file, int line, unsigned long val);
+1 -1
arch/avr32/include/asm/pgtable.h
··· 30 30 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 31 31 32 32 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 33 - #define FIRST_USER_ADDRESS 0 33 + #define FIRST_USER_ADDRESS 0UL 34 34 35 35 #ifndef __ASSEMBLY__ 36 36 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+1 -1
arch/cris/include/asm/pgtable.h
··· 67 67 */ 68 68 69 69 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 70 - #define FIRST_USER_ADDRESS 0 70 + #define FIRST_USER_ADDRESS 0UL 71 71 72 72 /* zero page used for uninitialized stuff */ 73 73 #ifndef __ASSEMBLY__
+1 -1
arch/frv/include/asm/pgtable.h
··· 140 140 #define PTRS_PER_PTE 4096 141 141 142 142 #define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE) 143 - #define FIRST_USER_ADDRESS 0 143 + #define FIRST_USER_ADDRESS 0UL 144 144 145 145 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 146 146 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
+1 -1
arch/hexagon/include/asm/pgtable.h
··· 171 171 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */ 172 172 173 173 /* Seems to be zero even in architectures where the zero page is firewalled? */ 174 - #define FIRST_USER_ADDRESS 0 174 + #define FIRST_USER_ADDRESS 0UL 175 175 #define pte_special(pte) 0 176 176 #define pte_mkspecial(pte) (pte) 177 177
+1 -1
arch/ia64/include/asm/pgtable.h
··· 127 127 #define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT 128 128 #define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) 129 129 #define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ 130 - #define FIRST_USER_ADDRESS 0 130 + #define FIRST_USER_ADDRESS 0UL 131 131 132 132 /* 133 133 * All the normal masks have the "page accessed" bits on, as any time
+1 -1
arch/m32r/include/asm/pgtable.h
··· 53 53 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 54 54 55 55 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 56 - #define FIRST_USER_ADDRESS 0 56 + #define FIRST_USER_ADDRESS 0UL 57 57 58 58 #ifndef __ASSEMBLY__ 59 59 /* Just any arbitrary offset to the start of the vmalloc VM area: the
+1 -1
arch/m68k/include/asm/pgtable_mm.h
··· 66 66 #define PTRS_PER_PGD 128 67 67 #endif 68 68 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 69 - #define FIRST_USER_ADDRESS 0 69 + #define FIRST_USER_ADDRESS 0UL 70 70 71 71 /* Virtual address region for use by kernel_map() */ 72 72 #ifdef CONFIG_SUN3
+1 -1
arch/microblaze/include/asm/pgtable.h
··· 72 72 #include <asm/mmu.h> 73 73 #include <asm/page.h> 74 74 75 - #define FIRST_USER_ADDRESS 0 75 + #define FIRST_USER_ADDRESS 0UL 76 76 77 77 extern unsigned long va_to_phys(unsigned long address); 78 78 extern pte_t *va_to_pte(unsigned long address);
+1 -1
arch/mips/include/asm/pgtable-32.h
··· 57 57 #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) 58 58 59 59 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 60 - #define FIRST_USER_ADDRESS 0 60 + #define FIRST_USER_ADDRESS 0UL 61 61 62 62 #define VMALLOC_START MAP_BASE 63 63
+1 -1
arch/mn10300/include/asm/pgtable.h
··· 65 65 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 66 66 67 67 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 68 - #define FIRST_USER_ADDRESS 0 68 + #define FIRST_USER_ADDRESS 0UL 69 69 70 70 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 71 71 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
+1 -1
arch/nios2/include/asm/pgtable.h
··· 24 24 #include <asm/pgtable-bits.h> 25 25 #include <asm-generic/pgtable-nopmd.h> 26 26 27 - #define FIRST_USER_ADDRESS 0 27 + #define FIRST_USER_ADDRESS 0UL 28 28 29 29 #define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE 30 30 #define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
+1 -1
arch/openrisc/include/asm/pgtable.h
··· 77 77 */ 78 78 79 79 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 80 - #define FIRST_USER_ADDRESS 0 80 + #define FIRST_USER_ADDRESS 0UL 81 81 82 82 /* 83 83 * Kernels own virtual memory area.
+1 -1
arch/parisc/include/asm/pgtable.h
··· 134 134 * pgd entries used up by user/kernel: 135 135 */ 136 136 137 - #define FIRST_USER_ADDRESS 0 137 + #define FIRST_USER_ADDRESS 0UL 138 138 139 139 /* NB: The tlb miss handlers make certain assumptions about the order */ 140 140 /* of the following bits, so be careful (One example, bits 25-31 */
+1 -1
arch/powerpc/include/asm/pgtable-ppc32.h
··· 45 45 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 46 46 47 47 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 48 - #define FIRST_USER_ADDRESS 0 48 + #define FIRST_USER_ADDRESS 0UL 49 49 50 50 #define pte_ERROR(e) \ 51 51 pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+1 -1
arch/powerpc/include/asm/pgtable-ppc64.h
··· 12 12 #endif 13 13 #include <asm/barrier.h> 14 14 15 - #define FIRST_USER_ADDRESS 0 15 + #define FIRST_USER_ADDRESS 0UL 16 16 17 17 /* 18 18 * Size of EA range mapped by our pagetables.
+1 -1
arch/s390/include/asm/pgtable.h
··· 99 99 #endif /* CONFIG_64BIT */ 100 100 #define PTRS_PER_PGD 2048 101 101 102 - #define FIRST_USER_ADDRESS 0 102 + #define FIRST_USER_ADDRESS 0UL 103 103 104 104 #define pte_ERROR(e) \ 105 105 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
+1 -1
arch/score/include/asm/pgtable.h
··· 27 27 #define PTRS_PER_PTE 1024 28 28 29 29 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 30 - #define FIRST_USER_ADDRESS 0 30 + #define FIRST_USER_ADDRESS 0UL 31 31 32 32 #define VMALLOC_START (0xc0000000UL) 33 33
+1 -1
arch/sh/include/asm/pgtable.h
··· 62 62 /* Entries per level */ 63 63 #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE)) 64 64 65 - #define FIRST_USER_ADDRESS 0 65 + #define FIRST_USER_ADDRESS 0UL 66 66 67 67 #define PHYS_ADDR_MASK29 0x1fffffff 68 68 #define PHYS_ADDR_MASK32 0xffffffff
+1 -1
arch/sparc/include/asm/pgtable_32.h
··· 44 44 #define PTRS_PER_PMD SRMMU_PTRS_PER_PMD 45 45 #define PTRS_PER_PGD SRMMU_PTRS_PER_PGD 46 46 #define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE 47 - #define FIRST_USER_ADDRESS 0 47 + #define FIRST_USER_ADDRESS 0UL 48 48 #define PTE_SIZE (PTRS_PER_PTE*4) 49 49 50 50 #define PAGE_NONE SRMMU_PAGE_NONE
+1 -1
arch/sparc/include/asm/pgtable_64.h
··· 93 93 #define PTRS_PER_PGD (1UL << PGDIR_BITS) 94 94 95 95 /* Kernel has a separate 44bit address space. */ 96 - #define FIRST_USER_ADDRESS 0 96 + #define FIRST_USER_ADDRESS 0UL 97 97 98 98 #define pmd_ERROR(e) \ 99 99 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
+1 -1
arch/tile/include/asm/pgtable.h
··· 67 67 extern void paging_init(void); 68 68 extern void set_page_homes(void); 69 69 70 - #define FIRST_USER_ADDRESS 0 70 + #define FIRST_USER_ADDRESS 0UL 71 71 72 72 #define _PAGE_PRESENT HV_PTE_PRESENT 73 73 #define _PAGE_HUGE_PAGE HV_PTE_PAGE
+1 -1
arch/um/include/asm/pgtable-2level.h
··· 23 23 #define PTRS_PER_PTE 1024 24 24 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) 25 25 #define PTRS_PER_PGD 1024 26 - #define FIRST_USER_ADDRESS 0 26 + #define FIRST_USER_ADDRESS 0UL 27 27 28 28 #define pte_ERROR(e) \ 29 29 printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+1 -1
arch/um/include/asm/pgtable-3level.h
··· 41 41 #endif 42 42 43 43 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) 44 - #define FIRST_USER_ADDRESS 0 44 + #define FIRST_USER_ADDRESS 0UL 45 45 46 46 #define pte_ERROR(e) \ 47 47 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+1 -1
arch/x86/include/asm/pgtable_types.h
··· 4 4 #include <linux/const.h> 5 5 #include <asm/page_types.h> 6 6 7 - #define FIRST_USER_ADDRESS 0 7 + #define FIRST_USER_ADDRESS 0UL 8 8 9 9 #define _PAGE_BIT_PRESENT 0 /* is present */ 10 10 #define _PAGE_BIT_RW 1 /* writeable */
+1 -1
arch/xtensa/include/asm/pgtable.h
··· 57 57 #define PTRS_PER_PGD 1024 58 58 #define PGD_ORDER 0 59 59 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 60 - #define FIRST_USER_ADDRESS 0 60 + #define FIRST_USER_ADDRESS 0UL 61 61 #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) 62 62 63 63 /*