Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: mark "hardwall" device as non-seekable
  asm-generic/stat.h: support 64-bit file time_t for stat()
  arch/tile: don't allow user code to set the PL via ptrace or signal return
  arch/tile: correct double syscall restart for nested signals
  arch/tile: avoid __must_check warning on one strict_strtol check
  arch/tile: bomb raw_local_irq_* to arch_local_irq_*
  arch/tile: complete migration to new kmap_atomic scheme

+110 -76
-1
arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
+24 -10
arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly.  In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space.  For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+        KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
         KM_BOUNCE_READ,
         KM_SKB_SUNRPC_DATA,
         KM_SKB_DATA_SOFTIRQ,
         KM_USER0,
         KM_USER1,
         KM_BIO_SRC_IRQ,
+        KM_BIO_DST_IRQ,
+        KM_PTE0,
+        KM_PTE1,
         KM_IRQ0,
         KM_IRQ1,
         KM_SOFTIRQ0,
         KM_SOFTIRQ1,
-        KM_MEMCPY0,
-        KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-        KM_PTE0,
-        KM_PTE1,
-#endif
-        KM_TYPE_NR
+        KM_SYNC_ICACHE,
+        KM_SYNC_DCACHE,
+        KM_UML_USERCOPY,
+        KM_IRQ_PTE,
+        KM_NMI,
+        KM_NMI_PTE,
+        KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
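
The new scheme replaces fixed km_type slots with a small per-cpu stack of depth KM_TYPE_NR (8 here): callers claim the next slot on map and release it in LIFO order on unmap. A rough sketch of that pairing, modeled on the generic stack-based kmap_atomic rework rather than tile's exact implementation (the sketch_* names are hypothetical):

/* Sketch only: the shape of an arch kmap_atomic() under the new scheme. */
void *sketch_kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();          /* claim next per-cpu slot */
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        /* ... install the pte for this fixmap slot here ... */
        return (void *)vaddr;
}

void sketch_kunmap_atomic(void *kvaddr)
{
        /* ... tear down the pte for the slot ... */
        kmap_atomic_idx_pop();                  /* release slot, LIFO order */
        pagefault_enable();
}
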
+2 -4
arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-        _pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
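
With the km_type argument gone, HIGHPTE page-table access becomes a plain map/unmap pair. A minimal caller sketch (read_user_pte is a hypothetical helper, not part of this merge):

/* Hypothetical helper: fetch a pte value via the new interface. */
static pte_t read_user_pte(pmd_t *pmd, unsigned long address)
{
        pte_t *ptep = pte_offset_map(pmd, address);
        pte_t pte = *ptep;

        pte_unmap(ptep);        /* kunmap_atomic(ptep) when CONFIG_HIGHPTE */
        return pte;
}
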
+3
arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64      /* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
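
Defining __ARCH_WANT_STAT64 makes fs/stat.c compile the stat64 family of syscalls, which the compat definitions further down then select. The generic entry point looks roughly like this (simplified from fs/stat.c of this era, not quoted verbatim):

#ifdef __ARCH_WANT_STAT64
SYSCALL_DEFINE2(stat64, const char __user *, filename,
                struct stat64 __user *, statbuf)
{
        struct kstat stat;
        int error = vfs_stat(filename, &stat);

        if (!error)
                error = cp_new_stat64(&stat, statbuf);  /* fill struct stat64 */
        return error;
}
#endif
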
+1
arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
+5 -5
arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
+1 -1
arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@
 void early_panic(const char *fmt, ...)
 {
         va_list ap;
-        raw_local_irq_disable_all();
+        arch_local_irq_disable_all();
         va_start(ap, fmt);
         early_printk("Kernel panic - not syncing: ");
         early_vprintk(fmt, ap);
+3 -3
arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@
 
 static void enable_firewall_interrupts(void)
 {
-        raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+        arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-        raw_local_irq_mask_now(INT_UDN_FIREWALL);
+        arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@
 }
 
 static const struct file_operations dev_hardwall_fops = {
+        .open           = nonseekable_open,
         .unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
         .compat_ioctl   = hardwall_compat_ioctl,
 #endif
         .flush          = hardwall_flush,
         .release        = hardwall_release,
-        .llseek         = noop_llseek,
 };
 
 static struct cdev hardwall_dev;
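
Pointing .open at nonseekable_open marks the file non-seekable at open time instead of leaving a no-op llseek in place. A driver with its own open routine can get the same effect by calling it last (hypo_dev_open is a hypothetical example):

/* Hypothetical driver open: do local setup, then mark non-seekable. */
static int hypo_dev_open(struct inode *inode, struct file *filp)
{
        filp->private_data = NULL;      /* driver-specific setup would go here */
        return nonseekable_open(inode, filp);
}
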
+2 -2
arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@
         /* Enable interrupt delivery. */
         unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-        raw_local_irq_unmask(INT_IPI_K);
+        arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
+3 -3
arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@
 
                 if ((entry & IND_SOURCE)) {
                         void *va =
-                                kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+                                kmap_atomic_pfn(entry >> PAGE_SHIFT);
                         r = kexec_bn2cl(va);
                         if (r) {
                                 command_line = r;
                                 break;
                         }
-                        kunmap_atomic(va, KM_USER0);
+                        kunmap_atomic(va);
                 }
         }
 
@@ -198,7 +198,7 @@
 
                 hverr = hv_set_command_line(
                         (HV_VirtAddr) command_line, strlen(command_line));
-                kunmap_atomic(command_line, KM_USER0);
+                kunmap_atomic(command_line);
         } else {
                 pr_info("%s: no command line found; making empty\n",
                         __func__);
+1 -1
arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@
                 panic("hv_register_message_state: error %d", rc);
 
         /* Make sure downcall interrupts will be enabled. */
-        raw_local_irq_unmask(INT_INTCTRL_K);
+        arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
+21 -18
arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@
 {
         unsigned long __user *datap = (long __user __force *)data;
         unsigned long tmp;
-        int i;
         long ret = -EIO;
-        unsigned long *childregs;
         char *childreg;
+        struct pt_regs copyregs;
+        int ex1_offset;
 
         switch (request) {
 
@@ -80,6 +80,16 @@
                 if (addr >= PTREGS_SIZE)
                         break;
                 childreg = (char *)task_pt_regs(child) + addr;
+
+                /* Guard against overwrites of the privilege level. */
+                ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+                if (is_compat_task())   /* point at low word */
+                        ex1_offset += sizeof(compat_long_t);
+#endif
+                if (addr == ex1_offset)
+                        data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
                 if (is_compat_task()) {
                         if (addr & (sizeof(compat_long_t)-1))
@@ -106,26 +116,19 @@
                 break;
 
         case PTRACE_GETREGS:    /* Get all registers from the child. */
-                if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-                        break;
-                childregs = (long *)task_pt_regs(child);
-                for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-                     ++i) {
-                        ret = __put_user(childregs[i], &datap[i]);
-                        if (ret != 0)
-                                break;
+                if (copy_to_user(datap, task_pt_regs(child),
+                                 sizeof(struct pt_regs)) == 0) {
+                        ret = 0;
                 }
                 break;
 
         case PTRACE_SETREGS:    /* Set all registers in the child. */
-                if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-                        break;
-                childregs = (long *)task_pt_regs(child);
-                for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-                     ++i) {
-                        ret = __get_user(childregs[i], &datap[i]);
-                        if (ret != 0)
-                                break;
+                if (copy_from_user(&copyregs, datap,
+                                   sizeof(struct pt_regs)) == 0) {
+                        copyregs.ex1 =
+                                PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+                        *task_pt_regs(child) = copyregs;
+                        ret = 0;
                 }
                 break;
 
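
EX1 packs both the privilege level and the interrupt-critical-section state, so both ptrace write paths rebuild user-supplied values rather than trusting them. The guard reduces to this (sanitize_ex1 is an illustrative helper, not in the patch):

/* Illustrative only: keep the user's ICS bit, force the PL to USER_PL. */
static unsigned long sanitize_ex1(unsigned long user_ex1)
{
        return PL_ICS_EX1(USER_PL, EX1_ICS(user_ex1));
}
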
+3 -3
arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
         warn_early_printk();
-        raw_local_irq_disable_all();
+        arch_local_irq_disable_all();
         smp_send_stop();
         hv_halt();
 }
@@ -35,14 +35,14 @@
 void machine_power_off(void)
 {
         warn_early_printk();
-        raw_local_irq_disable_all();
+        arch_local_irq_disable_all();
         smp_send_stop();
         hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-        raw_local_irq_disable_all();
+        arch_local_irq_disable_all();
         smp_send_stop();
         hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
+4 -4
arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@
 
         /* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-        raw_local_irq_unmask(INT_DMATLB_MISS);
-        raw_local_irq_unmask(INT_DMATLB_ACCESS);
+        arch_local_irq_unmask(INT_DMATLB_MISS);
+        arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-        raw_local_irq_unmask(INT_SNITLB_MISS);
+        arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-        raw_local_irq_unmask(INT_SINGLE_STEP_K);
+        arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
         /*
+8 -1
arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@
         for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
                 err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
+        /* Ensure that the PL is always set to USER_PL. */
+        regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
         regs->faultnum = INT_SWINT_1_SIGRETURN;
 
         err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,5 +333,5 @@
                         current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 }
 
-                return;
+                goto done;
         }
@@ -358,4 +361,8 @@
                 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
         }
+
+done:
+        /* Avoid double syscall restart if there are nested signals. */
+        regs->faultnum = INT_SWINT_1_SIGRETURN;
 }
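
The restart logic keys on regs->faultnum == INT_SWINT_1, so routing every exit through done: and stamping INT_SWINT_1_SIGRETURN guarantees a nested pass through do_signal() cannot match the test again and back the PC up twice. The test being defused looks roughly like this (abridged from the same file; the register fix-up details are elided):

        /* Did we come from a system call? */
        if (regs->faultnum == INT_SWINT_1) {
                /* Restart the system call - no handlers present */
                switch (regs->regs[0]) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        /* ... back the PC up to re-issue the swint1 ... */
                        break;
                }
        }
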
+1 -1
arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@
 static void smp_stop_cpu_interrupt(void)
 {
         set_cpu_online(smp_processor_id(), 0);
-        raw_local_irq_disable_all();
+        arch_local_irq_disable_all();
         for (;;)
                 asm("nap");
 }
+4 -4
arch/tile/kernel/time.c
@@ -132,7 +132,7 @@
 {
         BUG_ON(ticks > MAX_TICK);
         __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-        raw_local_irq_unmask_now(INT_TILE_TIMER);
+        arch_local_irq_unmask_now(INT_TILE_TIMER);
         return 0;
 }
 
@@ -143,7 +143,7 @@
 static void tile_timer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
 {
-        raw_local_irq_mask_now(INT_TILE_TIMER);
+        arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@
         evt->cpumask = cpumask_of(smp_processor_id());
 
         /* Start out with timer not firing. */
-        raw_local_irq_mask_now(INT_TILE_TIMER);
+        arch_local_irq_mask_now(INT_TILE_TIMER);
 
         /* Register tile timer. */
         clockevents_register_device(evt);
@@ -188,7 +188,7 @@
          * Mask the timer interrupt here, since we are a oneshot timer
          * and there are now by definition no events pending.
          */
-        raw_local_irq_mask(INT_TILE_TIMER);
+        arch_local_irq_mask(INT_TILE_TIMER);
 
         /* Track time spent here in an interrupt context */
         irq_enter();
+8 -3
arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@
  * we must run with interrupts disabled to avoid the risk of some
  * other code seeing the incoherent data in our cache.  (Recall that
  * our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
  * the normal VAs that aren't supposed to hit in cache.)
  */
 static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@
         unsigned long flags, newsrc, newdst;
         pmd_t *pmdp;
         pte_t *ptep;
+        int type0, type1;
         int cpu = get_cpu();
 
         /*
@@ -77,7 +78,8 @@
         sim_allow_multiple_caching(1);
 
         /* Set up the new dest mapping */
-        idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+        type0 = kmap_atomic_idx_push();
+        idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
         newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
         pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
         ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@
         }
 
         /* Set up the new source mapping */
-        idx += (KM_MEMCPY0 - KM_MEMCPY1);
+        type1 = kmap_atomic_idx_push();
+        idx += (type0 - type1);
         src_pte = hv_pte_set_nc(src_pte);
         src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
         newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@
          * We're done: notify the simulator that all is back to normal,
          * and re-enable interrupts and pre-emption.
          */
+        kmap_atomic_idx_pop();
+        kmap_atomic_idx_pop();
         sim_allow_multiple_caching(0);
         local_irq_restore(flags);
         put_cpu();
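
The fixmap index math gives each CPU its own contiguous block of KM_TYPE_NR slots, so a pushed type maps to a unique per-cpu virtual address; the two pops at the end then unwind the two pushes in LIFO order. As a worked form of the expression above (kmap_slot_va is a hypothetical helper):

/* Hypothetical helper: VA of atomic-kmap slot "type" on CPU "cpu". */
static unsigned long kmap_slot_va(int cpu, int type)
{
        return __fix_to_virt(FIX_KMAP_BEGIN + KM_TYPE_NR * cpu + type);
}
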
+1 -1
arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@
 void *__kmap_atomic(struct page *page)
 {
         /* PAGE_NONE is a magic value that tells us to check immutability. */
-        return kmap_atomic_prot(page, type, PAGE_NONE);
+        return kmap_atomic_prot(page, PAGE_NONE);
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
+6 -2
arch/tile/mm/init.c
@@ -988,8 +988,12 @@
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-        strict_strtol(str, 0, &initfree);
-        pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+        long val;
+        if (strict_strtol(str, 0, &val) == 0) {
+                initfree = val;
+                pr_info("initfree: %s free init pages\n",
+                        initfree ? "will" : "won't");
+        }
         return 1;
 }
 __setup("initfree=", set_initfree);
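
strict_strtol() returns 0 on success and -EINVAL on a malformed string, hence the == 0 test; checking the return value is also what silences the __must_check warning the commit title mentions. The same pattern works for any integer boot parameter (set_example and example_setting are hypothetical):

static long example_setting;

/* Sketch: parse an integer boot parameter without ignoring errors. */
static int __init set_example(char *str)
{
        long val;

        if (strict_strtol(str, 0, &val) == 0)   /* 0 means success */
                example_setting = val;
        return 1;       /* tell the boot code the option was consumed */
}
__setup("example=", set_example);
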
+2 -2
arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@
 }
 
 #if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *pte_offset_map(pmd_t *dir, unsigned long address)
 {
-        pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+        pte_t *pte = kmap_atomic(pmd_page(*dir)) +
                 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
         return &pte[pte_index(address)];
 }
+7 -7
include/asm-generic/stat.h
@@ -33,18 +33,18 @@
         int st_blksize;         /* Optimal block size for I/O. */
         int __pad2;
         long st_blocks;         /* Number 512-byte blocks allocated. */
-        int st_atime;           /* Time of last access. */
-        unsigned int st_atime_nsec;
-        int st_mtime;           /* Time of last modification. */
-        unsigned int st_mtime_nsec;
-        int st_ctime;           /* Time of last status change. */
-        unsigned int st_ctime_nsec;
+        long st_atime;          /* Time of last access. */
+        unsigned long st_atime_nsec;
+        long st_mtime;          /* Time of last modification. */
+        unsigned long st_mtime_nsec;
+        long st_ctime;          /* Time of last status change. */
+        unsigned long st_ctime_nsec;
         unsigned int __unused4;
         unsigned int __unused5;
 };
 
-#if __BITS_PER_LONG != 64
 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
 struct stat64 {
         unsigned long long st_dev;      /* Device. */
         unsigned long long st_ino;      /* File serial number. */