Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
arch/tile: mark "hardwall" device as non-seekable
asm-generic/stat.h: support 64-bit file time_t for stat()
arch/tile: don't allow user code to set the PL via ptrace or signal return
arch/tile: correct double syscall restart for nested signals
arch/tile: avoid __must_check warning on one strict_strtol check
arch/tile: bomb raw_local_irq_ to arch_local_irq_
arch/tile: complete migration to new kmap_atomic scheme

+110 -76
-1
arch/tile/include/asm/highmem.h
··· 23 24 #include <linux/interrupt.h> 25 #include <linux/threads.h> 26 - #include <asm/kmap_types.h> 27 #include <asm/tlbflush.h> 28 #include <asm/homecache.h> 29
··· 23 24 #include <linux/interrupt.h> 25 #include <linux/threads.h> 26 #include <asm/tlbflush.h> 27 #include <asm/homecache.h> 28
+24 -10
arch/tile/include/asm/kmap_types.h
··· 16 #define _ASM_TILE_KMAP_TYPES_H 17 18 /* 19 - * In TILE Linux each set of four of these uses another 16MB chunk of 20 - * address space, given 64 tiles and 64KB pages, so we only enable 21 - * ones that are required by the kernel configuration. 22 */ 23 enum km_type { 24 KM_BOUNCE_READ, 25 KM_SKB_SUNRPC_DATA, 26 KM_SKB_DATA_SOFTIRQ, 27 KM_USER0, 28 KM_USER1, 29 KM_BIO_SRC_IRQ, 30 KM_IRQ0, 31 KM_IRQ1, 32 KM_SOFTIRQ0, 33 KM_SOFTIRQ1, 34 - KM_MEMCPY0, 35 - KM_MEMCPY1, 36 - #if defined(CONFIG_HIGHPTE) 37 - KM_PTE0, 38 - KM_PTE1, 39 - #endif 40 - KM_TYPE_NR 41 }; 42 43 #endif /* _ASM_TILE_KMAP_TYPES_H */
··· 16 #define _ASM_TILE_KMAP_TYPES_H 17 18 /* 19 + * In 32-bit TILE Linux we have to balance the desire to have a lot of 20 + * nested atomic mappings with the fact that large page sizes and many 21 + * processors chew up address space quickly. In a typical 22 + * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger 23 + * adds 4MB of required address-space. For now we leave KM_TYPE_NR 24 + * set to depth 8. 25 */ 26 enum km_type { 27 + KM_TYPE_NR = 8 28 + }; 29 + 30 + /* 31 + * We provide dummy definitions of all the stray values that used to be 32 + * required for kmap_atomic() and no longer are. 33 + */ 34 + enum { 35 KM_BOUNCE_READ, 36 KM_SKB_SUNRPC_DATA, 37 KM_SKB_DATA_SOFTIRQ, 38 KM_USER0, 39 KM_USER1, 40 KM_BIO_SRC_IRQ, 41 + KM_BIO_DST_IRQ, 42 + KM_PTE0, 43 + KM_PTE1, 44 KM_IRQ0, 45 KM_IRQ1, 46 KM_SOFTIRQ0, 47 KM_SOFTIRQ1, 48 + KM_SYNC_ICACHE, 49 + KM_SYNC_DCACHE, 50 + KM_UML_USERCOPY, 51 + KM_IRQ_PTE, 52 + KM_NMI, 53 + KM_NMI_PTE, 54 + KM_KDB 55 }; 56 57 #endif /* _ASM_TILE_KMAP_TYPES_H */
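The header change above caps the per-cpu atomic-mapping depth at KM_TYPE_NR = 8 and keeps the old km_type names only as inert dummies; callers drop the explicit slot argument, as the later hunks in this merge do. A minimal sketch of the before/after calling convention (illustrative only; page, buf and len are placeholder variables, not part of the diff):

    /* Old scheme: the caller names a fixed per-use slot. */
    void *va = kmap_atomic(page, KM_USER0);
    memcpy(buf, va, len);
    kunmap_atomic(va, KM_USER0);

    /* New scheme: slots form a small per-cpu stack, so the caller
     * simply maps and unmaps in LIFO order. */
    void *va = kmap_atomic(page);
    memcpy(buf, va, len);
    kunmap_atomic(va);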
+2 -4
arch/tile/include/asm/pgtable.h
··· 344 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 345 346 #if defined(CONFIG_HIGHPTE) 347 - extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); 348 - #define pte_offset_map(dir, address) \ 349 - _pte_offset_map(dir, address, KM_PTE0) 350 - #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) 351 #else 352 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) 353 #define pte_unmap(pte) do { } while (0)
··· 344 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 345 346 #if defined(CONFIG_HIGHPTE) 347 + extern pte_t *pte_offset_map(pmd_t *, unsigned long address); 348 + #define pte_unmap(pte) kunmap_atomic(pte) 349 #else 350 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) 351 #define pte_unmap(pte) do { } while (0)
+3
arch/tile/include/asm/stat.h
··· 1 #include <asm-generic/stat.h>
··· 1 + #ifdef CONFIG_COMPAT 2 + #define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */ 3 + #endif 4 #include <asm-generic/stat.h>
+1
arch/tile/include/asm/unistd.h
··· 41 #ifdef CONFIG_COMPAT 42 #define __ARCH_WANT_SYS_LLSEEK 43 #endif 44 #endif 45 46 #endif /* _ASM_TILE_UNISTD_H */
··· 41 #ifdef CONFIG_COMPAT 42 #define __ARCH_WANT_SYS_LLSEEK 43 #endif 44 + #define __ARCH_WANT_SYS_NEWFSTATAT 45 #endif 46 47 #endif /* _ASM_TILE_UNISTD_H */
+5 -5
arch/tile/kernel/compat.c
··· 148 #define compat_sys_readahead sys32_readahead 149 #define compat_sys_sync_file_range compat_sys_sync_file_range2 150 151 - /* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */ 152 - #define compat_sys_stat64 sys_newstat 153 - #define compat_sys_lstat64 sys_newlstat 154 - #define compat_sys_fstat64 sys_newfstat 155 - #define compat_sys_fstatat64 sys_newfstatat 156 157 /* The native sys_ptrace dynamically handles compat binaries. */ 158 #define compat_sys_ptrace sys_ptrace
··· 148 #define compat_sys_readahead sys32_readahead 149 #define compat_sys_sync_file_range compat_sys_sync_file_range2 150 151 + /* We leverage the "struct stat64" type for 32-bit time_t/nsec. */ 152 + #define compat_sys_stat64 sys_stat64 153 + #define compat_sys_lstat64 sys_lstat64 154 + #define compat_sys_fstat64 sys_fstat64 155 + #define compat_sys_fstatat64 sys_fstatat64 156 157 /* The native sys_ptrace dynamically handles compat binaries. */ 158 #define compat_sys_ptrace sys_ptrace
+1 -1
arch/tile/kernel/early_printk.c
··· 54 void early_panic(const char *fmt, ...) 55 { 56 va_list ap; 57 - raw_local_irq_disable_all(); 58 va_start(ap, fmt); 59 early_printk("Kernel panic - not syncing: "); 60 early_vprintk(fmt, ap);
··· 54 void early_panic(const char *fmt, ...) 55 { 56 va_list ap; 57 + arch_local_irq_disable_all(); 58 va_start(ap, fmt); 59 early_printk("Kernel panic - not syncing: "); 60 early_vprintk(fmt, ap);
+3 -3
arch/tile/kernel/hardwall.c
··· 151 152 static void enable_firewall_interrupts(void) 153 { 154 - raw_local_irq_unmask_now(INT_UDN_FIREWALL); 155 } 156 157 static void disable_firewall_interrupts(void) 158 { 159 - raw_local_irq_mask_now(INT_UDN_FIREWALL); 160 } 161 162 /* Set up hardwall on this cpu based on the passed hardwall_info. */ ··· 768 } 769 770 static const struct file_operations dev_hardwall_fops = { 771 .unlocked_ioctl = hardwall_ioctl, 772 #ifdef CONFIG_COMPAT 773 .compat_ioctl = hardwall_compat_ioctl, 774 #endif 775 .flush = hardwall_flush, 776 .release = hardwall_release, 777 - .llseek = noop_llseek, 778 }; 779 780 static struct cdev hardwall_dev;
··· 151 152 static void enable_firewall_interrupts(void) 153 { 154 + arch_local_irq_unmask_now(INT_UDN_FIREWALL); 155 } 156 157 static void disable_firewall_interrupts(void) 158 { 159 + arch_local_irq_mask_now(INT_UDN_FIREWALL); 160 } 161 162 /* Set up hardwall on this cpu based on the passed hardwall_info. */ ··· 768 } 769 770 static const struct file_operations dev_hardwall_fops = { 771 + .open = nonseekable_open, 772 .unlocked_ioctl = hardwall_ioctl, 773 #ifdef CONFIG_COMPAT 774 .compat_ioctl = hardwall_compat_ioctl, 775 #endif 776 .flush = hardwall_flush, 777 .release = hardwall_release, 778 }; 779 780 static struct cdev hardwall_dev;
+2 -2
arch/tile/kernel/irq.c
··· 26 #define IS_HW_CLEARED 1 27 28 /* 29 - * The set of interrupts we enable for raw_local_irq_enable(). 30 * This is initialized to have just a single interrupt that the kernel 31 * doesn't actually use as a sentinel. During kernel init, 32 * interrupts are added as the kernel gets prepared to support them. ··· 225 /* Enable interrupt delivery. */ 226 unmask_irqs(~0UL); 227 #if CHIP_HAS_IPI() 228 - raw_local_irq_unmask(INT_IPI_K); 229 #endif 230 } 231
··· 26 #define IS_HW_CLEARED 1 27 28 /* 29 + * The set of interrupts we enable for arch_local_irq_enable(). 30 * This is initialized to have just a single interrupt that the kernel 31 * doesn't actually use as a sentinel. During kernel init, 32 * interrupts are added as the kernel gets prepared to support them. ··· 225 /* Enable interrupt delivery. */ 226 unmask_irqs(~0UL); 227 #if CHIP_HAS_IPI() 228 + arch_local_irq_unmask(INT_IPI_K); 229 #endif 230 } 231
+3 -3
arch/tile/kernel/machine_kexec.c
··· 182 183 if ((entry & IND_SOURCE)) { 184 void *va = 185 - kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0); 186 r = kexec_bn2cl(va); 187 if (r) { 188 command_line = r; 189 break; 190 } 191 - kunmap_atomic(va, KM_USER0); 192 } 193 } 194 ··· 198 199 hverr = hv_set_command_line( 200 (HV_VirtAddr) command_line, strlen(command_line)); 201 - kunmap_atomic(command_line, KM_USER0); 202 } else { 203 pr_info("%s: no command line found; making empty\n", 204 __func__);
··· 182 183 if ((entry & IND_SOURCE)) { 184 void *va = 185 + kmap_atomic_pfn(entry >> PAGE_SHIFT); 186 r = kexec_bn2cl(va); 187 if (r) { 188 command_line = r; 189 break; 190 } 191 + kunmap_atomic(va); 192 } 193 } 194 ··· 198 199 hverr = hv_set_command_line( 200 (HV_VirtAddr) command_line, strlen(command_line)); 201 + kunmap_atomic(command_line); 202 } else { 203 pr_info("%s: no command line found; making empty\n", 204 __func__);
+1 -1
arch/tile/kernel/messaging.c
··· 34 panic("hv_register_message_state: error %d", rc); 35 36 /* Make sure downcall interrupts will be enabled. */ 37 - raw_local_irq_unmask(INT_INTCTRL_K); 38 } 39 40 void hv_message_intr(struct pt_regs *regs, int intnum)
··· 34 panic("hv_register_message_state: error %d", rc); 35 36 /* Make sure downcall interrupts will be enabled. */ 37 + arch_local_irq_unmask(INT_INTCTRL_K); 38 } 39 40 void hv_message_intr(struct pt_regs *regs, int intnum)
+21 -18
arch/tile/kernel/ptrace.c
··· 50 { 51 unsigned long __user *datap = (long __user __force *)data; 52 unsigned long tmp; 53 - int i; 54 long ret = -EIO; 55 - unsigned long *childregs; 56 char *childreg; 57 58 switch (request) { 59 ··· 80 if (addr >= PTREGS_SIZE) 81 break; 82 childreg = (char *)task_pt_regs(child) + addr; 83 #ifdef CONFIG_COMPAT 84 if (is_compat_task()) { 85 if (addr & (sizeof(compat_long_t)-1)) ··· 106 break; 107 108 case PTRACE_GETREGS: /* Get all registers from the child. */ 109 - if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) 110 - break; 111 - childregs = (long *)task_pt_regs(child); 112 - for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long); 113 - ++i) { 114 - ret = __put_user(childregs[i], &datap[i]); 115 - if (ret != 0) 116 - break; 117 } 118 break; 119 120 case PTRACE_SETREGS: /* Set all registers in the child. */ 121 - if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) 122 - break; 123 - childregs = (long *)task_pt_regs(child); 124 - for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long); 125 - ++i) { 126 - ret = __get_user(childregs[i], &datap[i]); 127 - if (ret != 0) 128 - break; 129 } 130 break; 131
··· 50 { 51 unsigned long __user *datap = (long __user __force *)data; 52 unsigned long tmp; 53 long ret = -EIO; 54 char *childreg; 55 + struct pt_regs copyregs; 56 + int ex1_offset; 57 58 switch (request) { 59 ··· 80 if (addr >= PTREGS_SIZE) 81 break; 82 childreg = (char *)task_pt_regs(child) + addr; 83 + 84 + /* Guard against overwrites of the privilege level. */ 85 + ex1_offset = PTREGS_OFFSET_EX1; 86 + #if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN) 87 + if (is_compat_task()) /* point at low word */ 88 + ex1_offset += sizeof(compat_long_t); 89 + #endif 90 + if (addr == ex1_offset) 91 + data = PL_ICS_EX1(USER_PL, EX1_ICS(data)); 92 + 93 #ifdef CONFIG_COMPAT 94 if (is_compat_task()) { 95 if (addr & (sizeof(compat_long_t)-1)) ··· 96 break; 97 98 case PTRACE_GETREGS: /* Get all registers from the child. */ 99 + if (copy_to_user(datap, task_pt_regs(child), 100 + sizeof(struct pt_regs)) == 0) { 101 + ret = 0; 102 } 103 break; 104 105 case PTRACE_SETREGS: /* Set all registers in the child. */ 106 + if (copy_from_user(&copyregs, datap, 107 + sizeof(struct pt_regs)) == 0) { 108 + copyregs.ex1 = 109 + PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1)); 110 + *task_pt_regs(child) = copyregs; 111 + ret = 0; 112 } 113 break; 114
+3 -3
arch/tile/kernel/reboot.c
··· 27 void machine_halt(void) 28 { 29 warn_early_printk(); 30 - raw_local_irq_disable_all(); 31 smp_send_stop(); 32 hv_halt(); 33 } ··· 35 void machine_power_off(void) 36 { 37 warn_early_printk(); 38 - raw_local_irq_disable_all(); 39 smp_send_stop(); 40 hv_power_off(); 41 } 42 43 void machine_restart(char *cmd) 44 { 45 - raw_local_irq_disable_all(); 46 smp_send_stop(); 47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); 48 }
··· 27 void machine_halt(void) 28 { 29 warn_early_printk(); 30 + arch_local_irq_disable_all(); 31 smp_send_stop(); 32 hv_halt(); 33 } ··· 35 void machine_power_off(void) 36 { 37 warn_early_printk(); 38 + arch_local_irq_disable_all(); 39 smp_send_stop(); 40 hv_power_off(); 41 } 42 43 void machine_restart(char *cmd) 44 { 45 + arch_local_irq_disable_all(); 46 smp_send_stop(); 47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); 48 }
+4 -4
arch/tile/kernel/setup.c
··· 868 869 /* Allow asynchronous TLB interrupts. */ 870 #if CHIP_HAS_TILE_DMA() 871 - raw_local_irq_unmask(INT_DMATLB_MISS); 872 - raw_local_irq_unmask(INT_DMATLB_ACCESS); 873 #endif 874 #if CHIP_HAS_SN_PROC() 875 - raw_local_irq_unmask(INT_SNITLB_MISS); 876 #endif 877 #ifdef __tilegx__ 878 - raw_local_irq_unmask(INT_SINGLE_STEP_K); 879 #endif 880 881 /*
··· 868 869 /* Allow asynchronous TLB interrupts. */ 870 #if CHIP_HAS_TILE_DMA() 871 + arch_local_irq_unmask(INT_DMATLB_MISS); 872 + arch_local_irq_unmask(INT_DMATLB_ACCESS); 873 #endif 874 #if CHIP_HAS_SN_PROC() 875 + arch_local_irq_unmask(INT_SNITLB_MISS); 876 #endif 877 #ifdef __tilegx__ 878 + arch_local_irq_unmask(INT_SINGLE_STEP_K); 879 #endif 880 881 /*
+8 -1
arch/tile/kernel/signal.c
··· 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 72 err |= __get_user(regs->regs[i], &sc->gregs[i]); 73 74 regs->faultnum = INT_SWINT_1_SIGRETURN; 75 76 err |= __get_user(*pr0, &sc->gregs[0]); ··· 333 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 334 } 335 336 - return; 337 } 338 339 /* Did we come from a system call? */ ··· 361 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 362 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 363 } 364 }
··· 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 72 err |= __get_user(regs->regs[i], &sc->gregs[i]); 73 74 + /* Ensure that the PL is always set to USER_PL. */ 75 + regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1)); 76 + 77 regs->faultnum = INT_SWINT_1_SIGRETURN; 78 79 err |= __get_user(*pr0, &sc->gregs[0]); ··· 330 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 331 } 332 333 + goto done; 334 } 335 336 /* Did we come from a system call? */ ··· 358 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 359 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 360 } 361 + 362 + done: 363 + /* Avoid double syscall restart if there are nested signals. */ 364 + regs->faultnum = INT_SWINT_1_SIGRETURN; 365 }
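All three user-controlled register-restore paths touched by this merge (the PTRACE_POKEUSR write to the EX1 slot, PTRACE_SETREGS, and the signal return above) funnel the restored value through the same sanitizer, so user code keeps its interrupt-critical-section bit but can never raise its privilege level. The common idiom, exactly as used in the hunks above (ex1 here stands for whichever register word is being restored):

    /* Keep the caller's ICS bit, force the PL back to USER_PL. */
    ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(ex1));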
+1 -1
arch/tile/kernel/smp.c
··· 115 static void smp_stop_cpu_interrupt(void) 116 { 117 set_cpu_online(smp_processor_id(), 0); 118 - raw_local_irq_disable_all(); 119 for (;;) 120 asm("nap"); 121 }
··· 115 static void smp_stop_cpu_interrupt(void) 116 { 117 set_cpu_online(smp_processor_id(), 0); 118 + arch_local_irq_disable_all(); 119 for (;;) 120 asm("nap"); 121 }
+4 -4
arch/tile/kernel/time.c
··· 132 { 133 BUG_ON(ticks > MAX_TICK); 134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); 135 - raw_local_irq_unmask_now(INT_TILE_TIMER); 136 return 0; 137 } 138 ··· 143 static void tile_timer_set_mode(enum clock_event_mode mode, 144 struct clock_event_device *evt) 145 { 146 - raw_local_irq_mask_now(INT_TILE_TIMER); 147 } 148 149 /* ··· 172 evt->cpumask = cpumask_of(smp_processor_id()); 173 174 /* Start out with timer not firing. */ 175 - raw_local_irq_mask_now(INT_TILE_TIMER); 176 177 /* Register tile timer. */ 178 clockevents_register_device(evt); ··· 188 * Mask the timer interrupt here, since we are a oneshot timer 189 * and there are now by definition no events pending. 190 */ 191 - raw_local_irq_mask(INT_TILE_TIMER); 192 193 /* Track time spent here in an interrupt context */ 194 irq_enter();
··· 132 { 133 BUG_ON(ticks > MAX_TICK); 134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); 135 + arch_local_irq_unmask_now(INT_TILE_TIMER); 136 return 0; 137 } 138 ··· 143 static void tile_timer_set_mode(enum clock_event_mode mode, 144 struct clock_event_device *evt) 145 { 146 + arch_local_irq_mask_now(INT_TILE_TIMER); 147 } 148 149 /* ··· 172 evt->cpumask = cpumask_of(smp_processor_id()); 173 174 /* Start out with timer not firing. */ 175 + arch_local_irq_mask_now(INT_TILE_TIMER); 176 177 /* Register tile timer. */ 178 clockevents_register_device(evt); ··· 188 * Mask the timer interrupt here, since we are a oneshot timer 189 * and there are now by definition no events pending. 190 */ 191 + arch_local_irq_mask(INT_TILE_TIMER); 192 193 /* Track time spent here in an interrupt context */ 194 irq_enter();
+8 -3
arch/tile/lib/memcpy_tile64.c
··· 54 * we must run with interrupts disabled to avoid the risk of some 55 * other code seeing the incoherent data in our cache. (Recall that 56 * our cache is indexed by PA, so even if the other code doesn't use 57 - * our KM_MEMCPY virtual addresses, they'll still hit in cache using 58 * the normal VAs that aren't supposed to hit in cache.) 59 */ 60 static void memcpy_multicache(void *dest, const void *source, ··· 64 unsigned long flags, newsrc, newdst; 65 pmd_t *pmdp; 66 pte_t *ptep; 67 int cpu = get_cpu(); 68 69 /* ··· 78 sim_allow_multiple_caching(1); 79 80 /* Set up the new dest mapping */ 81 - idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0; 82 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); 83 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); 84 ptep = pte_offset_kernel(pmdp, newdst); ··· 89 } 90 91 /* Set up the new source mapping */ 92 - idx += (KM_MEMCPY0 - KM_MEMCPY1); 93 src_pte = hv_pte_set_nc(src_pte); 94 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ 95 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); ··· 122 * We're done: notify the simulator that all is back to normal, 123 * and re-enable interrupts and pre-emption. 124 */ 125 sim_allow_multiple_caching(0); 126 local_irq_restore(flags); 127 put_cpu();
··· 54 * we must run with interrupts disabled to avoid the risk of some 55 * other code seeing the incoherent data in our cache. (Recall that 56 * our cache is indexed by PA, so even if the other code doesn't use 57 + * our kmap_atomic virtual addresses, they'll still hit in cache using 58 * the normal VAs that aren't supposed to hit in cache.) 59 */ 60 static void memcpy_multicache(void *dest, const void *source, ··· 64 unsigned long flags, newsrc, newdst; 65 pmd_t *pmdp; 66 pte_t *ptep; 67 + int type0, type1; 68 int cpu = get_cpu(); 69 70 /* ··· 77 sim_allow_multiple_caching(1); 78 79 /* Set up the new dest mapping */ 80 + type0 = kmap_atomic_idx_push(); 81 + idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0; 82 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); 83 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); 84 ptep = pte_offset_kernel(pmdp, newdst); ··· 87 } 88 89 /* Set up the new source mapping */ 90 + type1 = kmap_atomic_idx_push(); 91 + idx += (type0 - type1); 92 src_pte = hv_pte_set_nc(src_pte); 93 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ 94 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); ··· 119 * We're done: notify the simulator that all is back to normal, 120 * and re-enable interrupts and pre-emption. 121 */ 122 + kmap_atomic_idx_pop(); 123 + kmap_atomic_idx_pop(); 124 sim_allow_multiple_caching(0); 125 local_irq_restore(flags); 126 put_cpu();
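The fixmap indices claimed in this hunk come from the same per-cpu stack that the new kmap_atomic() uses, so the pushes and pops must nest in LIFO order and stay balanced while interrupts remain disabled. Roughly (a sketch of the discipline, not part of the diff; t0 and t1 are placeholders):

    int t0 = kmap_atomic_idx_push();    /* claim a slot for the destination */
    int t1 = kmap_atomic_idx_push();    /* claim a slot for the source */
    /* ... install the temporary PTEs and do the cache-homed copy ... */
    kmap_atomic_idx_pop();              /* release the source slot ... */
    kmap_atomic_idx_pop();              /* ... then the destination slot */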
+1 -1
arch/tile/mm/highmem.c
··· 227 void *__kmap_atomic(struct page *page) 228 { 229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 230 - return kmap_atomic_prot(page, type, PAGE_NONE); 231 } 232 EXPORT_SYMBOL(__kmap_atomic); 233
··· 227 void *__kmap_atomic(struct page *page) 228 { 229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 230 + return kmap_atomic_prot(page, PAGE_NONE); 231 } 232 EXPORT_SYMBOL(__kmap_atomic); 233
+6 -2
arch/tile/mm/init.c
··· 988 /* Select whether to free (1) or mark unusable (0) the __init pages. */ 989 static int __init set_initfree(char *str) 990 { 991 - strict_strtol(str, 0, &initfree); 992 - pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't"); 993 return 1; 994 } 995 __setup("initfree=", set_initfree);
··· 988 /* Select whether to free (1) or mark unusable (0) the __init pages. */ 989 static int __init set_initfree(char *str) 990 { 991 + long val; 992 + if (strict_strtol(str, 0, &val)) { 993 + initfree = val; 994 + pr_info("initfree: %s free init pages\n", 995 + initfree ? "will" : "won't"); 996 + } 997 return 1; 998 } 999 __setup("initfree=", set_initfree);
+2 -2
arch/tile/mm/pgtable.c
··· 134 } 135 136 #if defined(CONFIG_HIGHPTE) 137 - pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type) 138 { 139 - pte_t *pte = kmap_atomic(pmd_page(*dir), type) + 140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK; 141 return &pte[pte_index(address)]; 142 }
··· 134 } 135 136 #if defined(CONFIG_HIGHPTE) 137 + pte_t *pte_offset_map(pmd_t *dir, unsigned long address) 138 { 139 + pte_t *pte = kmap_atomic(pmd_page(*dir)) + 140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK; 141 return &pte[pte_index(address)]; 142 }
+7 -7
include/asm-generic/stat.h
··· 33 int st_blksize; /* Optimal block size for I/O. */ 34 int __pad2; 35 long st_blocks; /* Number 512-byte blocks allocated. */ 36 - int st_atime; /* Time of last access. */ 37 - unsigned int st_atime_nsec; 38 - int st_mtime; /* Time of last modification. */ 39 - unsigned int st_mtime_nsec; 40 - int st_ctime; /* Time of last status change. */ 41 - unsigned int st_ctime_nsec; 42 unsigned int __unused4; 43 unsigned int __unused5; 44 }; 45 46 - #if __BITS_PER_LONG != 64 47 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ 48 struct stat64 { 49 unsigned long long st_dev; /* Device. */ 50 unsigned long long st_ino; /* File serial number. */
··· 33 int st_blksize; /* Optimal block size for I/O. */ 34 int __pad2; 35 long st_blocks; /* Number 512-byte blocks allocated. */ 36 + long st_atime; /* Time of last access. */ 37 + unsigned long st_atime_nsec; 38 + long st_mtime; /* Time of last modification. */ 39 + unsigned long st_mtime_nsec; 40 + long st_ctime; /* Time of last status change. */ 41 + unsigned long st_ctime_nsec; 42 unsigned int __unused4; 43 unsigned int __unused5; 44 }; 45 46 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ 47 + #if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64) 48 struct stat64 { 49 unsigned long long st_dev; /* Device. */ 50 unsigned long long st_ino; /* File serial number. */