Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/uaccess: always run the kernel in home space

Simplify the uaccess code by removing the user_mode=home option.
The kernel will now always run in the home space mode.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+51 -459
+2 -8
arch/s390/include/asm/mmu_context.h
··· 40 40 pgd_t *pgd = mm->pgd; 41 41 42 42 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 43 - if (s390_user_mode != HOME_SPACE_MODE) { 44 - /* Load primary space page table origin. */ 45 - asm volatile(LCTL_OPCODE" 1,1,%0\n" 46 - : : "m" (S390_lowcore.user_asce) ); 47 - } else 48 - /* Load home space page table origin. */ 49 - asm volatile(LCTL_OPCODE" 13,13,%0" 50 - : : "m" (S390_lowcore.user_asce) ); 43 + /* Load primary space page table origin. */ 44 + asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce)); 51 45 set_fs(current->thread.mm_segment); 52 46 } 53 47
+4 -4
arch/s390/include/asm/processor.h
··· 134 134 * Do necessary setup to start up a new thread. 135 135 */ 136 136 #define start_thread(regs, new_psw, new_stackp) do { \ 137 - regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \ 137 + regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \ 138 138 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 139 139 regs->gprs[15] = new_stackp; \ 140 140 execve_tail(); \ 141 141 } while (0) 142 142 143 143 #define start_thread31(regs, new_psw, new_stackp) do { \ 144 - regs->psw.mask = psw_user_bits | PSW_MASK_BA; \ 144 + regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 145 145 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 146 146 regs->gprs[15] = new_stackp; \ 147 147 __tlb_flush_mm(current->mm); \ ··· 343 343 } 344 344 345 345 #define local_mcck_enable() \ 346 - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) 346 + __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK) 347 347 #define local_mcck_disable() \ 348 - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) 348 + __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT) 349 349 350 350 /* 351 351 * Basic Machine Check/Program Check Handler.
+5 -2
arch/s390/include/asm/ptrace.h
··· 10 10 11 11 #ifndef __ASSEMBLY__ 12 12 13 - extern long psw_kernel_bits; 14 - extern long psw_user_bits; 13 + #define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \ 14 + PSW_MASK_EA | PSW_MASK_BA) 15 + #define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 16 + PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ 17 + PSW_MASK_PSTATE | PSW_ASC_PRIMARY) 15 18 16 19 /* 17 20 * The pt_regs struct defines the way the registers are stored on
-7
arch/s390/include/asm/setup.h
··· 48 48 void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, 49 49 unsigned long size); 50 50 51 - #define PRIMARY_SPACE_MODE 0 52 - #define ACCESS_REGISTER_MODE 1 53 - #define SECONDARY_SPACE_MODE 2 54 - #define HOME_SPACE_MODE 3 55 - 56 - extern unsigned int s390_user_mode; 57 - 58 51 /* 59 52 * Machine features detected in head.S 60 53 */
+4 -14
arch/s390/include/asm/uaccess.h
··· 94 94 95 95 struct uaccess_ops { 96 96 size_t (*copy_from_user)(size_t, const void __user *, void *); 97 - size_t (*copy_from_user_small)(size_t, const void __user *, void *); 98 97 size_t (*copy_to_user)(size_t, void __user *, const void *); 99 - size_t (*copy_to_user_small)(size_t, void __user *, const void *); 100 98 size_t (*copy_in_user)(size_t, void __user *, const void __user *); 101 99 size_t (*clear_user)(size_t, void __user *); 102 100 size_t (*strnlen_user)(size_t, const char __user *); ··· 104 106 }; 105 107 106 108 extern struct uaccess_ops uaccess; 107 - extern struct uaccess_ops uaccess_std; 108 109 extern struct uaccess_ops uaccess_mvcos; 109 - extern struct uaccess_ops uaccess_mvcos_switch; 110 110 extern struct uaccess_ops uaccess_pt; 111 111 112 112 extern int __handle_fault(unsigned long, unsigned long, int); 113 113 114 114 static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 115 115 { 116 - size = uaccess.copy_to_user_small(size, ptr, x); 116 + size = uaccess.copy_to_user(size, ptr, x); 117 117 return size ? -EFAULT : size; 118 118 } 119 119 120 120 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 121 121 { 122 - size = uaccess.copy_from_user_small(size, ptr, x); 122 + size = uaccess.copy_from_user(size, ptr, x); 123 123 return size ? -EFAULT : size; 124 124 } 125 125 ··· 222 226 static inline unsigned long __must_check 223 227 __copy_to_user(void __user *to, const void *from, unsigned long n) 224 228 { 225 - if (__builtin_constant_p(n) && (n <= 256)) 226 - return uaccess.copy_to_user_small(n, to, from); 227 - else 228 - return uaccess.copy_to_user(n, to, from); 229 + return uaccess.copy_to_user(n, to, from); 229 230 } 230 231 231 232 #define __copy_to_user_inatomic __copy_to_user ··· 268 275 static inline unsigned long __must_check 269 276 __copy_from_user(void *to, const void __user *from, unsigned long n) 270 277 { 271 - if (__builtin_constant_p(n) && (n <= 256)) 272 - return uaccess.copy_from_user_small(n, from, to); 273 - else 274 - return uaccess.copy_from_user(n, from, to); 278 + return uaccess.copy_from_user(n, from, to); 275 279 } 276 280 277 281 extern void copy_from_user_overflow(void)
+4 -4
arch/s390/kernel/compat_signal.c
··· 188 188 (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | 189 189 (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); 190 190 /* Check for invalid user address space control. */ 191 - if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 192 - regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 191 + if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) 192 + regs->psw.mask = PSW_ASC_PRIMARY | 193 193 (regs->psw.mask & ~PSW_MASK_ASC); 194 194 regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); 195 195 for (i = 0; i < NUM_GPRS; i++) ··· 348 348 regs->gprs[15] = (__force __u64) frame; 349 349 /* Force 31 bit amode and default user address space control. */ 350 350 regs->psw.mask = PSW_MASK_BA | 351 - (psw_user_bits & PSW_MASK_ASC) | 351 + (PSW_USER_BITS & PSW_MASK_ASC) | 352 352 (regs->psw.mask & ~PSW_MASK_ASC); 353 353 regs->psw.addr = (__force __u64) ka->sa.sa_handler; 354 354 ··· 415 415 regs->gprs[15] = (__force __u64) frame; 416 416 /* Force 31 bit amode and default user address space control. */ 417 417 regs->psw.mask = PSW_MASK_BA | 418 - (psw_user_bits & PSW_MASK_ASC) | 418 + (PSW_USER_BITS & PSW_MASK_ASC) | 419 419 (regs->psw.mask & ~PSW_MASK_ASC); 420 420 regs->psw.addr = (__u64 __force) ka->sa.sa_handler; 421 421
+2 -2
arch/s390/kernel/ipl.c
··· 2051 2051 __ctl_clear_bit(0,28); 2052 2052 2053 2053 /* Set new machine check handler */ 2054 - S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2054 + S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; 2055 2055 S390_lowcore.mcck_new_psw.addr = 2056 2056 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; 2057 2057 2058 2058 /* Set new program check handler */ 2059 - S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2059 + S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; 2060 2060 S390_lowcore.program_new_psw.addr = 2061 2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2062 2062
+1 -1
arch/s390/kernel/process.c
··· 139 139 if (unlikely(p->flags & PF_KTHREAD)) { 140 140 /* kernel thread */ 141 141 memset(&frame->childregs, 0, sizeof(struct pt_regs)); 142 - frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | 142 + frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | 143 143 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 144 144 frame->childregs.psw.addr = PSW_ADDR_AMODE | 145 145 (unsigned long) kernel_thread_starter;
+2 -2
arch/s390/kernel/ptrace.c
··· 200 200 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 201 201 if (addr == (addr_t) &dummy->regs.psw.mask) 202 202 /* Return a clean psw mask. */ 203 - tmp = psw_user_bits | (tmp & PSW_MASK_USER); 203 + tmp = PSW_USER_BITS | (tmp & PSW_MASK_USER); 204 204 205 205 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 206 206 /* ··· 322 322 * psw and gprs are stored on the stack 323 323 */ 324 324 if (addr == (addr_t) &dummy->regs.psw.mask && 325 - ((data & ~PSW_MASK_USER) != psw_user_bits || 325 + ((data & ~PSW_MASK_USER) != PSW_USER_BITS || 326 326 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) 327 327 /* Invalid psw mask. */ 328 328 return -EINVAL;
-2
arch/s390/kernel/runtime_instr.c
··· 40 40 static void init_runtime_instr_cb(struct runtime_instr_cb *cb) 41 41 { 42 42 cb->buf_limit = 0xfff; 43 - if (s390_user_mode == HOME_SPACE_MODE) 44 - cb->home_space = 1; 45 43 cb->int_requested = 1; 46 44 cb->pstate = 1; 47 45 cb->pstate_set_buf = 1;
+10 -49
arch/s390/kernel/setup.c
··· 64 64 #include <asm/sclp.h> 65 65 #include "entry.h" 66 66 67 - long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | 68 - PSW_MASK_EA | PSW_MASK_BA; 69 - long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | 70 - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | 71 - PSW_MASK_PSTATE | PSW_ASC_HOME; 72 - 73 67 /* 74 68 * User copy operations. 75 69 */ ··· 294 300 } 295 301 early_param("vmalloc", parse_vmalloc); 296 302 297 - unsigned int s390_user_mode = PRIMARY_SPACE_MODE; 298 - EXPORT_SYMBOL_GPL(s390_user_mode); 299 - 300 - static void __init set_user_mode_primary(void) 301 - { 302 - psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; 303 - psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; 304 - #ifdef CONFIG_COMPAT 305 - psw32_user_bits = 306 - (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; 307 - #endif 308 - uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt; 309 - } 310 - 311 303 static int __init early_parse_user_mode(char *p) 312 304 { 313 - if (p && strcmp(p, "primary") == 0) 314 - s390_user_mode = PRIMARY_SPACE_MODE; 315 - else if (!p || strcmp(p, "home") == 0) 316 - s390_user_mode = HOME_SPACE_MODE; 317 - else 318 - return 1; 319 - return 0; 305 + if (!p || strcmp(p, "primary") == 0) 306 + return 0; 307 + return 1; 320 308 } 321 309 early_param("user_mode", early_parse_user_mode); 322 - 323 - static void __init setup_addressing_mode(void) 324 - { 325 - if (s390_user_mode != PRIMARY_SPACE_MODE) 326 - return; 327 - set_user_mode_primary(); 328 - if (MACHINE_HAS_MVCOS) 329 - pr_info("Address spaces switched, mvcos available\n"); 330 - else 331 - pr_info("Address spaces switched, mvcos not available\n"); 332 - } 333 310 334 311 void *restart_stack __attribute__((__section__(".data"))); 335 312 ··· 313 348 */ 314 349 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 315 350 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 316 - lc->restart_psw.mask = psw_kernel_bits; 351 + lc->restart_psw.mask = PSW_KERNEL_BITS; 317 352 lc->restart_psw.addr = 318 353 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 319 - lc->external_new_psw.mask = psw_kernel_bits | 354 + lc->external_new_psw.mask = PSW_KERNEL_BITS | 320 355 PSW_MASK_DAT | PSW_MASK_MCHECK; 321 356 lc->external_new_psw.addr = 322 357 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 323 - lc->svc_new_psw.mask = psw_kernel_bits | 358 + lc->svc_new_psw.mask = PSW_KERNEL_BITS | 324 359 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 325 360 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 326 - lc->program_new_psw.mask = psw_kernel_bits | 361 + lc->program_new_psw.mask = PSW_KERNEL_BITS | 327 362 PSW_MASK_DAT | PSW_MASK_MCHECK; 328 363 lc->program_new_psw.addr = 329 364 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; 330 - lc->mcck_new_psw.mask = psw_kernel_bits; 365 + lc->mcck_new_psw.mask = PSW_KERNEL_BITS; 331 366 lc->mcck_new_psw.addr = 332 367 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 333 - lc->io_new_psw.mask = psw_kernel_bits | 368 + lc->io_new_psw.mask = PSW_KERNEL_BITS | 334 369 PSW_MASK_DAT | PSW_MASK_MCHECK; 335 370 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 336 371 lc->clock_comparator = -1ULL; ··· 1008 1043 init_mm.end_data = (unsigned long) &_edata; 1009 1044 init_mm.brk = (unsigned long) &_end; 1010 1045 1011 - if (MACHINE_HAS_MVCOS) 1012 - memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); 1013 - else 1014 - memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); 1046 + uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt; 1015 1047 1016 1048 parse_early_param(); 1017 1049 detect_memory_layout(memory_chunk, memory_end); ··· 1016 1054 setup_ipl(); 1017 1055 reserve_oldmem(); 1018 1056 setup_memory_end(); 1019 - setup_addressing_mode(); 1020 1057 reserve_crashkernel(); 1021 1058 setup_memory(); 1022 1059 setup_resources();
+6 -6
arch/s390/kernel/signal.c
··· 57 57 58 58 /* Copy a 'clean' PSW mask to the user to avoid leaking 59 59 information about whether PER is currently on. */ 60 - user_sregs.regs.psw.mask = psw_user_bits | 60 + user_sregs.regs.psw.mask = PSW_USER_BITS | 61 61 (regs->psw.mask & PSW_MASK_USER); 62 62 user_sregs.regs.psw.addr = regs->psw.addr; 63 63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); ··· 85 85 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); 86 86 if (err) 87 87 return err; 88 - /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ 88 + /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 89 89 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 90 90 (user_sregs.regs.psw.mask & PSW_MASK_USER); 91 91 /* Check for invalid user address space control. */ 92 - if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 93 - regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 92 + if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) 93 + regs->psw.mask = PSW_ASC_PRIMARY | 94 94 (regs->psw.mask & ~PSW_MASK_ASC); 95 95 /* Check for invalid amode */ 96 96 if (regs->psw.mask & PSW_MASK_EA) ··· 224 224 regs->gprs[15] = (unsigned long) frame; 225 225 /* Force default amode and default user address space control. */ 226 226 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 227 - (psw_user_bits & PSW_MASK_ASC) | 227 + (PSW_USER_BITS & PSW_MASK_ASC) | 228 228 (regs->psw.mask & ~PSW_MASK_ASC); 229 229 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 230 230 ··· 295 295 regs->gprs[15] = (unsigned long) frame; 296 296 /* Force default amode and default user address space control. */ 297 297 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 298 - (psw_user_bits & PSW_MASK_ASC) | 298 + (PSW_USER_BITS & PSW_MASK_ASC) | 299 299 (regs->psw.mask & ~PSW_MASK_ASC); 300 300 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 301 301
+3 -3
arch/s390/kernel/smp.c
··· 283 283 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; 284 284 unsigned long source_cpu = stap(); 285 285 286 - __load_psw_mask(psw_kernel_bits); 286 + __load_psw_mask(PSW_KERNEL_BITS); 287 287 if (pcpu->address == source_cpu) 288 288 func(data); /* should not return */ 289 289 /* Stop target cpu (if func returns this stops the current cpu). */ ··· 395 395 int cpu; 396 396 397 397 /* Disable all interrupts/machine checks */ 398 - __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 398 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 399 399 trace_hardirqs_off(); 400 400 401 401 debug_set_critical(); ··· 693 693 S390_lowcore.restart_source = -1UL; 694 694 restore_access_regs(S390_lowcore.access_regs_save_area); 695 695 __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 696 - __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 696 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 697 697 cpu_init(); 698 698 preempt_disable(); 699 699 init_cpu_timer();
+4 -5
arch/s390/kernel/vdso.c
··· 84 84 */ 85 85 static void vdso_init_data(struct vdso_data *vd) 86 86 { 87 - vd->ectg_available = 88 - s390_user_mode != HOME_SPACE_MODE && test_facility(31); 87 + vd->ectg_available = test_facility(31); 89 88 } 90 89 91 90 #ifdef CONFIG_64BIT ··· 101 102 102 103 lowcore->vdso_per_cpu_data = __LC_PASTE; 103 104 104 - if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 105 + if (!vdso_enabled) 105 106 return 0; 106 107 107 108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); ··· 146 147 unsigned long segment_table, page_table, page_frame; 147 148 u32 *psal, *aste; 148 149 149 - if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 150 + if (!vdso_enabled) 150 151 return; 151 152 152 153 psal = (u32 *)(addr_t) lowcore->paste[4]; ··· 164 165 { 165 166 unsigned long cr5; 166 167 167 - if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 168 + if (!vdso_enabled) 168 169 return; 169 170 cr5 = offsetof(struct _lowcore, paste); 170 171 __ctl_load(cr5, 5, 5);
+1 -1
arch/s390/kernel/vtime.c
··· 161 161 trace_hardirqs_on(); 162 162 163 163 /* Wait for external, I/O or machine check interrupt. */ 164 - psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 164 + psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT | 165 165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 166 166 idle->nohz_delay = 0; 167 167
+1 -1
arch/s390/lib/Makefile
··· 2 2 # Makefile for s390-specific library files.. 3 3 # 4 4 5 - lib-y += delay.o string.o uaccess_std.o uaccess_pt.o find.o 5 + lib-y += delay.o string.o uaccess_pt.o find.o 6 6 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 7 7 obj-$(CONFIG_64BIT) += mem64.o 8 8 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
-30
arch/s390/lib/uaccess_mvcos.c
··· 65 65 return size; 66 66 } 67 67 68 - static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) 69 - { 70 - if (size <= 256) 71 - return copy_from_user_std(size, ptr, x); 72 - return copy_from_user_mvcos(size, ptr, x); 73 - } 74 - 75 68 static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 76 69 { 77 70 register unsigned long reg0 asm("0") = 0x810000UL; ··· 92 99 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 93 100 : "d" (reg0) : "cc", "memory"); 94 101 return size; 95 - } 96 - 97 - static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, 98 - const void *x) 99 - { 100 - if (size <= 256) 101 - return copy_to_user_std(size, ptr, x); 102 - return copy_to_user_mvcos(size, ptr, x); 103 102 } 104 103 105 104 static size_t copy_in_user_mvcos(size_t size, void __user *to, ··· 186 201 } 187 202 188 203 struct uaccess_ops uaccess_mvcos = { 189 - .copy_from_user = copy_from_user_mvcos_check, 190 - .copy_from_user_small = copy_from_user_std, 191 - .copy_to_user = copy_to_user_mvcos_check, 192 - .copy_to_user_small = copy_to_user_std, 193 - .copy_in_user = copy_in_user_mvcos, 194 - .clear_user = clear_user_mvcos, 195 - .strnlen_user = strnlen_user_std, 196 - .strncpy_from_user = strncpy_from_user_std, 197 - .futex_atomic_op = futex_atomic_op_std, 198 - .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, 199 - }; 200 - 201 - struct uaccess_ops uaccess_mvcos_switch = { 202 204 .copy_from_user = copy_from_user_mvcos, 203 - .copy_from_user_small = copy_from_user_mvcos, 204 205 .copy_to_user = copy_to_user_mvcos, 205 - .copy_to_user_small = copy_to_user_mvcos, 206 206 .copy_in_user = copy_in_user_mvcos, 207 207 .clear_user = clear_user_mvcos, 208 208 .strnlen_user = strnlen_user_mvcos,
-2
arch/s390/lib/uaccess_pt.c
··· 461 461 462 462 struct uaccess_ops uaccess_pt = { 463 463 .copy_from_user = copy_from_user_pt, 464 - .copy_from_user_small = copy_from_user_pt, 465 464 .copy_to_user = copy_to_user_pt, 466 - .copy_to_user_small = copy_to_user_pt, 467 465 .copy_in_user = copy_in_user_pt, 468 466 .clear_user = clear_user_pt, 469 467 .strnlen_user = strnlen_user_pt,
-305
arch/s390/lib/uaccess_std.c
··· 1 - /* 2 - * Standard user space access functions based on mvcp/mvcs and doing 3 - * interesting things in the secondary space mode. 4 - * 5 - * Copyright IBM Corp. 2006 6 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 - * Gerald Schaefer (gerald.schaefer@de.ibm.com) 8 - */ 9 - 10 - #include <linux/errno.h> 11 - #include <linux/mm.h> 12 - #include <linux/uaccess.h> 13 - #include <asm/futex.h> 14 - #include "uaccess.h" 15 - 16 - #ifndef CONFIG_64BIT 17 - #define AHI "ahi" 18 - #define ALR "alr" 19 - #define CLR "clr" 20 - #define LHI "lhi" 21 - #define SLR "slr" 22 - #else 23 - #define AHI "aghi" 24 - #define ALR "algr" 25 - #define CLR "clgr" 26 - #define LHI "lghi" 27 - #define SLR "slgr" 28 - #endif 29 - 30 - size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) 31 - { 32 - unsigned long tmp1, tmp2; 33 - 34 - tmp1 = -256UL; 35 - asm volatile( 36 - "0: mvcp 0(%0,%2),0(%1),%3\n" 37 - "10:jz 8f\n" 38 - "1:"ALR" %0,%3\n" 39 - " la %1,256(%1)\n" 40 - " la %2,256(%2)\n" 41 - "2: mvcp 0(%0,%2),0(%1),%3\n" 42 - "11:jnz 1b\n" 43 - " j 8f\n" 44 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 45 - " "LHI" %3,-4096\n" 46 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 47 - " "SLR" %4,%1\n" 48 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 49 - " jnh 5f\n" 50 - "4: mvcp 0(%4,%2),0(%1),%3\n" 51 - "12:"SLR" %0,%4\n" 52 - " "ALR" %2,%4\n" 53 - "5:"LHI" %4,-1\n" 54 - " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ 55 - " bras %3,7f\n" /* memset loop */ 56 - " xc 0(1,%2),0(%2)\n" 57 - "6: xc 0(256,%2),0(%2)\n" 58 - " la %2,256(%2)\n" 59 - "7:"AHI" %4,-256\n" 60 - " jnm 6b\n" 61 - " ex %4,0(%3)\n" 62 - " j 9f\n" 63 - "8:"SLR" %0,%0\n" 64 - "9: \n" 65 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) 66 - EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) 67 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 68 - : : "cc", "memory"); 69 - return size; 70 - } 71 - 72 - static size_t copy_from_user_std_check(size_t size, const void __user *ptr, 73 - void *x) 74 - { 75 - if (size <= 1024) 76 - return copy_from_user_std(size, ptr, x); 77 - return copy_from_user_pt(size, ptr, x); 78 - } 79 - 80 - size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) 81 - { 82 - unsigned long tmp1, tmp2; 83 - 84 - tmp1 = -256UL; 85 - asm volatile( 86 - "0: mvcs 0(%0,%1),0(%2),%3\n" 87 - "7: jz 5f\n" 88 - "1:"ALR" %0,%3\n" 89 - " la %1,256(%1)\n" 90 - " la %2,256(%2)\n" 91 - "2: mvcs 0(%0,%1),0(%2),%3\n" 92 - "8: jnz 1b\n" 93 - " j 5f\n" 94 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 95 - " "LHI" %3,-4096\n" 96 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 97 - " "SLR" %4,%1\n" 98 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 99 - " jnh 6f\n" 100 - "4: mvcs 0(%4,%1),0(%2),%3\n" 101 - "9:"SLR" %0,%4\n" 102 - " j 6f\n" 103 - "5:"SLR" %0,%0\n" 104 - "6: \n" 105 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 106 - EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 107 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 108 - : : "cc", "memory"); 109 - return size; 110 - } 111 - 112 - static size_t copy_to_user_std_check(size_t size, void __user *ptr, 113 - const void *x) 114 - { 115 - if (size <= 1024) 116 - return copy_to_user_std(size, ptr, x); 117 - return copy_to_user_pt(size, ptr, x); 118 - } 119 - 120 - static size_t copy_in_user_std(size_t size, void __user *to, 121 - const void __user *from) 122 - { 123 - unsigned long tmp1; 124 - 125 - asm volatile( 126 - " sacf 256\n" 127 - " "AHI" %0,-1\n" 128 - " jo 5f\n" 129 - " bras %3,3f\n" 130 - "0:"AHI" %0,257\n" 131 - "1: mvc 0(1,%1),0(%2)\n" 132 - " la %1,1(%1)\n" 133 - " la %2,1(%2)\n" 134 - " "AHI" %0,-1\n" 135 - " jnz 1b\n" 136 - " j 5f\n" 137 - "2: mvc 0(256,%1),0(%2)\n" 138 - " la %1,256(%1)\n" 139 - " la %2,256(%2)\n" 140 - "3:"AHI" %0,-256\n" 141 - " jnm 2b\n" 142 - "4: ex %0,1b-0b(%3)\n" 143 - "5: "SLR" %0,%0\n" 144 - "6: sacf 0\n" 145 - EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 146 - : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) 147 - : : "cc", "memory"); 148 - return size; 149 - } 150 - 151 - static size_t clear_user_std(size_t size, void __user *to) 152 - { 153 - unsigned long tmp1, tmp2; 154 - 155 - asm volatile( 156 - " sacf 256\n" 157 - " "AHI" %0,-1\n" 158 - " jo 5f\n" 159 - " bras %3,3f\n" 160 - " xc 0(1,%1),0(%1)\n" 161 - "0:"AHI" %0,257\n" 162 - " la %2,255(%1)\n" /* %2 = ptr + 255 */ 163 - " srl %2,12\n" 164 - " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ 165 - " "SLR" %2,%1\n" 166 - " "CLR" %0,%2\n" /* clear crosses next page boundary? */ 167 - " jnh 5f\n" 168 - " "AHI" %2,-1\n" 169 - "1: ex %2,0(%3)\n" 170 - " "AHI" %2,1\n" 171 - " "SLR" %0,%2\n" 172 - " j 5f\n" 173 - "2: xc 0(256,%1),0(%1)\n" 174 - " la %1,256(%1)\n" 175 - "3:"AHI" %0,-256\n" 176 - " jnm 2b\n" 177 - "4: ex %0,0(%3)\n" 178 - "5: "SLR" %0,%0\n" 179 - "6: sacf 0\n" 180 - EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 181 - : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) 182 - : : "cc", "memory"); 183 - return size; 184 - } 185 - 186 - size_t strnlen_user_std(size_t size, const char __user *src) 187 - { 188 - register unsigned long reg0 asm("0") = 0UL; 189 - unsigned long tmp1, tmp2; 190 - 191 - if (unlikely(!size)) 192 - return 0; 193 - asm volatile( 194 - " la %2,0(%1)\n" 195 - " la %3,0(%0,%1)\n" 196 - " "SLR" %0,%0\n" 197 - " sacf 256\n" 198 - "0: srst %3,%2\n" 199 - " jo 0b\n" 200 - " la %0,1(%3)\n" /* strnlen_user results includes \0 */ 201 - " "SLR" %0,%1\n" 202 - "1: sacf 0\n" 203 - EX_TABLE(0b,1b) 204 - : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) 205 - : "d" (reg0) : "cc", "memory"); 206 - return size; 207 - } 208 - 209 - size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst) 210 - { 211 - size_t done, len, offset, len_str; 212 - 213 - if (unlikely(!count)) 214 - return 0; 215 - done = 0; 216 - do { 217 - offset = (size_t)src & ~PAGE_MASK; 218 - len = min(count - done, PAGE_SIZE - offset); 219 - if (copy_from_user_std(len, src, dst)) 220 - return -EFAULT; 221 - len_str = strnlen(dst, len); 222 - done += len_str; 223 - src += len_str; 224 - dst += len_str; 225 - } while ((len_str == len) && (done < count)); 226 - return done; 227 - } 228 - 229 - #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ 230 - asm volatile( \ 231 - " sacf 256\n" \ 232 - "0: l %1,0(%6)\n" \ 233 - "1:"insn \ 234 - "2: cs %1,%2,0(%6)\n" \ 235 - "3: jl 1b\n" \ 236 - " lhi %0,0\n" \ 237 - "4: sacf 0\n" \ 238 - EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ 239 - : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ 240 - "=m" (*uaddr) \ 241 - : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 242 - "m" (*uaddr) : "cc"); 243 - 244 - int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) 245 - { 246 - int oldval = 0, newval, ret; 247 - 248 - switch (op) { 249 - case FUTEX_OP_SET: 250 - __futex_atomic_op("lr %2,%5\n", 251 - ret, oldval, newval, uaddr, oparg); 252 - break; 253 - case FUTEX_OP_ADD: 254 - __futex_atomic_op("lr %2,%1\nar %2,%5\n", 255 - ret, oldval, newval, uaddr, oparg); 256 - break; 257 - case FUTEX_OP_OR: 258 - __futex_atomic_op("lr %2,%1\nor %2,%5\n", 259 - ret, oldval, newval, uaddr, oparg); 260 - break; 261 - case FUTEX_OP_ANDN: 262 - __futex_atomic_op("lr %2,%1\nnr %2,%5\n", 263 - ret, oldval, newval, uaddr, oparg); 264 - break; 265 - case FUTEX_OP_XOR: 266 - __futex_atomic_op("lr %2,%1\nxr %2,%5\n", 267 - ret, oldval, newval, uaddr, oparg); 268 - break; 269 - default: 270 - ret = -ENOSYS; 271 - } 272 - *old = oldval; 273 - return ret; 274 - } 275 - 276 - int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, 277 - u32 oldval, u32 newval) 278 - { 279 - int ret; 280 - 281 - asm volatile( 282 - " sacf 256\n" 283 - "0: cs %1,%4,0(%5)\n" 284 - "1: la %0,0\n" 285 - "2: sacf 0\n" 286 - EX_TABLE(0b,2b) EX_TABLE(1b,2b) 287 - : "=d" (ret), "+d" (oldval), "=m" (*uaddr) 288 - : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) 289 - : "cc", "memory" ); 290 - *uval = oldval; 291 - return ret; 292 - } 293 - 294 - struct uaccess_ops uaccess_std = { 295 - .copy_from_user = copy_from_user_std_check, 296 - .copy_from_user_small = copy_from_user_std, 297 - .copy_to_user = copy_to_user_std_check, 298 - .copy_to_user_small = copy_to_user_std, 299 - .copy_in_user = copy_in_user_std, 300 - .clear_user = clear_user_std, 301 - .strnlen_user = strnlen_user_std, 302 - .strncpy_from_user = strncpy_from_user_std, 303 - .futex_atomic_op = futex_atomic_op_std, 304 - .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, 305 - };
+2 -7
arch/s390/mm/fault.c
··· 115 115 if (trans_exc_code == 2) 116 116 /* Access via secondary space, set_fs setting decides */ 117 117 return current->thread.mm_segment.ar4; 118 - if (s390_user_mode == HOME_SPACE_MODE) 119 - /* User space if the access has been done via home space. */ 120 - return trans_exc_code == 3; 121 118 /* 122 - * If the user space is not the home space the kernel runs in home 123 - * space. Access via secondary space has already been covered, 124 - * access via primary space or access register is from user space 119 + * Access via primary space or access register is from user space 125 120 * and access via home space is from the kernel. 126 121 */ 127 122 return trans_exc_code != 3; ··· 466 471 int access, fault; 467 472 468 473 /* Emulate a uaccess fault from kernel mode. */ 469 - regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; 474 + regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; 470 475 if (!irqs_disabled()) 471 476 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; 472 477 regs.psw.addr = (unsigned long) __builtin_return_address(0);
-4
arch/s390/mm/pgtable.c
··· 1157 1157 struct mm_struct *mm = tsk->mm; 1158 1158 struct mmu_gather tlb; 1159 1159 1160 - /* Do we have switched amode? If no, we cannot do sie */ 1161 - if (s390_user_mode == HOME_SPACE_MODE) 1162 - return -EINVAL; 1163 - 1164 1160 /* Do we have pgstes? if yes, we are done */ 1165 1161 if (mm_has_pgste(tsk->mm)) 1166 1162 return 0;