Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] Remove config options.

On s390 we always want to run with precise cputime accounting.
Remove the config option VIRT_TIMER and convert VIRT_CPU_ACCOUNTING
into an always-enabled option (def_bool y), so both are no longer
user-selectable.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+20 -143
+4 -14
arch/s390/Kconfig
··· 69 69 bool 70 70 default y if KVM 71 71 72 + config VIRT_CPU_ACCOUNTING 73 + def_bool y 74 + 72 75 mainmenu "Linux Kernel Configuration" 73 76 74 77 config S390 ··· 472 469 hypervisor. The ESSA instruction is used to do the states 473 470 changes between a page that has content and the unused state. 474 471 475 - config VIRT_TIMER 476 - bool "Virtual CPU timer support" 477 - help 478 - This provides a kernel interface for virtual CPU timers. 479 - Default is disabled. 480 - 481 - config VIRT_CPU_ACCOUNTING 482 - bool "Base user process accounting on virtual cpu timer" 483 - depends on VIRT_TIMER 484 - help 485 - Select this option to use CPU timer deltas to do user 486 - process accounting. 487 - 488 472 config APPLDATA_BASE 489 473 bool "Linux - VM Monitor Stream, base infrastructure" 490 - depends on PROC_FS && VIRT_TIMER=y 474 + depends on PROC_FS 491 475 help 492 476 This provides a kernel interface for creating and updating z/VM APPLDATA 493 477 monitor records. The monitor records are updated at certain time
-4
arch/s390/include/asm/system.h
··· 99 99 prev = __switch_to(prev,next); \ 100 100 } while (0) 101 101 102 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 103 102 extern void account_vtime(struct task_struct *); 104 103 extern void account_tick_vtime(struct task_struct *); 105 104 extern void account_system_vtime(struct task_struct *); 106 - #else 107 - #define account_vtime(x) do { /* empty */ } while (0) 108 - #endif 109 105 110 106 #ifdef CONFIG_PFAULT 111 107 extern void pfault_irq_init(void);
-9
arch/s390/include/asm/timer.h
··· 48 48 extern void init_cpu_vtimer(void); 49 49 extern void vtime_init(void); 50 50 51 - #ifdef CONFIG_VIRT_TIMER 52 - 53 51 extern void vtime_start_cpu_timer(void); 54 52 extern void vtime_stop_cpu_timer(void); 55 - 56 - #else 57 - 58 - static inline void vtime_start_cpu_timer(void) { } 59 - static inline void vtime_stop_cpu_timer(void) { } 60 - 61 - #endif /* CONFIG_VIRT_TIMER */ 62 53 63 54 #endif /* __KERNEL__ */ 64 55
+1 -2
arch/s390/kernel/Makefile
··· 20 20 obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 21 21 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 22 22 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 23 - vdso.o 23 + vdso.o vtime.o 24 24 25 25 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 26 26 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) ··· 36 36 compat_wrapper.o compat_exec_domain.o \ 37 37 $(compat-obj-y) 38 38 39 - obj-$(CONFIG_VIRT_TIMER) += vtime.o 40 39 obj-$(CONFIG_STACKTRACE) += stacktrace.o 41 40 obj-$(CONFIG_KPROBES) += kprobes.o 42 41 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+7 -49
arch/s390/kernel/entry.S
··· 109 109 * R15 - kernel stack pointer 110 110 */ 111 111 112 - .macro STORE_TIMER lc_offset 113 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 114 - stpt \lc_offset 115 - #endif 116 - .endm 117 - 118 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 119 112 .macro UPDATE_VTIME lc_from,lc_to,lc_sum 120 113 lm %r10,%r11,\lc_from 121 114 sl %r10,\lc_to ··· 121 128 al %r10,BASED(.Lc_1) 122 129 1: stm %r10,%r11,\lc_sum 123 130 .endm 124 - #endif 125 131 126 132 .macro SAVE_ALL_BASE savearea 127 133 stm %r12,%r15,\savearea ··· 190 198 ni \psworg+1,0xfd # clear wait state bit 191 199 .endif 192 200 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user 193 - STORE_TIMER __LC_EXIT_TIMER 201 + stpt __LC_EXIT_TIMER 194 202 lpsw \psworg # back to caller 195 203 .endm 196 204 ··· 239 247 240 248 .globl system_call 241 249 system_call: 242 - STORE_TIMER __LC_SYNC_ENTER_TIMER 250 + stpt __LC_SYNC_ENTER_TIMER 243 251 sysc_saveall: 244 252 SAVE_ALL_BASE __LC_SAVE_AREA 245 253 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 246 254 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 247 255 lh %r7,0x8a # get svc number from lowcore 248 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 249 256 sysc_vtime: 250 257 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 251 258 sysc_stime: 252 259 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 253 260 sysc_update: 254 261 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 255 - #endif 256 262 sysc_do_svc: 257 263 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 258 264 ltr %r7,%r7 # test for svc 0 ··· 480 490 * we just ignore the PER event (FIXME: is there anything we have to do 481 491 * for LPSW?). 
482 492 */ 483 - STORE_TIMER __LC_SYNC_ENTER_TIMER 493 + stpt __LC_SYNC_ENTER_TIMER 484 494 SAVE_ALL_BASE __LC_SAVE_AREA 485 495 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 486 496 bnz BASED(pgm_per) # got per exception -> special case 487 497 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 488 498 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 489 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 490 499 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 491 500 bz BASED(pgm_no_vtime) 492 501 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 493 502 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 494 503 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 495 504 pgm_no_vtime: 496 - #endif 497 505 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 498 506 TRACE_IRQS_OFF 499 507 l %r3,__LC_PGM_ILC # load program interruption code ··· 524 536 pgm_per_std: 525 537 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 526 538 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 527 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 528 539 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
529 540 bz BASED(pgm_no_vtime2) 530 541 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 531 542 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 532 543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 533 544 pgm_no_vtime2: 534 - #endif 535 545 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 536 546 TRACE_IRQS_OFF 537 547 l %r1,__TI_task(%r9) ··· 551 565 pgm_svcper: 552 566 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 553 567 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 554 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 555 568 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 556 569 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 557 570 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 558 - #endif 559 571 lh %r7,0x8a # get svc number from lowcore 560 572 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 561 573 TRACE_IRQS_OFF ··· 583 599 584 600 .globl io_int_handler 585 601 io_int_handler: 586 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 602 + stpt __LC_ASYNC_ENTER_TIMER 587 603 stck __LC_INT_CLOCK 588 604 SAVE_ALL_BASE __LC_SAVE_AREA+16 589 605 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 590 606 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 591 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 592 607 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
593 608 bz BASED(io_no_vtime) 594 609 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 595 610 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 596 611 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 597 612 io_no_vtime: 598 - #endif 599 613 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 600 614 TRACE_IRQS_OFF 601 615 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ ··· 723 741 724 742 .globl ext_int_handler 725 743 ext_int_handler: 726 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 744 + stpt __LC_ASYNC_ENTER_TIMER 727 745 stck __LC_INT_CLOCK 728 746 SAVE_ALL_BASE __LC_SAVE_AREA+16 729 747 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 730 748 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 731 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 732 749 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 733 750 bz BASED(ext_no_vtime) 734 751 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 735 752 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 736 753 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 737 754 ext_no_vtime: 738 - #endif 739 755 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 740 756 TRACE_IRQS_OFF 741 757 la %r2,SP_PTREGS(%r15) # address of register-save area ··· 756 776 la %r12,__LC_MCK_OLD_PSW 757 777 tm __LC_MCCK_CODE,0x80 # system damage? 758 778 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid 759 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 760 779 mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER 761 780 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA 762 781 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? ··· 772 793 la %r14,__LC_LAST_UPDATE_TIMER 773 794 0: spt 0(%r14) 774 795 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 775 - 1: 776 - #endif 777 - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 796 + 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 
778 797 bno BASED(mcck_int_main) # no -> skip cleanup critical 779 798 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 780 799 bnz BASED(mcck_int_main) # from user -> load async stack ··· 789 812 be BASED(0f) 790 813 l %r15,__LC_PANIC_STACK # load panic stack 791 814 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 792 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 793 815 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 794 816 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 795 817 tm SP_PSW+1(%r15),0x01 # interrupting from user ? ··· 797 821 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 798 822 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 799 823 mcck_no_vtime: 800 - #endif 801 824 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 802 825 la %r2,SP_PTREGS(%r15) # load pt_regs 803 826 l %r1,BASED(.Ls390_mcck) ··· 818 843 mcck_return: 819 844 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW 820 845 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 821 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 822 846 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 823 847 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
824 848 bno BASED(0f) 825 849 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 826 850 stpt __LC_EXIT_TIMER 827 851 lpsw __LC_RETURN_MCCK_PSW # back to caller 828 - 0: 829 - #endif 830 - lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 852 + 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 831 853 lpsw __LC_RETURN_MCCK_PSW # back to caller 832 854 833 855 RESTORE_ALL __LC_RETURN_MCCK_PSW,0 ··· 948 976 b BASED(1f) 949 977 0: la %r12,__LC_SAVE_AREA+32 950 978 1: 951 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 952 979 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) 953 980 bh BASED(0f) 954 981 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 955 982 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) 956 983 bhe BASED(cleanup_vtime) 957 - #endif 958 984 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) 959 985 bh BASED(0f) 960 986 mvc __LC_SAVE_AREA(16),0(%r12) ··· 963 993 l %r12,__LC_SAVE_AREA+48 # argh 964 994 st %r15,12(%r12) 965 995 lh %r7,0x8a 966 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 967 996 cleanup_vtime: 968 997 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) 969 998 bhe BASED(cleanup_stime) ··· 973 1004 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 974 1005 cleanup_update: 975 1006 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 976 - #endif 977 1007 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) 978 1008 la %r12,__LC_RETURN_PSW 979 1009 br %r14 980 1010 cleanup_system_call_insn: 981 1011 .long sysc_saveall + 0x80000000 982 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 983 1012 .long system_call + 0x80000000 984 1013 .long sysc_vtime + 0x80000000 985 1014 .long sysc_stime + 0x80000000 986 1015 .long sysc_update + 0x80000000 987 - #endif 988 1016 989 1017 cleanup_sysc_return: 990 1018 mvc __LC_RETURN_PSW(4),0(%r12) ··· 992 1026 cleanup_sysc_leave: 993 1027 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) 994 1028 be BASED(2f) 995 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 996 1029 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 
997 1030 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) 998 1031 be BASED(2f) 999 - #endif 1000 1032 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1001 1033 c %r12,BASED(.Lmck_old_psw) 1002 1034 bne BASED(0f) ··· 1007 1043 br %r14 1008 1044 cleanup_sysc_leave_insn: 1009 1045 .long sysc_done - 4 + 0x80000000 1010 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1011 1046 .long sysc_done - 8 + 0x80000000 1012 - #endif 1013 1047 1014 1048 cleanup_io_return: 1015 1049 mvc __LC_RETURN_PSW(4),0(%r12) ··· 1018 1056 cleanup_io_leave: 1019 1057 clc 4(4,%r12),BASED(cleanup_io_leave_insn) 1020 1058 be BASED(2f) 1021 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1022 1059 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1023 1060 clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) 1024 1061 be BASED(2f) 1025 - #endif 1026 1062 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1027 1063 c %r12,BASED(.Lmck_old_psw) 1028 1064 bne BASED(0f) ··· 1033 1073 br %r14 1034 1074 cleanup_io_leave_insn: 1035 1075 .long io_done - 4 + 0x80000000 1036 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1037 1076 .long io_done - 8 + 0x80000000 1038 - #endif 1039 1077 1040 1078 /* 1041 1079 * Integer constants
+7 -49
arch/s390/kernel/entry64.S
··· 96 96 #define LOCKDEP_SYS_EXIT 97 97 #endif 98 98 99 - .macro STORE_TIMER lc_offset 100 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 101 - stpt \lc_offset 102 - #endif 103 - .endm 104 - 105 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 106 99 .macro UPDATE_VTIME lc_from,lc_to,lc_sum 107 100 lg %r10,\lc_from 108 101 slg %r10,\lc_to 109 102 alg %r10,\lc_sum 110 103 stg %r10,\lc_sum 111 104 .endm 112 - #endif 113 105 114 106 /* 115 107 * Register usage in interrupt handlers: ··· 178 186 ni \psworg+1,0xfd # clear wait state bit 179 187 .endif 180 188 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user 181 - STORE_TIMER __LC_EXIT_TIMER 189 + stpt __LC_EXIT_TIMER 182 190 lpswe \psworg # back to caller 183 191 .endm 184 192 ··· 225 233 226 234 .globl system_call 227 235 system_call: 228 - STORE_TIMER __LC_SYNC_ENTER_TIMER 236 + stpt __LC_SYNC_ENTER_TIMER 229 237 sysc_saveall: 230 238 SAVE_ALL_BASE __LC_SAVE_AREA 231 239 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 232 240 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 233 241 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 234 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 235 242 sysc_vtime: 236 243 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 237 244 sysc_stime: 238 245 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 239 246 sysc_update: 240 247 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 241 - #endif 242 248 sysc_do_svc: 243 249 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 244 250 ltgr %r7,%r7 # test for svc 0 ··· 459 469 * we just ignore the PER event (FIXME: is there anything we have to do 460 470 * for LPSW?). 
461 471 */ 462 - STORE_TIMER __LC_SYNC_ENTER_TIMER 472 + stpt __LC_SYNC_ENTER_TIMER 463 473 SAVE_ALL_BASE __LC_SAVE_AREA 464 474 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 465 475 jnz pgm_per # got per exception -> special case 466 476 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 467 477 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 468 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 469 478 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 470 479 jz pgm_no_vtime 471 480 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 472 481 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 473 482 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 474 483 pgm_no_vtime: 475 - #endif 476 484 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 477 485 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 478 486 TRACE_IRQS_OFF ··· 504 516 pgm_per_std: 505 517 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 506 518 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 507 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 508 519 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
509 520 jz pgm_no_vtime2 510 521 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 511 522 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 512 523 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 513 524 pgm_no_vtime2: 514 - #endif 515 525 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 516 526 TRACE_IRQS_OFF 517 527 lg %r1,__TI_task(%r9) ··· 531 545 pgm_svcper: 532 546 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 533 547 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 534 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 535 548 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 536 549 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 537 550 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 538 - #endif 539 551 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 540 552 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 541 553 lg %r1,__TI_task(%r9) ··· 559 575 */ 560 576 .globl io_int_handler 561 577 io_int_handler: 562 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 578 + stpt __LC_ASYNC_ENTER_TIMER 563 579 stck __LC_INT_CLOCK 564 580 SAVE_ALL_BASE __LC_SAVE_AREA+32 565 581 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 566 582 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 567 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 568 583 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
569 584 jz io_no_vtime 570 585 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 571 586 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 572 587 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 573 588 io_no_vtime: 574 - #endif 575 589 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 576 590 TRACE_IRQS_OFF 577 591 la %r2,SP_PTREGS(%r15) # address of register-save area ··· 721 739 */ 722 740 .globl ext_int_handler 723 741 ext_int_handler: 724 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 742 + stpt __LC_ASYNC_ENTER_TIMER 725 743 stck __LC_INT_CLOCK 726 744 SAVE_ALL_BASE __LC_SAVE_AREA+32 727 745 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 728 746 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 729 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 730 747 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 731 748 jz ext_no_vtime 732 749 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 733 750 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 734 751 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 735 752 ext_no_vtime: 736 - #endif 737 753 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 738 754 TRACE_IRQS_OFF 739 755 la %r2,SP_PTREGS(%r15) # address of register-save area ··· 753 773 la %r12,__LC_MCK_OLD_PSW 754 774 tm __LC_MCCK_CODE,0x80 # system damage? 755 775 jo mcck_int_main # yes -> rest of mcck code invalid 756 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 757 776 la %r14,4095 758 777 mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER 759 778 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) ··· 770 791 la %r14,__LC_LAST_UPDATE_TIMER 771 792 0: spt 0(%r14) 772 793 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 773 - 1: 774 - #endif 775 - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 794 + 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 
776 795 jno mcck_int_main # no -> skip cleanup critical 777 796 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 778 797 jnz mcck_int_main # from user -> load kernel stack ··· 786 809 jz 0f 787 810 lg %r15,__LC_PANIC_STACK # load panic stack 788 811 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 789 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 790 812 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 791 813 jno mcck_no_vtime # no -> no timer update 792 814 tm SP_PSW+1(%r15),0x01 # interrupting from user ? ··· 794 818 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 795 819 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 796 820 mcck_no_vtime: 797 - #endif 798 821 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 799 822 la %r2,SP_PTREGS(%r15) # load pt_regs 800 823 brasl %r14,s390_do_machine_check ··· 814 839 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW 815 840 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 816 841 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 817 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 818 842 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 819 843 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
820 844 jno 0f 821 845 stpt __LC_EXIT_TIMER 822 - 0: 823 - #endif 824 - lpswe __LC_RETURN_MCCK_PSW # back to caller 846 + 0: lpswe __LC_RETURN_MCCK_PSW # back to caller 825 847 826 848 /* 827 849 * Restart interruption handler, kick starter for additional CPUs ··· 936 964 j 1f 937 965 0: la %r12,__LC_SAVE_AREA+64 938 966 1: 939 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 940 967 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) 941 968 jh 0f 942 969 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 943 970 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 944 971 jhe cleanup_vtime 945 - #endif 946 972 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) 947 973 jh 0f 948 974 mvc __LC_SAVE_AREA(32),0(%r12) ··· 951 981 lg %r12,__LC_SAVE_AREA+96 # argh 952 982 stg %r15,24(%r12) 953 983 llgh %r7,__LC_SVC_INT_CODE 954 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 955 984 cleanup_vtime: 956 985 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 957 986 jhe cleanup_stime ··· 961 992 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 962 993 cleanup_update: 963 994 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 964 - #endif 965 995 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) 966 996 la %r12,__LC_RETURN_PSW 967 997 br %r14 968 998 cleanup_system_call_insn: 969 999 .quad sysc_saveall 970 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 971 1000 .quad system_call 972 1001 .quad sysc_vtime 973 1002 .quad sysc_stime 974 1003 .quad sysc_update 975 - #endif 976 1004 977 1005 cleanup_sysc_return: 978 1006 mvc __LC_RETURN_PSW(8),0(%r12) ··· 980 1014 cleanup_sysc_leave: 981 1015 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) 982 1016 je 2f 983 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 984 1017 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 985 1018 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) 986 1019 je 2f 987 - #endif 988 1020 mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 989 1021 cghi %r12,__LC_MCK_OLD_PSW 990 1022 jne 0f ··· 995 1031 br %r14 996 
1032 cleanup_sysc_leave_insn: 997 1033 .quad sysc_done - 4 998 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 999 1034 .quad sysc_done - 8 1000 - #endif 1001 1035 1002 1036 cleanup_io_return: 1003 1037 mvc __LC_RETURN_PSW(8),0(%r12) ··· 1006 1044 cleanup_io_leave: 1007 1045 clc 8(8,%r12),BASED(cleanup_io_leave_insn) 1008 1046 je 2f 1009 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1010 1047 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1011 1048 clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) 1012 1049 je 2f 1013 - #endif 1014 1050 mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 1015 1051 cghi %r12,__LC_MCK_OLD_PSW 1016 1052 jne 0f ··· 1021 1061 br %r14 1022 1062 cleanup_io_leave_insn: 1023 1063 .quad io_done - 4 1024 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1025 1064 .quad io_done - 8 1026 - #endif 1027 1065 1028 1066 /* 1029 1067 * Integer constants
-2
arch/s390/kernel/smp.c
··· 441 441 preempt_disable(); 442 442 /* Enable TOD clock interrupts on the secondary cpu. */ 443 443 init_cpu_timer(); 444 - #ifdef CONFIG_VIRT_TIMER 445 444 /* Enable cpu timer interrupts on the secondary cpu. */ 446 445 init_cpu_vtimer(); 447 - #endif 448 446 /* Enable pfault pseudo page faults on this cpu. */ 449 447 pfault_init(); 450 448
+1 -3
arch/s390/kernel/time.c
··· 286 286 287 287 /* Enable TOD clock interrupts on the boot cpu. */ 288 288 init_cpu_timer(); 289 - 290 - #ifdef CONFIG_VIRT_TIMER 289 + /* Enable cpu timer interrupts on the boot cpu. */ 291 290 vtime_init(); 292 - #endif 293 291 } 294 292 295 293 /*
-11
arch/s390/kernel/vtime.c
··· 27 27 static ext_int_info_t ext_int_info_timer; 28 28 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 29 29 30 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING 31 30 /* 32 31 * Update process times based on virtual cpu times stored by entry.S 33 32 * to the lowcore fields user_timer, system_timer & steal_clock. ··· 124 125 /* store expire time for this CPU timer */ 125 126 __get_cpu_var(virt_cpu_timer).to_expire = expires; 126 127 } 127 - #else 128 - static inline void set_vtimer(__u64 expires) 129 - { 130 - S390_lowcore.last_update_timer = expires; 131 - asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 132 - 133 - /* store expire time for this CPU timer */ 134 - __get_cpu_var(virt_cpu_timer).to_expire = expires; 135 - } 136 - #endif 137 128 138 129 void vtime_start_cpu_timer(void) 139 130 {