Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] cleanup sysc_work and io_work code

Cleanup the #ifdef mess at io_work in entry[64].S and streamline the
TIF work code of the system call and io exit path.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Martin Schwidefsky and committed by Martin Schwidefsky.

Commit hashes: 43d399d2 94038a99 (one is the parent, one the commit — order not labeled on the source page)

Diffstat (total): +64 -119
arch/s390/kernel/entry.S: +28 -47
arch/s390/kernel/entry.S
··· 301 301 #endif 302 302 303 303 # 304 - # recheck if there is more work to do 305 - # 306 - sysc_work_loop: 307 - tm __TI_flags+3(%r9),_TIF_WORK_SVC 308 - bz BASED(sysc_restore) # there is no work to do 309 - # 310 - # One of the work bits is on. Find out which one. 304 + # There is work to do, but first we need to check if we return to userspace. 311 305 # 312 306 sysc_work: 313 307 tm SP_PSW+1(%r15),0x01 # returning to user ? 314 308 bno BASED(sysc_restore) 309 + 310 + # 311 + # One of the work bits is on. Find out which one. 312 + # 313 + sysc_work_loop: 315 314 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 316 315 bo BASED(sysc_mcck_pending) 317 316 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 318 317 bo BASED(sysc_reschedule) 319 318 tm __TI_flags+3(%r9),_TIF_SIGPENDING 320 - bnz BASED(sysc_sigpending) 319 + bo BASED(sysc_sigpending) 321 320 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 322 - bnz BASED(sysc_notify_resume) 321 + bo BASED(sysc_notify_resume) 323 322 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 324 323 bo BASED(sysc_restart) 325 324 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 326 325 bo BASED(sysc_singlestep) 327 - b BASED(sysc_restore) 328 - sysc_work_done: 326 + b BASED(sysc_return) # beware of critical section cleanup 329 327 330 328 # 331 329 # _TIF_NEED_RESCHED is set, call schedule ··· 384 386 mvi SP_SVCNR+1(%r15),0xff 385 387 la %r2,SP_PTREGS(%r15) # address of register-save area 386 388 l %r1,BASED(.Lhandle_per) # load adr. of per handler 387 - la %r14,BASED(sysc_return) # load adr. of system return 389 + la %r14,BASED(sysc_work_loop) # load adr. 
of system return 388 390 br %r1 # branch to do_single_step 389 391 390 392 # ··· 634 636 #endif 635 637 636 638 # 637 - # switch to kernel stack, then check the TIF bits 639 + # There is work todo, find out in which context we have been interrupted: 640 + # 1) if we return to user space we can do all _TIF_WORK_INT work 641 + # 2) if we return to kernel code and preemptive scheduling is enabled check 642 + # the preemption counter and if it is zero call preempt_schedule_irq 643 + # Before any work can be done, a switch to the kernel stack is required. 638 644 # 639 645 io_work: 640 646 tm SP_PSW+1(%r15),0x01 # returning to user ? 641 - #ifndef CONFIG_PREEMPT 642 - bno BASED(io_restore) # no-> skip resched & signal 643 - #else 644 - bnz BASED(io_work_user) # no -> check for preemptive scheduling 647 + bo BASED(io_work_user) # yes -> do resched & signal 648 + #ifdef CONFIG_PREEMPT 645 649 # check for preemptive scheduling 646 650 icm %r0,15,__TI_precount(%r9) 647 651 bnz BASED(io_restore) # preemption disabled 652 + # switch to kernel stack 648 653 l %r1,SP_R15(%r15) 649 654 s %r1,BASED(.Lc_spsize) 650 655 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 651 656 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 652 657 lr %r15,%r1 653 658 io_resume_loop: 654 - tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 655 - bno BASED(io_restore) 656 659 l %r1,BASED(.Lpreempt_schedule_irq) 657 660 la %r14,BASED(io_resume_loop) 658 - br %r1 # call schedule 661 + tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 662 + bor %r1 # call preempt_schedule_irq 659 663 #endif 664 + b BASED(io_restore) 660 665 666 + # 667 + # Need to do work before returning to userspace, switch to kernel stack 668 + # 661 669 io_work_user: 662 670 l %r1,__LC_KERNEL_STACK 663 671 s %r1,BASED(.Lc_spsize) ··· 672 668 lr %r15,%r1 673 669 # 674 670 # One of the work bits is on. Find out which one. 
675 - # Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED 671 + # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED 676 672 # and _TIF_MCCK_PENDING 677 673 # 678 674 io_work_loop: ··· 681 677 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 682 678 bo BASED(io_reschedule) 683 679 tm __TI_flags+3(%r9),_TIF_SIGPENDING 684 - bnz BASED(io_sigpending) 680 + bo BASED(io_sigpending) 685 681 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 686 - bnz BASED(io_notify_resume) 687 - b BASED(io_restore) 688 - io_work_done: 682 + bo BASED(io_notify_resume) 683 + b BASED(io_return) # beware of critical section cleanup 689 684 690 685 # 691 686 # _TIF_MCCK_PENDING is set, call handler ··· 704 701 basr %r14,%r1 # call scheduler 705 702 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 706 703 TRACE_IRQS_OFF 707 - tm __TI_flags+3(%r9),_TIF_WORK_INT 708 - bz BASED(io_restore) # there is no work to do 709 704 b BASED(io_work_loop) 710 705 711 706 # ··· 922 921 .long sysc_return + 0x80000000, sysc_leave + 0x80000000 923 922 cleanup_table_sysc_leave: 924 923 .long sysc_leave + 0x80000000, sysc_done + 0x80000000 925 - cleanup_table_sysc_work_loop: 926 - .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 927 924 cleanup_table_io_return: 928 925 .long io_return + 0x80000000, io_leave + 0x80000000 929 926 cleanup_table_io_leave: 930 927 .long io_leave + 0x80000000, io_done + 0x80000000 931 - cleanup_table_io_work_loop: 932 - .long io_work_loop + 0x80000000, io_work_done + 0x80000000 933 928 934 929 cleanup_critical: 935 930 clc 4(4,%r12),BASED(cleanup_table_system_call) ··· 943 946 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) 944 947 bl BASED(cleanup_sysc_leave) 945 948 0: 946 - clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) 947 - bl BASED(0f) 948 - clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) 949 - bl BASED(cleanup_sysc_return) 950 - 0: 951 949 clc 4(4,%r12),BASED(cleanup_table_io_return) 952 950 bl BASED(0f) 953 951 clc 
4(4,%r12),BASED(cleanup_table_io_return+4) ··· 952 960 bl BASED(0f) 953 961 clc 4(4,%r12),BASED(cleanup_table_io_leave+4) 954 962 bl BASED(cleanup_io_leave) 955 - 0: 956 - clc 4(4,%r12),BASED(cleanup_table_io_work_loop) 957 - bl BASED(0f) 958 - clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) 959 - bl BASED(cleanup_io_work_loop) 960 963 0: 961 964 br %r14 962 965 ··· 1027 1040 cleanup_io_return: 1028 1041 mvc __LC_RETURN_PSW(4),0(%r12) 1029 1042 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return) 1030 - la %r12,__LC_RETURN_PSW 1031 - br %r14 1032 - 1033 - cleanup_io_work_loop: 1034 - mvc __LC_RETURN_PSW(4),0(%r12) 1035 - mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) 1036 1043 la %r12,__LC_RETURN_PSW 1037 1044 br %r14 1038 1045
+36 -72
arch/s390/kernel/entry64.S
··· 291 291 #endif 292 292 293 293 # 294 - # recheck if there is more work to do 295 - # 296 - sysc_work_loop: 297 - tm __TI_flags+7(%r9),_TIF_WORK_SVC 298 - jz sysc_restore # there is no work to do 299 - # 300 - # One of the work bits is on. Find out which one. 294 + # There is work to do, but first we need to check if we return to userspace. 301 295 # 302 296 sysc_work: 303 297 tm SP_PSW+1(%r15),0x01 # returning to user ? 304 298 jno sysc_restore 299 + 300 + # 301 + # One of the work bits is on. Find out which one. 302 + # 303 + sysc_work_loop: 305 304 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 306 305 jo sysc_mcck_pending 307 306 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 308 307 jo sysc_reschedule 309 308 tm __TI_flags+7(%r9),_TIF_SIGPENDING 310 - jnz sysc_sigpending 309 + jo sysc_sigpending 311 310 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 312 - jnz sysc_notify_resume 311 + jo sysc_notify_resume 313 312 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 314 313 jo sysc_restart 315 314 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 316 315 jo sysc_singlestep 317 - j sysc_restore 318 - sysc_work_done: 316 + j sysc_return # beware of critical section cleanup 319 317 320 318 # 321 319 # _TIF_NEED_RESCHED is set, call schedule 322 320 # 323 321 sysc_reschedule: 324 322 larl %r14,sysc_work_loop 325 - jg schedule # return point is sysc_return 323 + jg schedule # return point is sysc_work_loop 326 324 327 325 # 328 326 # _TIF_MCCK_PENDING is set, call handler ··· 367 369 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 368 370 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 369 371 la %r2,SP_PTREGS(%r15) # address of register-save area 370 - larl %r14,sysc_return # load adr. of system return 372 + larl %r14,sysc_work_loop # load adr. 
of system return 371 373 jg do_single_step # branch to do_sigtrap 372 374 373 375 # ··· 603 605 #endif 604 606 605 607 # 606 - # There is work todo, we need to check if we return to userspace, then 607 - # check, if we are in SIE, if yes leave it 608 + # There is work todo, find out in which context we have been interrupted: 609 + # 1) if we return to user space we can do all _TIF_WORK_INT work 610 + # 2) if we return to kernel code and kvm is enabled check if we need to 611 + # modify the psw to leave SIE 612 + # 3) if we return to kernel code and preemptive scheduling is enabled check 613 + # the preemption counter and if it is zero call preempt_schedule_irq 614 + # Before any work can be done, a switch to the kernel stack is required. 608 615 # 609 616 io_work: 610 617 tm SP_PSW+1(%r15),0x01 # returning to user ? 611 - #ifndef CONFIG_PREEMPT 618 + jo io_work_user # yes -> do resched & signal 612 619 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 613 - jnz io_work_user # yes -> no need to check for SIE 614 - la %r1, BASED(sie_opcode) # we return to kernel here 615 - lg %r2, SP_PSW+8(%r15) 616 - clc 0(2,%r1), 0(%r2) # is current instruction = SIE? 617 - jne io_restore # no-> return to kernel 618 - lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE 619 - aghi %r1, 4 620 - stg %r1, SP_PSW+8(%r15) 621 - j io_restore # return to kernel 622 - #else 623 - jno io_restore # no-> skip resched & signal 624 - #endif 625 - #else 626 - jnz io_work_user # yes -> do resched & signal 627 - #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 628 - la %r1, BASED(sie_opcode) 629 - lg %r2, SP_PSW+8(%r15) 630 - clc 0(2,%r1), 0(%r2) # is current instruction = SIE? 
620 + lg %r2,SP_PSW+8(%r15) # check if current instruction is SIE 621 + lh %r1,0(%r2) 622 + chi %r1,-19948 # signed 16 bit compare with 0xb214 631 623 jne 0f # no -> leave PSW alone 632 - lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE 633 - aghi %r1, 4 634 - stg %r1, SP_PSW+8(%r15) 624 + aghi %r2,4 # yes-> add 4 bytes to leave SIE 625 + stg %r2,SP_PSW+8(%r15) 635 626 0: 636 627 #endif 628 + #ifdef CONFIG_PREEMPT 637 629 # check for preemptive scheduling 638 630 icm %r0,15,__TI_precount(%r9) 639 631 jnz io_restore # preemption is disabled ··· 634 646 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 635 647 lgr %r15,%r1 636 648 io_resume_loop: 637 - tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 638 - jno io_restore 639 649 larl %r14,io_resume_loop 640 - jg preempt_schedule_irq 650 + tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 651 + jgo preempt_schedule_irq 641 652 #endif 653 + j io_restore 642 654 655 + # 656 + # Need to do work before returning to userspace, switch to kernel stack 657 + # 643 658 io_work_user: 644 659 lg %r1,__LC_KERNEL_STACK 645 660 aghi %r1,-SP_SIZE 646 661 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 647 662 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 648 663 lgr %r15,%r1 664 + 649 665 # 650 666 # One of the work bits is on. Find out which one. 
651 - # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED 667 + # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED 652 668 # and _TIF_MCCK_PENDING 653 669 # 654 670 io_work_loop: ··· 661 669 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 662 670 jo io_reschedule 663 671 tm __TI_flags+7(%r9),_TIF_SIGPENDING 664 - jnz io_sigpending 672 + jo io_sigpending 665 673 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 666 - jnz io_notify_resume 667 - j io_restore 668 - io_work_done: 669 - 670 - #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 671 - sie_opcode: 672 - .long 0xb2140000 673 - #endif 674 + jo io_notify_resume 675 + j io_return # beware of critical section cleanup 674 676 675 677 # 676 678 # _TIF_MCCK_PENDING is set, call handler ··· 682 696 brasl %r14,schedule # call scheduler 683 697 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 684 698 TRACE_IRQS_OFF 685 - tm __TI_flags+7(%r9),_TIF_WORK_INT 686 - jz io_restore # there is no work to do 687 699 j io_work_loop 688 700 689 701 # ··· 887 903 .quad sysc_return, sysc_leave 888 904 cleanup_table_sysc_leave: 889 905 .quad sysc_leave, sysc_done 890 - cleanup_table_sysc_work_loop: 891 - .quad sysc_work_loop, sysc_work_done 892 906 cleanup_table_io_return: 893 907 .quad io_return, io_leave 894 908 cleanup_table_io_leave: 895 909 .quad io_leave, io_done 896 - cleanup_table_io_work_loop: 897 - .quad io_work_loop, io_work_done 898 910 899 911 cleanup_critical: 900 912 clc 8(8,%r12),BASED(cleanup_table_system_call) ··· 908 928 clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) 909 929 jl cleanup_sysc_leave 910 930 0: 911 - clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) 912 - jl 0f 913 - clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) 914 - jl cleanup_sysc_return 915 - 0: 916 931 clc 8(8,%r12),BASED(cleanup_table_io_return) 917 932 jl 0f 918 933 clc 8(8,%r12),BASED(cleanup_table_io_return+8) ··· 917 942 jl 0f 918 943 clc 8(8,%r12),BASED(cleanup_table_io_leave+8) 919 944 jl 
cleanup_io_leave 920 - 0: 921 - clc 8(8,%r12),BASED(cleanup_table_io_work_loop) 922 - jl 0f 923 - clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) 924 - jl cleanup_io_work_loop 925 945 0: 926 946 br %r14 927 947 ··· 992 1022 cleanup_io_return: 993 1023 mvc __LC_RETURN_PSW(8),0(%r12) 994 1024 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return) 995 - la %r12,__LC_RETURN_PSW 996 - br %r14 997 - 998 - cleanup_io_work_loop: 999 - mvc __LC_RETURN_PSW(8),0(%r12) 1000 - mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) 1001 1025 la %r12,__LC_RETURN_PSW 1002 1026 br %r14 1003 1027