Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM64:

- Yet another fix for non-CPU accesses to the memory backing the
VGICv3 subsystem

- A set of fixes for the selftest checking for the S1PTW behaviour
after the fix that went in earlier in the cycle"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: selftests: aarch64: Test read-only PT memory regions
KVM: selftests: aarch64: Fix check of dirty log PT write
KVM: selftests: aarch64: Do not default to dirty PTE pages on all S1PTWs
KVM: selftests: aarch64: Relax userfaultfd read vs. write checks
KVM: arm64: Allow no running vcpu on saving vgic3 pending table
KVM: arm64: Allow no running vcpu on restoring vgic3 LPI pending status
KVM: arm64: Add helper vgic_write_guest_lock()

+132 -98
+7 -3
Documentation/virt/kvm/api.rst
··· 8070 8070 state is final and avoid missing dirty pages from another ioctl ordered 8071 8071 after the bitmap collection. 8072 8072 8073 - NOTE: One example of using the backup bitmap is saving arm64 vgic/its 8074 - tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on 8075 - KVM device "kvm-arm-vgic-its" when dirty ring is enabled. 8073 + NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its 8074 + tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on 8075 + KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through 8076 + command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device 8077 + "kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save 8078 + vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES} 8079 + command on KVM device "kvm-arm-vgic-v3". 8076 8080 8077 8081 8.30 KVM_CAP_XEN_HVM 8078 8082 --------------------
+5 -8
arch/arm64/kvm/vgic/vgic-its.c
··· 2187 2187 ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | 2188 2188 ite->collection->collection_id; 2189 2189 val = cpu_to_le64(val); 2190 - return kvm_write_guest_lock(kvm, gpa, &val, ite_esz); 2190 + return vgic_write_guest_lock(kvm, gpa, &val, ite_esz); 2191 2191 } 2192 2192 2193 2193 /** ··· 2339 2339 (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | 2340 2340 (dev->num_eventid_bits - 1)); 2341 2341 val = cpu_to_le64(val); 2342 - return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); 2342 + return vgic_write_guest_lock(kvm, ptr, &val, dte_esz); 2343 2343 } 2344 2344 2345 2345 /** ··· 2526 2526 ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | 2527 2527 collection->collection_id); 2528 2528 val = cpu_to_le64(val); 2529 - return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); 2529 + return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz); 2530 2530 } 2531 2531 2532 2532 /* ··· 2607 2607 */ 2608 2608 val = 0; 2609 2609 BUG_ON(cte_esz > sizeof(val)); 2610 - ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); 2610 + ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); 2611 2611 return ret; 2612 2612 } 2613 2613 ··· 2743 2743 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) 2744 2744 { 2745 2745 const struct vgic_its_abi *abi = vgic_its_get_abi(its); 2746 - struct vgic_dist *dist = &kvm->arch.vgic; 2747 2746 int ret = 0; 2748 2747 2749 2748 if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */ ··· 2762 2763 vgic_its_reset(kvm, its); 2763 2764 break; 2764 2765 case KVM_DEV_ARM_ITS_SAVE_TABLES: 2765 - dist->save_its_tables_in_progress = true; 2766 2766 ret = abi->save_tables(its); 2767 - dist->save_its_tables_in_progress = false; 2768 2767 break; 2769 2768 case KVM_DEV_ARM_ITS_RESTORE_TABLES: 2770 2769 ret = abi->restore_tables(its); ··· 2789 2792 { 2790 2793 struct vgic_dist *dist = &kvm->arch.vgic; 2791 2794 2792 - return dist->save_its_tables_in_progress; 2795 + return 
dist->table_write_in_progress; 2793 2796 } 2794 2797 2795 2798 static int vgic_its_set_attr(struct kvm_device *dev,
+2 -2
arch/arm64/kvm/vgic/vgic-v3.c
··· 339 339 if (status) { 340 340 /* clear consumed data */ 341 341 val &= ~(1 << bit_nr); 342 - ret = kvm_write_guest_lock(kvm, ptr, &val, 1); 342 + ret = vgic_write_guest_lock(kvm, ptr, &val, 1); 343 343 if (ret) 344 344 return ret; 345 345 } ··· 434 434 else 435 435 val &= ~(1 << bit_nr); 436 436 437 - ret = kvm_write_guest_lock(kvm, ptr, &val, 1); 437 + ret = vgic_write_guest_lock(kvm, ptr, &val, 1); 438 438 if (ret) 439 439 goto out; 440 440 }
+14
arch/arm64/kvm/vgic/vgic.h
··· 6 6 #define __KVM_ARM_VGIC_NEW_H__ 7 7 8 8 #include <linux/irqchip/arm-gic-common.h> 9 + #include <asm/kvm_mmu.h> 9 10 10 11 #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ 11 12 #define IMPLEMENTER_ARM 0x43b ··· 130 129 static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq) 131 130 { 132 131 return vgic_irq_get_lr_count(irq) > 1; 132 + } 133 + 134 + static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa, 135 + const void *data, unsigned long len) 136 + { 137 + struct vgic_dist *dist = &kvm->arch.vgic; 138 + int ret; 139 + 140 + dist->table_write_in_progress = true; 141 + ret = kvm_write_guest_lock(kvm, gpa, data, len); 142 + dist->table_write_in_progress = false; 143 + 144 + return ret; 133 145 } 134 146 135 147 /*
+1 -1
include/kvm/arm_vgic.h
··· 263 263 struct vgic_io_device dist_iodev; 264 264 265 265 bool has_its; 266 - bool save_its_tables_in_progress; 266 + bool table_write_in_progress; 267 267 268 268 /* 269 269 * Contains the attributes and gpa of the LPI configuration table.
+103 -84
tools/testing/selftests/kvm/aarch64/page_fault_test.c
··· 237 237 GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG); 238 238 } 239 239 240 + static void guest_check_no_s1ptw_wr_in_dirty_log(void) 241 + { 242 + GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG); 243 + } 244 + 240 245 static void guest_exec(void) 241 246 { 242 247 int (*code)(void) = (int (*)(void))TEST_EXEC_GVA; ··· 309 304 310 305 /* Returns true to continue the test, and false if it should be skipped. */ 311 306 static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg, 312 - struct uffd_args *args, bool expect_write) 307 + struct uffd_args *args) 313 308 { 314 309 uint64_t addr = msg->arg.pagefault.address; 315 310 uint64_t flags = msg->arg.pagefault.flags; ··· 318 313 319 314 TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING, 320 315 "The only expected UFFD mode is MISSING"); 321 - ASSERT_EQ(!!(flags & UFFD_PAGEFAULT_FLAG_WRITE), expect_write); 322 316 ASSERT_EQ(addr, (uint64_t)args->hva); 323 317 324 318 pr_debug("uffd fault: addr=%p write=%d\n", ··· 341 337 return 0; 342 338 } 343 339 344 - static int uffd_pt_write_handler(int mode, int uffd, struct uffd_msg *msg) 340 + static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg) 345 341 { 346 - return uffd_generic_handler(mode, uffd, msg, &pt_args, true); 342 + return uffd_generic_handler(mode, uffd, msg, &pt_args); 347 343 } 348 344 349 - static int uffd_data_write_handler(int mode, int uffd, struct uffd_msg *msg) 345 + static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg) 350 346 { 351 - return uffd_generic_handler(mode, uffd, msg, &data_args, true); 352 - } 353 - 354 - static int uffd_data_read_handler(int mode, int uffd, struct uffd_msg *msg) 355 - { 356 - return uffd_generic_handler(mode, uffd, msg, &data_args, false); 347 + return uffd_generic_handler(mode, uffd, msg, &data_args); 357 348 } 358 349 359 350 static void setup_uffd_args(struct userspace_mem_region *region, ··· 470 471 { 471 472 struct userspace_mem_region *data_region, *pt_region; 472 
473 bool continue_test = true; 474 + uint64_t pte_gpa, pte_pg; 473 475 474 476 data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); 475 477 pt_region = vm_get_mem_region(vm, MEM_REGION_PT); 478 + pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA)); 479 + pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize(); 476 480 477 481 if (cmd == CMD_SKIP_TEST) 478 482 continue_test = false; ··· 488 486 TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0), 489 487 "Missing write in dirty log"); 490 488 if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG) 491 - TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, 0), 489 + TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg), 492 490 "Missing s1ptw write in dirty log"); 493 491 if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG) 494 492 TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0), 495 493 "Unexpected write in dirty log"); 496 494 if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG) 497 - TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, 0), 495 + TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg), 498 496 "Unexpected s1ptw write in dirty log"); 499 497 500 498 return continue_test; ··· 799 797 .expected_events = { .uffd_faults = _uffd_faults, }, \ 800 798 } 801 799 802 - #define TEST_DIRTY_LOG(_access, _with_af, _test_check) \ 800 + #define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \ 803 801 { \ 804 802 .name = SCAT3(dirty_log, _access, _with_af), \ 805 803 .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \ ··· 807 805 .guest_prepare = { _PREPARE(_with_af), \ 808 806 _PREPARE(_access) }, \ 809 807 .guest_test = _access, \ 810 - .guest_test_check = { _CHECK(_with_af), _test_check, \ 811 - guest_check_s1ptw_wr_in_dirty_log}, \ 808 + .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \ 812 809 .expected_events = { 0 }, \ 813 810 } 814 811 815 812 #define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \ 816 - _uffd_faults, _test_check) \ 813 + 
_uffd_faults, _test_check, _pt_check) \ 817 814 { \ 818 815 .name = SCAT3(uffd_and_dirty_log, _access, _with_af), \ 819 816 .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \ ··· 821 820 _PREPARE(_access) }, \ 822 821 .guest_test = _access, \ 823 822 .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \ 824 - .guest_test_check = { _CHECK(_with_af), _test_check }, \ 823 + .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \ 825 824 .uffd_data_handler = _uffd_data_handler, \ 826 - .uffd_pt_handler = uffd_pt_write_handler, \ 825 + .uffd_pt_handler = uffd_pt_handler, \ 827 826 .expected_events = { .uffd_faults = _uffd_faults, }, \ 828 827 } 829 828 830 829 #define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \ 831 830 { \ 832 - .name = SCAT3(ro_memslot, _access, _with_af), \ 831 + .name = SCAT2(ro_memslot, _access), \ 833 832 .data_memslot_flags = KVM_MEM_READONLY, \ 833 + .pt_memslot_flags = KVM_MEM_READONLY, \ 834 834 .guest_prepare = { _PREPARE(_access) }, \ 835 835 .guest_test = _access, \ 836 836 .mmio_handler = _mmio_handler, \ ··· 842 840 { \ 843 841 .name = SCAT2(ro_memslot_no_syndrome, _access), \ 844 842 .data_memslot_flags = KVM_MEM_READONLY, \ 843 + .pt_memslot_flags = KVM_MEM_READONLY, \ 845 844 .guest_test = _access, \ 846 845 .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \ 847 846 .expected_events = { .fail_vcpu_runs = 1 }, \ ··· 851 848 #define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \ 852 849 _test_check) \ 853 850 { \ 854 - .name = SCAT3(ro_memslot, _access, _with_af), \ 851 + .name = SCAT2(ro_memslot, _access), \ 855 852 .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ 856 - .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \ 853 + .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ 857 854 .guest_prepare = { _PREPARE(_access) }, \ 858 855 .guest_test = _access, \ 859 856 .guest_test_check = { _test_check }, \ ··· 865 862 { \ 866 863 .name = 
SCAT2(ro_memslot_no_syn_and_dlog, _access), \ 867 864 .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ 868 - .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \ 865 + .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ 869 866 .guest_test = _access, \ 870 867 .guest_test_check = { _test_check }, \ 871 868 .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \ ··· 877 874 { \ 878 875 .name = SCAT2(ro_memslot_uffd, _access), \ 879 876 .data_memslot_flags = KVM_MEM_READONLY, \ 877 + .pt_memslot_flags = KVM_MEM_READONLY, \ 880 878 .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \ 881 879 .guest_prepare = { _PREPARE(_access) }, \ 882 880 .guest_test = _access, \ 883 881 .uffd_data_handler = _uffd_data_handler, \ 884 - .uffd_pt_handler = uffd_pt_write_handler, \ 882 + .uffd_pt_handler = uffd_pt_handler, \ 885 883 .mmio_handler = _mmio_handler, \ 886 884 .expected_events = { .mmio_exits = _mmio_exits, \ 887 885 .uffd_faults = _uffd_faults }, \ ··· 893 889 { \ 894 890 .name = SCAT2(ro_memslot_no_syndrome, _access), \ 895 891 .data_memslot_flags = KVM_MEM_READONLY, \ 892 + .pt_memslot_flags = KVM_MEM_READONLY, \ 896 893 .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \ 897 894 .guest_test = _access, \ 898 895 .uffd_data_handler = _uffd_data_handler, \ 899 - .uffd_pt_handler = uffd_pt_write_handler, \ 896 + .uffd_pt_handler = uffd_pt_handler, \ 900 897 .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \ 901 898 .expected_events = { .fail_vcpu_runs = 1, \ 902 899 .uffd_faults = _uffd_faults }, \ ··· 938 933 * (S1PTW). 939 934 */ 940 935 TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 941 - uffd_data_read_handler, uffd_pt_write_handler, 2), 942 - /* no_af should also lead to a PT write. */ 936 + uffd_data_handler, uffd_pt_handler, 2), 943 937 TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT, 944 - uffd_data_read_handler, uffd_pt_write_handler, 2), 945 - /* Note how that cas invokes the read handler. 
*/ 938 + uffd_data_handler, uffd_pt_handler, 2), 946 939 TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 947 - uffd_data_read_handler, uffd_pt_write_handler, 2), 940 + uffd_data_handler, uffd_pt_handler, 2), 948 941 /* 949 942 * Can't test guest_at with_af as it's IMPDEF whether the AF is set. 950 943 * The S1PTW fault should still be marked as a write. 951 944 */ 952 945 TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT, 953 - uffd_data_read_handler, uffd_pt_write_handler, 1), 946 + uffd_no_handler, uffd_pt_handler, 1), 954 947 TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 955 - uffd_data_read_handler, uffd_pt_write_handler, 2), 948 + uffd_data_handler, uffd_pt_handler, 2), 956 949 TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 957 - uffd_data_write_handler, uffd_pt_write_handler, 2), 950 + uffd_data_handler, uffd_pt_handler, 2), 958 951 TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 959 - uffd_data_write_handler, uffd_pt_write_handler, 2), 952 + uffd_data_handler, uffd_pt_handler, 2), 960 953 TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 961 - uffd_data_write_handler, uffd_pt_write_handler, 2), 954 + uffd_data_handler, uffd_pt_handler, 2), 962 955 TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT, 963 - uffd_data_read_handler, uffd_pt_write_handler, 2), 956 + uffd_data_handler, uffd_pt_handler, 2), 964 957 965 958 /* 966 959 * Try accesses when the data and PT memory regions are both 967 960 * tracked for dirty logging. 968 961 */ 969 - TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log), 970 - /* no_af should also lead to a PT write. 
*/ 971 - TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log), 972 - TEST_DIRTY_LOG(guest_ld_preidx, with_af, guest_check_no_write_in_dirty_log), 973 - TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log), 974 - TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log), 975 - TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log), 976 - TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log), 977 - TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log), 978 - TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log), 962 + TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log, 963 + guest_check_s1ptw_wr_in_dirty_log), 964 + TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log, 965 + guest_check_no_s1ptw_wr_in_dirty_log), 966 + TEST_DIRTY_LOG(guest_ld_preidx, with_af, 967 + guest_check_no_write_in_dirty_log, 968 + guest_check_s1ptw_wr_in_dirty_log), 969 + TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log, 970 + guest_check_no_s1ptw_wr_in_dirty_log), 971 + TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log, 972 + guest_check_s1ptw_wr_in_dirty_log), 973 + TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log, 974 + guest_check_s1ptw_wr_in_dirty_log), 975 + TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log, 976 + guest_check_s1ptw_wr_in_dirty_log), 977 + TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log, 978 + guest_check_s1ptw_wr_in_dirty_log), 979 + TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log, 980 + guest_check_s1ptw_wr_in_dirty_log), 979 981 980 982 /* 981 983 * Access when the data and PT memory regions are both marked for ··· 992 980 * fault, and nothing in the dirty log. Any S1PTW should result in 993 981 * a write in the dirty log and a userfaultfd write. 
994 982 */ 995 - TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, uffd_data_read_handler, 2, 996 - guest_check_no_write_in_dirty_log), 997 - /* no_af should also lead to a PT write. */ 998 - TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, uffd_data_read_handler, 2, 999 - guest_check_no_write_in_dirty_log), 1000 - TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, uffd_data_read_handler, 1001 - 2, guest_check_no_write_in_dirty_log), 1002 - TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, 0, 1, 1003 - guest_check_no_write_in_dirty_log), 1004 - TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, uffd_data_read_handler, 2, 1005 - guest_check_no_write_in_dirty_log), 1006 - TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler, 1007 - 2, guest_check_write_in_dirty_log), 1008 - TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, uffd_data_read_handler, 2, 1009 - guest_check_write_in_dirty_log), 1010 - TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, uffd_data_write_handler, 1011 - 2, guest_check_write_in_dirty_log), 983 + TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, 984 + uffd_data_handler, 2, 985 + guest_check_no_write_in_dirty_log, 986 + guest_check_s1ptw_wr_in_dirty_log), 987 + TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, 988 + uffd_data_handler, 2, 989 + guest_check_no_write_in_dirty_log, 990 + guest_check_no_s1ptw_wr_in_dirty_log), 991 + TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, 992 + uffd_data_handler, 993 + 2, guest_check_no_write_in_dirty_log, 994 + guest_check_s1ptw_wr_in_dirty_log), 995 + TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1, 996 + guest_check_no_write_in_dirty_log, 997 + guest_check_s1ptw_wr_in_dirty_log), 998 + TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, 999 + uffd_data_handler, 2, 1000 + guest_check_no_write_in_dirty_log, 1001 + guest_check_s1ptw_wr_in_dirty_log), 1002 + TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, 1003 + uffd_data_handler, 1004 + 2, guest_check_write_in_dirty_log, 1005 + guest_check_s1ptw_wr_in_dirty_log), 1006 + 
TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, 1007 + uffd_data_handler, 2, 1008 + guest_check_write_in_dirty_log, 1009 + guest_check_s1ptw_wr_in_dirty_log), 1010 + TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, 1011 + uffd_data_handler, 1012 + 2, guest_check_write_in_dirty_log, 1013 + guest_check_s1ptw_wr_in_dirty_log), 1012 1014 TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af, 1013 - uffd_data_write_handler, 2, 1014 - guest_check_write_in_dirty_log), 1015 - 1015 + uffd_data_handler, 2, 1016 + guest_check_write_in_dirty_log, 1017 + guest_check_s1ptw_wr_in_dirty_log), 1016 1018 /* 1017 - * Try accesses when the data memory region is marked read-only 1019 + * Access when both the PT and data regions are marked read-only 1018 1020 * (with KVM_MEM_READONLY). Writes with a syndrome result in an 1019 1021 * MMIO exit, writes with no syndrome (e.g., CAS) result in a 1020 1022 * failed vcpu run, and reads/execs with and without syndroms do ··· 1044 1018 TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx), 1045 1019 1046 1020 /* 1047 - * Access when both the data region is both read-only and marked 1021 + * The PT and data regions are both read-only and marked 1048 1022 * for dirty logging at the same time. The expected result is that 1049 1023 * for writes there should be no write in the dirty log. The 1050 1024 * readonly handling is the same as if the memslot was not marked ··· 1069 1043 guest_check_no_write_in_dirty_log), 1070 1044 1071 1045 /* 1072 - * Access when the data region is both read-only and punched with 1046 + * The PT and data regions are both read-only and punched with 1073 1047 * holes tracked with userfaultfd. The expected result is the 1074 1048 * union of both userfaultfd and read-only behaviors. For example, 1075 1049 * write accesses result in a userfaultfd write fault and an MMIO ··· 1077 1051 * no userfaultfd write fault. Reads result in userfaultfd getting 1078 1052 * triggered. 
1079 1053 */ 1080 - TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, 1081 - uffd_data_read_handler, 2), 1082 - TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, 1083 - uffd_data_read_handler, 2), 1084 - TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, 1085 - uffd_no_handler, 1), 1086 - TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, 1087 - uffd_data_read_handler, 2), 1054 + TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2), 1055 + TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2), 1056 + TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1), 1057 + TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2), 1088 1058 TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1, 1089 - uffd_data_write_handler, 2), 1090 - TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, 1091 - uffd_data_read_handler, 2), 1092 - TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, 1093 - uffd_no_handler, 1), 1094 - TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, 1095 - uffd_no_handler, 1), 1059 + uffd_data_handler, 2), 1060 + TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2), 1061 + TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1), 1062 + TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1), 1096 1063 1097 1064 { 0 } 1098 1065 };