Merge tag 'kvm-s390-master-4.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux

KVM: s390: another fix for cmma migration

This fixes races and a potential use-after-free in the
CMMA migration code.

+11 -7
arch/s390/kvm/kvm-s390.c
···
 
 /*
  * Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
  */
 static int kvm_s390_vm_start_migration(struct kvm *kvm)
 {
···
 }
 
 /*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
  * kvm_s390_vm_start_migration.
  */
 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
···
 
	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+		/* We have to wait for the essa emulation to finish */
+		synchronize_srcu(&kvm->srcu);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
···
 static int kvm_s390_vm_set_migration(struct kvm *kvm,
				      struct kvm_device_attr *attr)
 {
-	int idx, res = -ENXIO;
+	int res = -ENXIO;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
-		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
-		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
···
	default:
		break;
	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->slots_lock);
 
	return res;
 }
···
		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
+		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
+		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
···
		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
+		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
+		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
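
The use-after-free closed here: a VCPU can still be inside the ESSA intercept handler, walking the migration bitmap under kvm->srcu, while userspace stops migration and frees that bitmap. Below is a minimal sketch of the reader/updater pattern the added synchronize_srcu() relies on; it is not code from this patch, and demo_srcu, demo_state, demo_essa_handler() and demo_stop_migration() are made-up names. Only the SRCU, bitmap and vmalloc calls are real kernel APIs.

/*
 * Illustration only: free the bitmap strictly after an SRCU grace period,
 * so every reader that entered its read-side section before the pointer
 * was cleared has finished with the memory.
 */
#include <linux/srcu.h>
#include <linux/vmalloc.h>
#include <linux/bitops.h>
#include <linux/slab.h>

struct demo_state {
        unsigned long *pgste_bitmap;    /* stands in for the CMMA dirty bitmap */
};

DEFINE_STATIC_SRCU(demo_srcu);
static struct demo_state *demo_state;

/* Reader: models the ESSA handler, which runs with the SRCU lock held. */
static void demo_essa_handler(unsigned long gfn)
{
        int idx = srcu_read_lock(&demo_srcu);
        struct demo_state *ms = READ_ONCE(demo_state);

        if (ms)
                set_bit(gfn, ms->pgste_bitmap); /* must not race with vfree() */
        srcu_read_unlock(&demo_srcu, idx);
}

/* Updater: models kvm_s390_vm_stop_migration() tearing the bitmap down. */
static void demo_stop_migration(void)
{
        struct demo_state *ms = demo_state;

        if (!ms)
                return;
        WRITE_ONCE(demo_state, NULL);   /* new readers see no bitmap       */
        synchronize_srcu(&demo_srcu);   /* wait out readers already inside */
        vfree(ms->pgste_bitmap);        /* now nothing can still touch it  */
        kfree(ms);
}

Without the synchronize_srcu() call, the vfree() could run while demo_essa_handler() is between srcu_read_lock() and srcu_read_unlock(), which is exactly the window the patch removes; the switch to kvm->slots_lock additionally serializes migration start/stop against the get/set CMMA ioctls.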