KVM: s390: Change guestaddr type in gaccess

All registers are unsigned long types. This patch changes all occurrences
of guestaddr in gaccess from u64 to unsigned long.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>

authored by

Martin Schwidefsky and committed by
Avi Kivity
0096369d 99e65c92

+37 -32
+33 -29
arch/s390/kvm/gaccess.h
··· 18 #include <asm/uaccess.h> 19 20 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, 21 - u64 guestaddr) 22 { 23 - u64 prefix = vcpu->arch.sie_block->prefix; 24 - u64 origin = vcpu->kvm->arch.guest_origin; 25 - u64 memsize = vcpu->kvm->arch.guest_memsize; 26 27 if (guestaddr < 2 * PAGE_SIZE) 28 guestaddr += prefix; ··· 37 return (void __user *) guestaddr; 38 } 39 40 - static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, 41 u64 *result) 42 { 43 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 47 if (IS_ERR((void __force *) uptr)) 48 return PTR_ERR((void __force *) uptr); 49 50 - return get_user(*result, (u64 __user *) uptr); 51 } 52 53 - static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, 54 u32 *result) 55 { 56 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 63 return get_user(*result, (u32 __user *) uptr); 64 } 65 66 - static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, 67 u16 *result) 68 { 69 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 76 return get_user(*result, (u16 __user *) uptr); 77 } 78 79 - static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, 80 u8 *result) 81 { 82 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 87 return get_user(*result, (u8 __user *) uptr); 88 } 89 90 - static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, 91 u64 value) 92 { 93 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 100 return put_user(value, (u64 __user *) uptr); 101 } 102 103 - static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, 104 u32 value) 105 { 106 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 113 return put_user(value, (u32 __user *) uptr); 114 } 115 116 - static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, 117 u16 value) 118 { 119 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 126 return put_user(value, (u16 __user *) uptr); 
127 } 128 129 - static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, 130 u8 value) 131 { 132 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 138 } 139 140 141 - static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest, 142 const void *from, unsigned long n) 143 { 144 int rc; ··· 154 return 0; 155 } 156 157 - static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest, 158 const void *from, unsigned long n) 159 { 160 - u64 prefix = vcpu->arch.sie_block->prefix; 161 - u64 origin = vcpu->kvm->arch.guest_origin; 162 - u64 memsize = vcpu->kvm->arch.guest_memsize; 163 164 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) 165 goto slowpath; ··· 190 } 191 192 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, 193 - u64 guestsrc, unsigned long n) 194 { 195 int rc; 196 unsigned long i; ··· 206 } 207 208 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, 209 - u64 guestsrc, unsigned long n) 210 { 211 - u64 prefix = vcpu->arch.sie_block->prefix; 212 - u64 origin = vcpu->kvm->arch.guest_origin; 213 - u64 memsize = vcpu->kvm->arch.guest_memsize; 214 215 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) 216 goto slowpath; ··· 240 return __copy_from_guest_slow(vcpu, to, guestsrc, n); 241 } 242 243 - static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest, 244 const void *from, unsigned long n) 245 { 246 - u64 origin = vcpu->kvm->arch.guest_origin; 247 - u64 memsize = vcpu->kvm->arch.guest_memsize; 248 249 if (guestdest + n > memsize) 250 return -EFAULT; ··· 259 } 260 261 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, 262 - u64 guestsrc, unsigned long n) 263 { 264 - u64 origin = vcpu->kvm->arch.guest_origin; 265 - u64 memsize = vcpu->kvm->arch.guest_memsize; 266 267 if (guestsrc + n > memsize) 268 return -EFAULT;
··· 18 #include <asm/uaccess.h> 19 20 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, 21 + unsigned long guestaddr) 22 { 23 + unsigned long prefix = vcpu->arch.sie_block->prefix; 24 + unsigned long origin = vcpu->kvm->arch.guest_origin; 25 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 26 27 if (guestaddr < 2 * PAGE_SIZE) 28 guestaddr += prefix; ··· 37 return (void __user *) guestaddr; 38 } 39 40 + static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, 41 u64 *result) 42 { 43 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 47 if (IS_ERR((void __force *) uptr)) 48 return PTR_ERR((void __force *) uptr); 49 50 + return get_user(*result, (unsigned long __user *) uptr); 51 } 52 53 + static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, 54 u32 *result) 55 { 56 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 63 return get_user(*result, (u32 __user *) uptr); 64 } 65 66 + static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, 67 u16 *result) 68 { 69 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 76 return get_user(*result, (u16 __user *) uptr); 77 } 78 79 + static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, 80 u8 *result) 81 { 82 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 87 return get_user(*result, (u8 __user *) uptr); 88 } 89 90 + static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, 91 u64 value) 92 { 93 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 100 return put_user(value, (u64 __user *) uptr); 101 } 102 103 + static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, 104 u32 value) 105 { 106 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 113 return put_user(value, (u32 __user *) uptr); 114 } 115 116 + static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, 117 u16 value) 
118 { 119 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 126 return put_user(value, (u16 __user *) uptr); 127 } 128 129 + static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, 130 u8 value) 131 { 132 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 138 } 139 140 141 + static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, 142 + unsigned long guestdest, 143 const void *from, unsigned long n) 144 { 145 int rc; ··· 153 return 0; 154 } 155 156 + static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, 157 const void *from, unsigned long n) 158 { 159 + unsigned long prefix = vcpu->arch.sie_block->prefix; 160 + unsigned long origin = vcpu->kvm->arch.guest_origin; 161 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 162 163 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) 164 goto slowpath; ··· 189 } 190 191 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, 192 + unsigned long guestsrc, 193 + unsigned long n) 194 { 195 int rc; 196 unsigned long i; ··· 204 } 205 206 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, 207 + unsigned long guestsrc, unsigned long n) 208 { 209 + unsigned long prefix = vcpu->arch.sie_block->prefix; 210 + unsigned long origin = vcpu->kvm->arch.guest_origin; 211 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 212 213 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) 214 goto slowpath; ··· 238 return __copy_from_guest_slow(vcpu, to, guestsrc, n); 239 } 240 241 + static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, 242 + unsigned long guestdest, 243 const void *from, unsigned long n) 244 { 245 + unsigned long origin = vcpu->kvm->arch.guest_origin; 246 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 247 248 if (guestdest + n > memsize) 249 return -EFAULT; ··· 256 } 257 258 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, 259 + 
unsigned long guestsrc, 260 + unsigned long n) 261 { 262 + unsigned long origin = vcpu->kvm->arch.guest_origin; 263 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 264 265 if (guestsrc + n > memsize) 266 return -EFAULT;
+3 -2
arch/s390/kvm/sigp.c
··· 43 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL 44 45 46 - static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg) 47 { 48 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 49 int rc; ··· 168 } 169 170 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 171 - u64 *reg) 172 { 173 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 174 struct kvm_s390_local_interrupt *li;
··· 43 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL 44 45 46 + static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, 47 + unsigned long *reg) 48 { 49 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 50 int rc; ··· 167 } 168 169 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 170 + unsigned long *reg) 171 { 172 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 173 struct kvm_s390_local_interrupt *li;
+1 -1
include/asm-s390/kvm_host.h
··· 231 struct kvm_s390_float_interrupt float_int; 232 }; 233 234 - extern int sie64a(struct kvm_s390_sie_block *, __u64 *); 235 #endif
··· 231 struct kvm_s390_float_interrupt float_int; 232 }; 233 234 + extern int sie64a(struct kvm_s390_sie_block *, unsigned long *); 235 #endif