KVM: s390: Change guestaddr type in gaccess

All registers are unsigned long types. This patch changes all occurrences
of guestaddr in gaccess from u64 to unsigned long.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
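
For context only (an illustration, not part of the patch): KVM for s390 is
built only on 64-bit hosts, where unsigned long and u64 have the same width,
so the type change does not alter the size of any guest address. A
hypothetical compile-time assertion documenting that assumption might look
like this:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Illustration only (not part of this patch): document the assumption
     * that unsigned long is 64 bits wide on the s390 hosts KVM supports. */
    static inline void guestaddr_width_check(void)
    {
            BUILD_BUG_ON(sizeof(unsigned long) != sizeof(u64));
    }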

Authored by Martin Schwidefsky, committed by Avi Kivity
0096369d 99e65c92

3 files changed: +37 -32

arch/s390/kvm/gaccess.h (+33 -29)
···
 #include <asm/uaccess.h>

 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-                                               u64 guestaddr)
+                                               unsigned long guestaddr)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
···
         return (void __user *) guestaddr;
 }

-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         if (IS_ERR((void __force *) uptr))
                 return PTR_ERR((void __force *) uptr);

-        return get_user(*result, (u64 __user *) uptr);
+        return get_user(*result, (unsigned long __user *) uptr);
 }

-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return get_user(*result, (u32 __user *) uptr);
 }

-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return get_user(*result, (u16 __user *) uptr);
 }

-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return get_user(*result, (u8 __user *) uptr);
 }

-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return put_user(value, (u64 __user *) uptr);
 }

-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return put_user(value, (u32 __user *) uptr);
 }

-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
         return put_user(value, (u16 __user *) uptr);
 }

-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
···
 }


-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
                                        const void *from, unsigned long n)
 {
         int rc;
···
         return 0;
 }

-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                 const void *from, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
···
 }

 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-                                         u64 guestsrc, unsigned long n)
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
         int rc;
         unsigned long i;
···
 }

 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  u64 guestsrc, unsigned long n)
+                                  unsigned long guestsrc, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
···
         return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }

-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestdest + n > memsize)
                 return -EFAULT;
···
 }

 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-                                           u64 guestsrc, unsigned long n)
+                                           unsigned long guestsrc,
+                                           unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestsrc + n > memsize)
                 return -EFAULT;
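
As a usage sketch (hypothetical caller, not part of this patch): with the
new signatures the guest address travels as an unsigned long end to end,
for example:

    #include <linux/kvm_host.h>
    #include "gaccess.h"

    /* Hypothetical example: read a doubleword at a guest address, bump it,
     * and write it back.  Errors from the user-copy path (e.g. -EFAULT or a
     * PTR_ERR value from the address translation) are returned unchanged. */
    static int example_increment_doubleword(struct kvm_vcpu *vcpu,
                                            unsigned long guestaddr)
    {
            u64 val;
            int rc;

            rc = get_guest_u64(vcpu, guestaddr, &val);
            if (rc)
                    return rc;

            return put_guest_u64(vcpu, guestaddr, val + 1);
    }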
arch/s390/kvm/sigp.c (+3 -2)
···
 #define SIGP_STAT_RECEIVER_CHECK    0x00000001UL


-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                        unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         int rc;
···
 }

 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-                             u64 *reg)
+                             unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
include/asm-s390/kvm_host.h (+1 -1)
···
         struct kvm_s390_float_interrupt float_int;
 };

-extern int sie64a(struct kvm_s390_sie_block *, __u64 *);
+extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
 #endif