Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: Use 'unsigned long' as kvm_for_each_vcpu()'s index

Everywhere we use kvm_for_each_vcpu(), we use an int as the vcpu
index. Unfortunately, we're about to rework the iterator,
which requires this to be upgraded to an unsigned long.

Let's bite the bullet and repaint all of it in one go.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Message-Id: <20211116160403.4074052-7-maz@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Marc Zyngier and committed by
Paolo Bonzini
46808a4c c5b07754

+118 -104
+4 -4
arch/arm64/kvm/arch_timer.c
··· 750 750 /* Make the updates of cntvoff for all vtimer contexts atomic */ 751 751 static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) 752 752 { 753 - int i; 753 + unsigned long i; 754 754 struct kvm *kvm = vcpu->kvm; 755 755 struct kvm_vcpu *tmp; 756 756 ··· 1189 1189 1190 1190 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) 1191 1191 { 1192 - int vtimer_irq, ptimer_irq; 1193 - int i, ret; 1192 + int vtimer_irq, ptimer_irq, ret; 1193 + unsigned long i; 1194 1194 1195 1195 vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; 1196 1196 ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); ··· 1297 1297 static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) 1298 1298 { 1299 1299 struct kvm_vcpu *vcpu; 1300 - int i; 1300 + unsigned long i; 1301 1301 1302 1302 kvm_for_each_vcpu(i, vcpu, kvm) { 1303 1303 vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
+3 -3
arch/arm64/kvm/arm.c
··· 631 631 632 632 void kvm_arm_halt_guest(struct kvm *kvm) 633 633 { 634 - int i; 634 + unsigned long i; 635 635 struct kvm_vcpu *vcpu; 636 636 637 637 kvm_for_each_vcpu(i, vcpu, kvm) ··· 641 641 642 642 void kvm_arm_resume_guest(struct kvm *kvm) 643 643 { 644 - int i; 644 + unsigned long i; 645 645 struct kvm_vcpu *vcpu; 646 646 647 647 kvm_for_each_vcpu(i, vcpu, kvm) { ··· 2027 2027 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) 2028 2028 { 2029 2029 struct kvm_vcpu *vcpu; 2030 - int i; 2030 + unsigned long i; 2031 2031 2032 2032 mpidr &= MPIDR_HWID_BITMASK; 2033 2033 kvm_for_each_vcpu(i, vcpu, kvm) {
+1 -1
arch/arm64/kvm/pmu-emul.c
··· 900 900 */ 901 901 static bool pmu_irq_is_valid(struct kvm *kvm, int irq) 902 902 { 903 - int i; 903 + unsigned long i; 904 904 struct kvm_vcpu *vcpu; 905 905 906 906 kvm_for_each_vcpu(i, vcpu, kvm) {
+3 -3
arch/arm64/kvm/psci.c
··· 121 121 122 122 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) 123 123 { 124 - int i, matching_cpus = 0; 125 - unsigned long mpidr; 124 + int matching_cpus = 0; 125 + unsigned long i, mpidr; 126 126 unsigned long target_affinity; 127 127 unsigned long target_affinity_mask; 128 128 unsigned long lowest_affinity_level; ··· 164 164 165 165 static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) 166 166 { 167 - int i; 167 + unsigned long i; 168 168 struct kvm_vcpu *tmp; 169 169 170 170 /*
+1 -1
arch/arm64/kvm/reset.c
··· 170 170 { 171 171 struct kvm_vcpu *tmp; 172 172 bool is32bit; 173 - int i; 173 + unsigned long i; 174 174 175 175 is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); 176 176 if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+6 -4
arch/arm64/kvm/vgic/vgic-init.c
··· 70 70 */ 71 71 int kvm_vgic_create(struct kvm *kvm, u32 type) 72 72 { 73 - int i, ret; 74 73 struct kvm_vcpu *vcpu; 74 + unsigned long i; 75 + int ret; 75 76 76 77 if (irqchip_in_kernel(kvm)) 77 78 return -EEXIST; ··· 256 255 { 257 256 struct vgic_dist *dist = &kvm->arch.vgic; 258 257 struct kvm_vcpu *vcpu; 259 - int ret = 0, i, idx; 258 + int ret = 0, i; 259 + unsigned long idx; 260 260 261 261 if (vgic_initialized(kvm)) 262 262 return 0; ··· 310 308 goto out; 311 309 } 312 310 313 - kvm_for_each_vcpu(i, vcpu, kvm) 311 + kvm_for_each_vcpu(idx, vcpu, kvm) 314 312 kvm_vgic_vcpu_enable(vcpu); 315 313 316 314 ret = kvm_vgic_setup_default_irq_routing(kvm); ··· 372 370 static void __kvm_vgic_destroy(struct kvm *kvm) 373 371 { 374 372 struct kvm_vcpu *vcpu; 375 - int i; 373 + unsigned long i; 376 374 377 375 vgic_debug_destroy(kvm); 378 376
+1 -1
arch/arm64/kvm/vgic/vgic-kvm-device.c
··· 325 325 bool lock_all_vcpus(struct kvm *kvm) 326 326 { 327 327 struct kvm_vcpu *tmp_vcpu; 328 - int c; 328 + unsigned long c; 329 329 330 330 /* 331 331 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+1 -2
arch/arm64/kvm/vgic/vgic-mmio-v2.c
··· 113 113 int intid = val & 0xf; 114 114 int targets = (val >> 16) & 0xff; 115 115 int mode = (val >> 24) & 0x03; 116 - int c; 117 116 struct kvm_vcpu *vcpu; 118 - unsigned long flags; 117 + unsigned long flags, c; 119 118 120 119 switch (mode) { 121 120 case 0x0: /* as specified by targets */
+4 -3
arch/arm64/kvm/vgic/vgic-mmio-v3.c
··· 754 754 static int vgic_register_all_redist_iodevs(struct kvm *kvm) 755 755 { 756 756 struct kvm_vcpu *vcpu; 757 - int c, ret = 0; 757 + unsigned long c; 758 + int ret = 0; 758 759 759 760 kvm_for_each_vcpu(c, vcpu, kvm) { 760 761 ret = vgic_register_redist_iodev(vcpu); ··· 996 995 struct kvm_vcpu *c_vcpu; 997 996 u16 target_cpus; 998 997 u64 mpidr; 999 - int sgi, c; 998 + int sgi; 1000 999 int vcpu_id = vcpu->vcpu_id; 1001 1000 bool broadcast; 1002 - unsigned long flags; 1001 + unsigned long c, flags; 1003 1002 1004 1003 sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; 1005 1004 broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+2 -2
arch/arm64/kvm/vgic/vgic-v3.c
··· 542 542 struct vgic_dist *dist = &kvm->arch.vgic; 543 543 struct kvm_vcpu *vcpu; 544 544 int ret = 0; 545 - int c; 545 + unsigned long c; 546 546 547 547 kvm_for_each_vcpu(c, vcpu, kvm) { 548 548 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 549 549 550 550 if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) { 551 - kvm_debug("vcpu %d redistributor base not set\n", c); 551 + kvm_debug("vcpu %ld redistributor base not set\n", c); 552 552 return -ENXIO; 553 553 } 554 554 }
+3 -2
arch/arm64/kvm/vgic/vgic-v4.c
··· 189 189 { 190 190 struct vgic_dist *dist = &kvm->arch.vgic; 191 191 struct kvm_vcpu *vcpu; 192 - int i; 192 + unsigned long i; 193 193 194 194 kvm_arm_halt_guest(kvm); 195 195 ··· 235 235 { 236 236 struct vgic_dist *dist = &kvm->arch.vgic; 237 237 struct kvm_vcpu *vcpu; 238 - int i, nr_vcpus, ret; 238 + int nr_vcpus, ret; 239 + unsigned long i; 239 240 240 241 if (!kvm_vgic_global_state.has_gicv4) 241 242 return 0; /* Nothing to see here... move along. */
+1 -1
arch/arm64/kvm/vgic/vgic.c
··· 990 990 void vgic_kick_vcpus(struct kvm *kvm) 991 991 { 992 992 struct kvm_vcpu *vcpu; 993 - int c; 993 + unsigned long c; 994 994 995 995 /* 996 996 * We've injected an interrupt, time to find out who deserves
+1 -1
arch/powerpc/kvm/book3s_32_mmu.c
··· 337 337 338 338 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) 339 339 { 340 - int i; 340 + unsigned long i; 341 341 struct kvm_vcpu *v; 342 342 343 343 /* flush this VA on all cpus */
+1 -1
arch/powerpc/kvm/book3s_64_mmu.c
··· 530 530 bool large) 531 531 { 532 532 u64 mask = 0xFFFFFFFFFULL; 533 - long i; 533 + unsigned long i; 534 534 struct kvm_vcpu *v; 535 535 536 536 dprintk("KVM MMU: tlbie(0x%lx)\n", va);
+4 -4
arch/powerpc/kvm/book3s_hv.c
··· 1993 1993 */ 1994 1994 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 1995 1995 struct kvm_vcpu *vcpu; 1996 - int i; 1996 + unsigned long i; 1997 1997 1998 1998 kvm_for_each_vcpu(i, vcpu, kvm) { 1999 1999 if (vcpu->arch.vcore != vc) ··· 4786 4786 { 4787 4787 struct kvm_memslots *slots; 4788 4788 struct kvm_memory_slot *memslot; 4789 - int i, r; 4790 - unsigned long n; 4789 + int r; 4790 + unsigned long n, i; 4791 4791 unsigned long *buf, *p; 4792 4792 struct kvm_vcpu *vcpu; 4793 4793 ··· 5861 5861 int mmu_was_ready; 5862 5862 int srcu_idx; 5863 5863 int ret = 0; 5864 - int i; 5864 + unsigned long i; 5865 5865 5866 5866 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) 5867 5867 return ret;
+1 -1
arch/powerpc/kvm/book3s_pr.c
··· 428 428 /************* MMU Notifiers *************/ 429 429 static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 430 430 { 431 - long i; 431 + unsigned long i; 432 432 struct kvm_vcpu *vcpu; 433 433 434 434 kvm_for_each_vcpu(i, vcpu, kvm)
+3 -3
arch/powerpc/kvm/book3s_xics.c
··· 942 942 struct kvmppc_xics *xics = m->private; 943 943 struct kvm *kvm = xics->kvm; 944 944 struct kvm_vcpu *vcpu; 945 - int icsid, i; 946 - unsigned long flags; 945 + int icsid; 946 + unsigned long flags, i; 947 947 unsigned long t_rm_kick_vcpu, t_rm_check_resend; 948 948 unsigned long t_rm_notify_eoi; 949 949 unsigned long t_reject, t_check_resend; ··· 1340 1340 static void kvmppc_xics_release(struct kvm_device *dev) 1341 1341 { 1342 1342 struct kvmppc_xics *xics = dev->private; 1343 - int i; 1343 + unsigned long i; 1344 1344 struct kvm *kvm = xics->kvm; 1345 1345 struct kvm_vcpu *vcpu; 1346 1346
+1 -1
arch/powerpc/kvm/book3s_xics.h
··· 116 116 u32 nr) 117 117 { 118 118 struct kvm_vcpu *vcpu = NULL; 119 - int i; 119 + unsigned long i; 120 120 121 121 kvm_for_each_vcpu(i, vcpu, kvm) { 122 122 if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
+9 -6
arch/powerpc/kvm/book3s_xive.c
··· 368 368 { 369 369 struct kvmppc_xive *xive = kvm->arch.xive; 370 370 struct kvm_vcpu *vcpu; 371 - int i, rc; 371 + unsigned long i; 372 + int rc; 372 373 373 374 lockdep_assert_held(&xive->lock); 374 375 ··· 440 439 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) 441 440 { 442 441 struct kvm_vcpu *vcpu; 443 - int i, rc; 442 + unsigned long i; 443 + int rc; 444 444 445 445 /* Locate target server */ 446 446 vcpu = kvmppc_xive_find_server(kvm, *server); ··· 1521 1519 static void xive_pre_save_scan(struct kvmppc_xive *xive) 1522 1520 { 1523 1521 struct kvm_vcpu *vcpu = NULL; 1524 - int i, j; 1522 + unsigned long i; 1523 + int j; 1525 1524 1526 1525 /* 1527 1526 * See comment in xive_get_source() about how this ··· 1703 1700 { 1704 1701 struct kvm *kvm = xive->kvm; 1705 1702 struct kvm_vcpu *vcpu = NULL; 1706 - int i; 1703 + unsigned long i; 1707 1704 1708 1705 kvm_for_each_vcpu(i, vcpu, kvm) { 1709 1706 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; ··· 2040 2037 struct kvmppc_xive *xive = dev->private; 2041 2038 struct kvm *kvm = xive->kvm; 2042 2039 struct kvm_vcpu *vcpu; 2043 - int i; 2040 + unsigned long i; 2044 2041 2045 2042 pr_devel("Releasing xive device\n"); 2046 2043 ··· 2294 2291 u64 t_vm_h_cppr = 0; 2295 2292 u64 t_vm_h_eoi = 0; 2296 2293 u64 t_vm_h_ipi = 0; 2297 - unsigned int i; 2294 + unsigned long i; 2298 2295 2299 2296 if (!kvm) 2300 2297 return 0;
+2 -2
arch/powerpc/kvm/book3s_xive.h
··· 199 199 static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr) 200 200 { 201 201 struct kvm_vcpu *vcpu = NULL; 202 - int i; 202 + unsigned long i; 203 203 204 204 kvm_for_each_vcpu(i, vcpu, kvm) { 205 205 if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) ··· 240 240 static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id) 241 241 { 242 242 struct kvm_vcpu *vcpu = NULL; 243 - int i; 243 + unsigned long i; 244 244 245 245 kvm_for_each_vcpu(i, vcpu, kvm) { 246 246 if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+4 -4
arch/powerpc/kvm/book3s_xive_native.c
··· 807 807 { 808 808 struct kvm *kvm = xive->kvm; 809 809 struct kvm_vcpu *vcpu; 810 - unsigned int i; 810 + unsigned long i; 811 811 812 812 pr_devel("%s\n", __func__); 813 813 ··· 916 916 { 917 917 struct kvm *kvm = xive->kvm; 918 918 struct kvm_vcpu *vcpu; 919 - unsigned int i; 919 + unsigned long i; 920 920 921 921 pr_devel("%s\n", __func__); 922 922 ··· 1017 1017 struct kvmppc_xive *xive = dev->private; 1018 1018 struct kvm *kvm = xive->kvm; 1019 1019 struct kvm_vcpu *vcpu; 1020 - int i; 1020 + unsigned long i; 1021 1021 1022 1022 pr_devel("Releasing xive native device\n"); 1023 1023 ··· 1214 1214 struct kvmppc_xive *xive = m->private; 1215 1215 struct kvm *kvm = xive->kvm; 1216 1216 struct kvm_vcpu *vcpu; 1217 - unsigned int i; 1217 + unsigned long i; 1218 1218 1219 1219 if (!kvm) 1220 1220 return 0;
+1 -1
arch/powerpc/kvm/e500_emulate.c
··· 65 65 ulong param = vcpu->arch.regs.gpr[rb]; 66 66 int prio = dbell2prio(rb); 67 67 int pir = param & PPC_DBELL_PIR_MASK; 68 - int i; 68 + unsigned long i; 69 69 struct kvm_vcpu *cvcpu; 70 70 71 71 if (prio < 0)
+1 -1
arch/riscv/kvm/vcpu_sbi.c
··· 60 60 static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, 61 61 struct kvm_run *run, u32 type) 62 62 { 63 - int i; 63 + unsigned long i; 64 64 struct kvm_vcpu *tmp; 65 65 66 66 kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+1 -1
arch/riscv/kvm/vmid.c
··· 65 65 66 66 void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu) 67 67 { 68 - int i; 68 + unsigned long i; 69 69 struct kvm_vcpu *v; 70 70 struct cpumask hmask; 71 71 struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;
+1 -1
arch/s390/kvm/interrupt.c
··· 2659 2659 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2660 2660 { 2661 2661 int r = 0; 2662 - unsigned int i; 2662 + unsigned long i; 2663 2663 struct kvm_vcpu *vcpu; 2664 2664 2665 2665 switch (attr->group) {
+11 -10
arch/s390/kvm/kvm-s390.c
··· 295 295 { 296 296 struct kvm *kvm; 297 297 struct kvm_vcpu *vcpu; 298 - int i; 298 + unsigned long i; 299 299 unsigned long long *delta = v; 300 300 301 301 list_for_each_entry(kvm, &vm_list, vm_list) { ··· 682 682 683 683 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) 684 684 { 685 - unsigned int i; 685 + unsigned long i; 686 686 struct kvm_vcpu *vcpu; 687 687 688 688 kvm_for_each_vcpu(i, vcpu, kvm) { ··· 936 936 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) 937 937 { 938 938 struct kvm_vcpu *vcpu; 939 - int i; 939 + unsigned long i; 940 940 941 941 kvm_s390_vcpu_block_all(kvm); 942 942 ··· 1021 1021 1022 1022 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) 1023 1023 { 1024 - int cx; 1024 + unsigned long cx; 1025 1025 struct kvm_vcpu *vcpu; 1026 1026 1027 1027 kvm_for_each_vcpu(cx, vcpu, kvm) ··· 2206 2206 struct kvm_vcpu *vcpu; 2207 2207 u16 rc, rrc; 2208 2208 int ret = 0; 2209 - int i; 2209 + unsigned long i; 2210 2210 2211 2211 /* 2212 2212 * We ignore failures and try to destroy as many CPUs as possible. 
··· 2230 2230 2231 2231 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) 2232 2232 { 2233 - int i, r = 0; 2233 + unsigned long i; 2234 + int r = 0; 2234 2235 u16 dummy; 2235 2236 2236 2237 struct kvm_vcpu *vcpu; ··· 2930 2929 struct bsca_block *old_sca = kvm->arch.sca; 2931 2930 struct esca_block *new_sca; 2932 2931 struct kvm_vcpu *vcpu; 2933 - unsigned int vcpu_idx; 2932 + unsigned long vcpu_idx; 2934 2933 u32 scaol, scaoh; 2935 2934 2936 2935 if (kvm->arch.use_esca) ··· 3412 3411 struct kvm *kvm = gmap->private; 3413 3412 struct kvm_vcpu *vcpu; 3414 3413 unsigned long prefix; 3415 - int i; 3414 + unsigned long i; 3416 3415 3417 3416 if (gmap_is_shadow(gmap)) 3418 3417 return; ··· 3905 3904 { 3906 3905 struct kvm_vcpu *vcpu; 3907 3906 union tod_clock clk; 3908 - int i; 3907 + unsigned long i; 3909 3908 3910 3909 mutex_lock(&kvm->lock); 3911 3910 preempt_disable(); ··· 4537 4536 4538 4537 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 4539 4538 { 4540 - unsigned int i; 4539 + unsigned long i; 4541 4540 struct kvm_vcpu *vcpu; 4542 4541 4543 4542 kvm_for_each_vcpu(i, vcpu, kvm) {
+2 -2
arch/s390/kvm/kvm-s390.h
··· 357 357 358 358 static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) 359 359 { 360 - int i; 360 + unsigned long i; 361 361 struct kvm_vcpu *vcpu; 362 362 363 363 WARN_ON(!mutex_is_locked(&kvm->lock)); ··· 367 367 368 368 static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) 369 369 { 370 - int i; 370 + unsigned long i; 371 371 struct kvm_vcpu *vcpu; 372 372 373 373 kvm_for_each_vcpu(i, vcpu, kvm)
+4 -3
arch/x86/kvm/hyperv.c
··· 164 164 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) 165 165 { 166 166 struct kvm_vcpu *vcpu = NULL; 167 - int i; 167 + unsigned long i; 168 168 169 169 if (vpidx >= KVM_MAX_VCPUS) 170 170 return NULL; ··· 1716 1716 { 1717 1717 struct kvm_hv *hv = to_kvm_hv(kvm); 1718 1718 struct kvm_vcpu *vcpu; 1719 - int i, bank, sbank = 0; 1719 + int bank, sbank = 0; 1720 + unsigned long i; 1720 1721 1721 1722 memset(vp_bitmap, 0, 1722 1723 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); ··· 1864 1863 .vector = vector 1865 1864 }; 1866 1865 struct kvm_vcpu *vcpu; 1867 - int i; 1866 + unsigned long i; 1868 1867 1869 1868 kvm_for_each_vcpu(i, vcpu, kvm) { 1870 1869 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+1 -1
arch/x86/kvm/i8254.c
··· 242 242 struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); 243 243 struct kvm *kvm = pit->kvm; 244 244 struct kvm_vcpu *vcpu; 245 - int i; 245 + unsigned long i; 246 246 struct kvm_kpit_state *ps = &pit->pit_state; 247 247 248 248 if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
+3 -2
arch/x86/kvm/i8259.c
··· 50 50 { 51 51 bool wakeup = s->wakeup_needed; 52 52 struct kvm_vcpu *vcpu; 53 - int i; 53 + unsigned long i; 54 54 55 55 s->wakeup_needed = false; 56 56 ··· 270 270 271 271 static void kvm_pic_reset(struct kvm_kpic_state *s) 272 272 { 273 - int irq, i; 273 + int irq; 274 + unsigned long i; 274 275 struct kvm_vcpu *vcpu; 275 276 u8 edge_irr = s->irr & ~s->elcr; 276 277 bool found = false;
+2 -2
arch/x86/kvm/ioapic.c
··· 149 149 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) 150 150 { 151 151 struct kvm_vcpu *vcpu; 152 - int i; 152 + unsigned long i; 153 153 154 154 if (RTC_GSI >= IOAPIC_NUM_PINS) 155 155 return; ··· 184 184 185 185 static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq) 186 186 { 187 - int i; 187 + unsigned long i; 188 188 struct kvm_vcpu *vcpu; 189 189 union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; 190 190
+4 -3
arch/x86/kvm/irq_comm.c
··· 45 45 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, 46 46 struct kvm_lapic_irq *irq, struct dest_map *dest_map) 47 47 { 48 - int i, r = -1; 48 + int r = -1; 49 49 struct kvm_vcpu *vcpu, *lowest = NULL; 50 - unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; 50 + unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; 51 51 unsigned int dest_vcpus = 0; 52 52 53 53 if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) ··· 320 320 bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, 321 321 struct kvm_vcpu **dest_vcpu) 322 322 { 323 - int i, r = 0; 323 + int r = 0; 324 + unsigned long i; 324 325 struct kvm_vcpu *vcpu; 325 326 326 327 if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
+2 -1
arch/x86/kvm/kvm_onhyperv.c
··· 33 33 { 34 34 struct kvm_arch *kvm_arch = &kvm->arch; 35 35 struct kvm_vcpu *vcpu; 36 - int ret = 0, i, nr_unique_valid_roots; 36 + int ret = 0, nr_unique_valid_roots; 37 + unsigned long i; 37 38 hpa_t root; 38 39 39 40 spin_lock(&kvm_arch->hv_root_tdp_lock);
+3 -3
arch/x86/kvm/lapic.c
··· 185 185 { 186 186 struct kvm_apic_map *new, *old = NULL; 187 187 struct kvm_vcpu *vcpu; 188 - int i; 188 + unsigned long i; 189 189 u32 max_id = 255; /* enough space for any xAPIC ID */ 190 190 191 191 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */ ··· 1172 1172 struct kvm_lapic *src = NULL; 1173 1173 struct kvm_apic_map *map; 1174 1174 struct kvm_vcpu *vcpu; 1175 - unsigned long bitmap; 1176 - int i, vcpu_idx; 1175 + unsigned long bitmap, i; 1176 + int vcpu_idx; 1177 1177 bool ret; 1178 1178 1179 1179 rcu_read_lock();
+1 -1
arch/x86/kvm/svm/avic.c
··· 293 293 u32 icrl, u32 icrh) 294 294 { 295 295 struct kvm_vcpu *vcpu; 296 - int i; 296 + unsigned long i; 297 297 298 298 kvm_for_each_vcpu(i, vcpu, kvm) { 299 299 bool m = kvm_apic_match_dest(vcpu, source,
+5 -4
arch/x86/kvm/svm/sev.c
··· 636 636 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) 637 637 { 638 638 struct kvm_vcpu *vcpu; 639 - int i, ret; 639 + unsigned long i; 640 + int ret; 640 641 641 642 if (!sev_es_guest(kvm)) 642 643 return -ENOTTY; ··· 1594 1593 static int sev_lock_vcpus_for_migration(struct kvm *kvm) 1595 1594 { 1596 1595 struct kvm_vcpu *vcpu; 1597 - int i, j; 1596 + unsigned long i, j; 1598 1597 1599 1598 kvm_for_each_vcpu(i, vcpu, kvm) { 1600 1599 if (mutex_lock_killable(&vcpu->mutex)) ··· 1616 1615 static void sev_unlock_vcpus_for_migration(struct kvm *kvm) 1617 1616 { 1618 1617 struct kvm_vcpu *vcpu; 1619 - int i; 1618 + unsigned long i; 1620 1619 1621 1620 kvm_for_each_vcpu(i, vcpu, kvm) { 1622 1621 mutex_unlock(&vcpu->mutex); ··· 1643 1642 1644 1643 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) 1645 1644 { 1646 - int i; 1645 + unsigned long i; 1647 1646 struct kvm_vcpu *dst_vcpu, *src_vcpu; 1648 1647 struct vcpu_svm *dst_svm, *src_svm; 1649 1648
+12 -11
arch/x86/kvm/x86.c
··· 2816 2816 { 2817 2817 struct kvm_arch *ka = &kvm->arch; 2818 2818 struct kvm_vcpu *vcpu; 2819 - int i; 2819 + unsigned long i; 2820 2820 2821 2821 write_seqcount_end(&ka->pvclock_sc); 2822 2822 raw_spin_unlock_irq(&ka->tsc_write_lock); ··· 3065 3065 3066 3066 static void kvmclock_update_fn(struct work_struct *work) 3067 3067 { 3068 - int i; 3068 + unsigned long i; 3069 3069 struct delayed_work *dwork = to_delayed_work(work); 3070 3070 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3071 3071 kvmclock_update_work); ··· 5692 5692 * VM-Exit. 5693 5693 */ 5694 5694 struct kvm_vcpu *vcpu; 5695 - int i; 5695 + unsigned long i; 5696 5696 5697 5697 kvm_for_each_vcpu(i, vcpu, kvm) 5698 5698 kvm_vcpu_kick(vcpu); ··· 5961 5961 static int kvm_arch_suspend_notifier(struct kvm *kvm) 5962 5962 { 5963 5963 struct kvm_vcpu *vcpu; 5964 - int i, ret = 0; 5964 + unsigned long i; 5965 + int ret = 0; 5965 5966 5966 5967 mutex_lock(&kvm->lock); 5967 5968 kvm_for_each_vcpu(i, vcpu, kvm) { ··· 8389 8388 { 8390 8389 struct kvm *kvm; 8391 8390 struct kvm_vcpu *vcpu; 8392 - int i, send_ipi = 0; 8391 + int send_ipi = 0; 8392 + unsigned long i; 8393 8393 8394 8394 /* 8395 8395 * We allow guests to temporarily run on slowing clocks, ··· 8563 8561 static void pvclock_gtod_update_fn(struct work_struct *work) 8564 8562 { 8565 8563 struct kvm *kvm; 8566 - 8567 8564 struct kvm_vcpu *vcpu; 8568 - int i; 8565 + unsigned long i; 8569 8566 8570 8567 mutex_lock(&kvm_lock); 8571 8568 list_for_each_entry(kvm, &vm_list, vm_list) ··· 10673 10672 { 10674 10673 bool inhibit = false; 10675 10674 struct kvm_vcpu *vcpu; 10676 - int i; 10675 + unsigned long i; 10677 10676 10678 10677 down_write(&kvm->arch.apicv_update_lock); 10679 10678 ··· 11161 11160 { 11162 11161 struct kvm *kvm; 11163 11162 struct kvm_vcpu *vcpu; 11164 - int i; 11163 + unsigned long i; 11165 11164 int ret; 11166 11165 u64 local_tsc; 11167 11166 u64 max_tsc = 0; ··· 11414 11413 11415 11414 static void kvm_free_vcpus(struct kvm 
*kvm) 11416 11415 { 11417 - unsigned int i; 11416 + unsigned long i; 11418 11417 struct kvm_vcpu *vcpu; 11419 11418 11420 11419 /* ··· 11660 11659 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 11661 11660 { 11662 11661 struct kvm_vcpu *vcpu; 11663 - int i; 11662 + unsigned long i; 11664 11663 11665 11664 /* 11666 11665 * memslots->generation has been incremented.
+1 -1
include/linux/kvm_host.h
··· 714 714 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) 715 715 { 716 716 struct kvm_vcpu *vcpu = NULL; 717 - int i; 717 + unsigned long i; 718 718 719 719 if (id < 0) 720 720 return NULL;
+7 -6
virt/kvm/kvm_main.c
··· 305 305 { 306 306 struct kvm_vcpu *vcpu; 307 307 struct cpumask *cpus; 308 + unsigned long i; 308 309 bool called; 309 - int i, me; 310 + int me; 310 311 311 312 me = get_cpu(); 312 313 ··· 454 453 455 454 void kvm_destroy_vcpus(struct kvm *kvm) 456 455 { 457 - unsigned int i; 456 + unsigned long i; 458 457 struct kvm_vcpu *vcpu; 459 458 460 459 kvm_for_each_vcpu(i, vcpu, kvm) { ··· 3390 3389 struct kvm *kvm = me->kvm; 3391 3390 struct kvm_vcpu *vcpu; 3392 3391 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3392 + unsigned long i; 3393 3393 int yielded = 0; 3394 3394 int try = 3; 3395 3395 int pass; 3396 - int i; 3397 3396 3398 3397 kvm_vcpu_set_in_spin_loop(me, true); 3399 3398 /* ··· 4202 4201 4203 4202 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4204 4203 { 4205 - int i; 4204 + unsigned long i; 4206 4205 struct kvm_vcpu *vcpu; 4207 4206 int cleared = 0; 4208 4207 ··· 5121 5120 5122 5121 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5123 5122 { 5124 - int i; 5123 + unsigned long i; 5125 5124 struct kvm_vcpu *vcpu; 5126 5125 5127 5126 *val = 0; ··· 5134 5133 5135 5134 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5136 5135 { 5137 - int i; 5136 + unsigned long i; 5138 5137 struct kvm_vcpu *vcpu; 5139 5138 5140 5139 kvm_for_each_vcpu(i, vcpu, kvm)