KVM: x86: Mark expected switch fall-throughs

In preparation for enabling -Wimplicit-fallthrough, mark switch
cases where we are expecting to fall through.

This patch fixes the following warnings:

arch/x86/kvm/lapic.c:1037:27: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/lapic.c:1876:3: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/hyperv.c:1637:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/svm.c:4396:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/mmu.c:4372:36: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/x86.c:3835:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/x86.c:7938:23: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/vmx/vmx.c:2015:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/vmx/vmx.c:1773:6: warning: this statement may fall through [-Wimplicit-fallthrough=]

Warning level 3 was used: -Wimplicit-fallthrough=3
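
For illustration, here is a minimal, self-contained sketch of the mechanism
(the function and case values are hypothetical, not taken from the patch):
at -Wimplicit-fallthrough=3, GCC suppresses the warning when a comment such
as "fall through" sits immediately before the next case label.

    /* Build with: gcc -Wimplicit-fallthrough=3 -c fallthrough.c */
    #include <stdio.h>

    void accept_irq(int delivery_mode, int level)
    {
            switch (delivery_mode) {
            case 0:                 /* e.g. a lowest-priority mode */
                    puts("update arbitration priority");
                    /* fall through */
            case 1:                 /* e.g. a fixed mode */
                    if (!level)
                            break;
                    puts("deliver interrupt");
                    break;
            default:
                    puts("unhandled delivery mode");
                    break;
            }
    }

Without the "/* fall through */" comment (or, on newer compilers, a
__attribute__((fallthrough)) statement) the first case would trigger the
warnings shown above.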

This patch is part of the ongoing effort to enable -Wimplicit-fallthrough.

Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

---
 arch/x86/kvm/hyperv.c  | 2 +-
 arch/x86/kvm/lapic.c   | 2 ++
 arch/x86/kvm/mmu.c     | 1 +
 arch/x86/kvm/svm.c     | 2 +-
 arch/x86/kvm/vmx/vmx.c | 4 ++--
 arch/x86/kvm/x86.c     | 3 +++
 6 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1636,7 +1636,7 @@
 		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
 		if (ret != HV_STATUS_INVALID_PORT_ID)
 			break;
-		/* maybe userspace knows this conn_id: fall through */
+		/* fall through - maybe userspace knows this conn_id. */
 	case HVCALL_POST_MESSAGE:
 		/* don't bother userspace if it has no way to handle it */
 		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1035,6 +1035,7 @@
 	switch (delivery_mode) {
 	case APIC_DM_LOWEST:
 		vcpu->arch.apic_arb_prio++;
+		/* fall through */
 	case APIC_DM_FIXED:
 		if (unlikely(trig_mode && !level))
 			break;
@@ -1875,6 +1876,7 @@
 
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
+		/* fall through */
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4371,6 +4371,7 @@
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[1][4] =
 			rsvd_check->rsvd_bits_mask[0][4];
+		/* fall through */
 	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4403,7 +4403,7 @@
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
-		/* Follow through */
+		/* Fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1773,7 +1773,7 @@
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_info->index);
 		if (msr) {
@@ -2014,7 +2014,7 @@
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3834,6 +3834,8 @@
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
+		/* fall through */
+
 	case KVM_CAP_HYPERV_SYNIC:
 		if (!irqchip_in_kernel(vcpu->kvm))
 			return -EINVAL;
@@ -7938,6 +7940,7 @@
 			vcpu->arch.pv.pv_unhalted = false;
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
+			/* fall through */
 		case KVM_MP_STATE_RUNNABLE:
 			vcpu->arch.apf.halted = false;
 			break;