Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at tag v4.14 (334 lines, 8.8 kB)

/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>

#include <uapi/linux/psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we emulate VCPU suspend the same way as
	 * WFI (Wait-for-interrupt).
	 *
	 * For KVM this means the wakeup events are interrupts, which is
	 * consistent with the intended use of StateID as described in
	 * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we treat a power-down request the same as a stand-by
	 * request, as per section 5.4.2 clause 3 of the PSCI v0.2
	 * specification (ARM DEN 0022A). This means all suspend states
	 * for KVM preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

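/*
 * Editorial sketch (not part of the original file): the handlers above
 * run when a guest traps with an HVC whose PSCI function ID is in
 * r0/x0. Below is a minimal, hypothetical AArch32 guest-side call to
 * CPU_OFF, assuming the PSCI 0.2 SMC32 function ID 0x84000002; it is
 * guarded out so the file still builds unchanged.
 */
#if 0
static long guest_psci_cpu_off_example(void)
{
	register unsigned long r0 asm("r0") = 0x84000002; /* CPU_OFF */

	/* HVC #0 is the conduit this file emulates; returns only on error */
	asm volatile("hvc #0" : "+r" (r0) : : "memory");
	return r0;	/* a PSCI error code, e.g. DENIED */
}
#endif
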
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	struct swait_queue_head *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	phys_addr_t target_pc;

	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	target_pc = vcpu_get_reg(source_vcpu, 2);
	context_id = vcpu_get_reg(source_vcpu, 3);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	vcpu_set_reg(vcpu, 0, context_id);
	vcpu->arch.power_off = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	swake_up(wq);

	return PSCI_RET_SUCCESS;
}

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = vcpu_get_reg(vcpu, 1);
	lowest_affinity_level = vcpu_get_reg(vcpu, 2);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are running,
	 * return ON, otherwise OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

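/*
 * Editorial worked example for psci_affinity_mask(), assuming
 * MPIDR_LEVEL_BITS == 8: level 0 yields ~0x0UL (compare the full
 * MPIDR), level 1 yields ~0xffUL (ignore Aff0, i.e. match a whole
 * Aff1 group such as a cluster), and level 2 yields ~0xffffUL.
 * Levels above 3 return 0, which kvm_psci_vcpu_affinity_info()
 * rejects as PSCI_RET_INVALID_PARAMS.
 */
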
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI, and
	 * a caller of PSCI reboot or shutdown expects the system to shut
	 * down or reboot immediately, let's make sure that VCPUs are not
	 * run after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0]  = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Either the Trusted OS is MP and hence does not require
		 * migration, or no Trusted OS is present at all.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally or deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
		 * should see an internal failure from the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with the
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	vcpu_set_reg(vcpu, 0, val);
	return ret;
}

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	vcpu_set_reg(vcpu, 0, val);
	return 1;
}

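/*
 * Editorial sketch (not part of the original file): when
 * kvm_psci_0_2_call() returns 0 above for SYSTEM_OFF/SYSTEM_RESET,
 * KVM_RUN returns to user space with the system event prepared by
 * kvm_prepare_system_event(). A hypothetical VMM-side consumer, using
 * only the uapi in <linux/kvm.h>, might look like this (guarded out,
 * host user-space code):
 */
#if 0
#include <linux/kvm.h>

static void vmm_handle_exit_example(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return;

	switch (run->system_event.type) {
	case KVM_SYSTEM_EVENT_SHUTDOWN:
		/* tear the VM down instead of calling KVM_RUN again */
		break;
	case KVM_SYSTEM_EVENT_RESET:
		/* re-initialize all vCPUs, then resume the run loop */
		break;
	}
}
#endif
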
/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world,
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success, but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}

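/*
 * Editorial sketch (not part of the original file): the tri-state
 * return value documented above is consumed by the HVC exit handler.
 * A simplified, hypothetical caller could map it as follows:
 */
#if 0
static int handle_hvc_example(struct kvm_vcpu *vcpu)
{
	int ret = kvm_psci_call(vcpu);

	if (ret < 0) {
		/*
		 * Unknown function ID: the guest gets an undefined
		 * instruction exception rather than a PSCI error.
		 */
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;	/* 1: re-enter the guest, 0: exit to user space */
}
#endif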