// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 * Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"

/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but which no longer corresponds to an active KVM VM. It should be
 * destroyed at some point later, either asynchronously or when the
 * process terminates.
 */
struct pv_vm_to_be_destroyed {
	struct list_head list;
	unsigned long old_gmap_table;
	u64 handle;
	void *stor_var;
	unsigned long stor_base;
};
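
/*
 * Instances of this struct are allocated in kvm_s390_pv_set_aside(),
 * stashed in kvm->arch.pv.set_aside or queued on the need_cleanup list,
 * and torn down via kvm_s390_pv_dispose_one_leftover(), which is invoked
 * from kvm_s390_pv_deinit_aside_vm() or kvm_s390_pv_deinit_cleanup_all().
 */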

static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}

int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	void *sida_addr;
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	/* Alloc Secure Instruction Data Area Designation */
	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
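
/*
 * Worked example for the vlen computation in kvm_s390_pv_alloc_vm(), with
 * made-up lengths (the real values come from uv_info and are machine
 * dependent): if the highest memslot ends at 4 GiB, npages = 1048576 and
 * the guest covers (npages * PAGE_SIZE) / HPAGE_SIZE = 4096 one-megabyte
 * blocks. Assuming guest_virt_var_stor_len were 64 bytes, the variable
 * part would be ALIGN(64 * 4096, PAGE_SIZE) = 256 KiB, plus the fixed
 * guest_virt_base_stor_len on top.
 */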

/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 * @leftover: details about the leftover protected VM that needs a clean up
 * @rc: the RC code of the Destroy Secure Configuration UVC
 * @rrc: the RRC code of the Destroy Secure Configuration UVC
 *
 * Destroy one leftover protected VM.
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise 1
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)
{
	int cc;

	/* It used the destroy-fast UVC, nothing left to do here */
	if (!leftover->handle)
		goto done_fast;
	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
	if (cc)
		return cc;
	/*
	 * Intentionally leak unusable memory. If the UVC fails, the memory
	 * used for the VM and its metadata is permanently unusable.
	 * This can only happen in case of a serious KVM or hardware bug; it
	 * is not expected to happen in normal operation.
	 */
	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
done_fast:
	atomic_dec(&kvm->mm->context.protected_count);
	return 0;
}

/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
	const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
	struct kvm_memory_slot *slot;
	unsigned long len;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* Take the memslot containing guest absolute address 0 */
	slot = gfn_to_memslot(kvm, 0);
	/* Clear all slots or parts thereof that are below 2GB */
	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		/* Take the next memslot */
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
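
/*
 * Background on the "prefix issues" mentioned above: the prefix (lowcore)
 * area of each CPU must lie below 2 GB in guest absolute storage, and a
 * rebooting guest touches that region immediately. Making the low 2 GB
 * non-secure up front avoids faults on those pages on the fresh boot.
 */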

static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	if (rc)
		*rc = uvcb.header.rc;
	if (rrc)
		*rrc = uvcb.header.rrc;
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
		     uvcb.header.rc, uvcb.header.rrc);
	WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
		  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}

static inline bool is_destroy_fast_available(void)
{
	return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}

/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @kvm: the VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, this function will
 * fail.
 * The CPUs of the protected VM need to be destroyed beforehand.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *priv;
	int res = 0;

	lockdep_assert_held(&kvm->lock);
	/*
	 * If another protected VM was already prepared for teardown, refuse.
	 * A normal deinitialization has to be performed instead.
	 */
	if (kvm->arch.pv.set_aside)
		return -EINVAL;

	/* Guest with segment type ASCE, refuse to destroy asynchronously */
	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (is_destroy_fast_available()) {
		res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
	} else {
		priv->stor_var = kvm->arch.pv.stor_var;
		priv->stor_base = kvm->arch.pv.stor_base;
		priv->handle = kvm_s390_pv_get_handle(kvm);
		priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
		WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
		if (s390_replace_asce(kvm->arch.gmap))
			res = -ENOMEM;
	}

	if (res) {
		kfree(priv);
		return res;
	}

	kvm_s390_destroy_lower_2g(kvm);
	kvm_s390_clear_pv_state(kvm);
	kvm->arch.pv.set_aside = priv;

	*rc = UVC_RC_EXECUTED;
	*rrc = 42;
	return 0;
}
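
/*
 * Rough sketch of how userspace is expected to drive the asynchronous
 * teardown through the KVM_S390_PV_COMMAND ioctl (error handling omitted):
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ASYNC_CLEANUP_PREPARE };
 *
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd); // kvm_s390_pv_set_aside()
 *	// reboot the guest, which continues as a non-secure VM right away
 *	cmd.cmd = KVM_PV_ASYNC_CLEANUP_PERFORM;  // then, from another thread:
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd); // kvm_s390_pv_deinit_aside_vm()
 */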

/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 * @rc: the RC code of the UVC
 * @rrc: the RRC code of the UVC
 *
 * Deinitialize the current protected VM. This function will destroy and
 * cleanup the current protected VM, but it will not cleanup the guest
 * memory. This function should only be called when the protected VM has
 * just been created and therefore does not have any guest memory, or when
 * the caller cleans up the guest memory separately.
 *
 * This function should not fail, but if it does, the donated memory must
 * not be freed.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
 * with a specific KVM.
 * @kvm: the KVM to be cleaned up
 * @rc: the RC code of the first failing UVC
 * @rrc: the RRC code of the first failing UVC
 *
 * This function will clean up all protected VMs associated with a KVM.
 * This includes the active one, the one prepared for deinitialization with
 * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *cur;
	bool need_zap = false;
	u16 _rc, _rrc;
	int cc = 0;

	/* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
	atomic_inc(&kvm->mm->context.protected_count);

	*rc = UVC_RC_EXECUTED;
	/* If the current VM is protected, destroy it */
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	/* If a previous protected VM was set aside, put it in the need_cleanup list */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	/* Cleanup all protected VMs in the need_cleanup list */
	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		need_zap = true;
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/*
			 * Only return the first error rc and rrc, so make
			 * sure it is not overwritten. All destroys will
			 * additionally be reported via KVM_UV_EVENT().
			 */
			if (*rc == UVC_RC_EXECUTED) {
				*rc = _rc;
				*rrc = _rrc;
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}

	/*
	 * If the mm still has a mapping, try to mark all its pages as
	 * accessible. The counter should not reach zero before this
	 * cleanup has been performed.
	 */
	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	/* Now the counter can safely reach 0 */
	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Tear down the protected VM that had been previously prepared for teardown
 * using kvm_s390_pv_set_aside. Ideally this should be called by
 * userspace asynchronously from a separate thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *p;
	int ret = 0;

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/*
	 * p is not NULL if we aborted because of a fatal signal, in which
	 * case queue the leftover for later cleanup.
	 */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
		/* Did not finish, but pretend things went well */
		*rc = UVC_RC_EXECUTED;
		*rrc = 42;
	}
	return ret;
}

static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;
	int r;

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct mm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};

int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}
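
/*
 * Note on the error path above: if the Create Secure Configuration UVC
 * fails with UVC_RC_NEED_DESTROY set, the Ultravisor created a partial
 * configuration that still has to be removed with a Destroy Secure
 * Configuration UVC; otherwise the donated storage can simply be
 * reclaimed.
 */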

int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}
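
/*
 * The header handed to the UVC above is the SE header that the image build
 * tooling (e.g. genprotimg from s390-tools) embeds into a secure execution
 * boot image; userspace extracts it and passes it in via
 * KVM_PV_SET_SEC_PARMS before unpacking the image.
 */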

static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
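
/*
 * A minimal sketch of the matching userspace call, using the uapi
 * struct kvm_s390_pv_unp (error handling omitted):
 *
 *	struct kvm_s390_pv_unp unp = {
 *		.addr = image_gaddr,	// page-aligned guest address
 *		.size = image_size,	// page-aligned length
 *		.tweak = tweak,		// tweak prefix for the image
 *	};
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_UNPACK, .data = (__u64)&unp };
 *
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 */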

int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd = UVC_CMD_CPU_SET_STATE,
		.header.len = sizeof(uvcb),
		.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
		.state = state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}
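
/*
 * The state values accepted above are the PV_CPU_STATE_* constants from
 * kvm-s390.h (operating, stopped, check-stop, operating with load), which
 * KVM uses to reflect SIGP-driven CPU state changes of a protected guest
 * to the Ultravisor.
 */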

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		.header.cmd = UVC_CMD_DUMP_CPU,
		.header.len = sizeof(uvcb),
		.cpu_handle = vcpu->arch.pv.handle,
		.dump_area_origin = (u64)buff,
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested.
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). The gaddr pointer will be updated with the last address
 * for which data was written when returning to userspace. buff_user
 * might be written to even if an error rc is returned, for instance
 * if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
		.header.len = sizeof(uvcb),
		.config_handle = kvm->arch.pv.handle,
		.gaddr = *gaddr,
		.dump_area_origin = 0,
	};
	const u64 increment_len = uv_info.conf_dump_storage_state_len;
	size_t buff_kvm_size;
	size_t size_done = 0;
	u8 *buff_kvm = NULL;
	int cc, ret;

	ret = -EINVAL;
	/* UV call processes 1MB guest storage chunks at a time */
	if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
		goto out;

	/*
	 * We provide the storage state for 1MB chunks of guest
	 * storage. The buffer will need to be aligned to
	 * conf_dump_storage_state_len so we don't end on a partial
	 * chunk.
	 */
	if (!buff_user_len ||
	    !IS_ALIGNED(buff_user_len, increment_len))
		goto out;

	/*
	 * Allocate a buffer from which we will later copy to the user
	 * process. We don't want userspace to dictate our buffer size
	 * so we limit it to DUMP_BUFF_LEN.
	 */
	ret = -ENOMEM;
	buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
	buff_kvm = vzalloc(buff_kvm_size);
	if (!buff_kvm)
		goto out;

	ret = 0;
	uvcb.dump_area_origin = (u64)buff_kvm;
	/* We will loop until the user buffer is filled or an error occurs */
	do {
		/* Get 1MB worth of guest storage state data */
		cc = uv_call_sched(0, (u64)&uvcb);

		/* All or nothing */
		if (cc) {
			ret = -EINVAL;
			break;
		}

		size_done += increment_len;
		uvcb.dump_area_origin += increment_len;
		buff_user_len -= increment_len;
		uvcb.gaddr += HPAGE_SIZE;

		/* KVM Buffer full, time to copy to the process */
		if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
			if (copy_to_user(buff_user, buff_kvm, size_done)) {
				ret = -EFAULT;
				break;
			}

			buff_user += size_done;
			size_done = 0;
			uvcb.dump_area_origin = (u64)buff_kvm;
		}
	} while (buff_user_len);

	/* Report back where we ended dumping */
	*gaddr = uvcb.gaddr;

	/* Let's only log errors, we don't want to spam */
out:
	if (ret)
		KVM_UV_EVENT(kvm, 3,
			     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
			     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	vfree(buff_kvm);

	return ret;
}
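
/*
 * Sizing example with made-up numbers (conf_dump_storage_state_len is
 * machine dependent): if each 1 MiB guest chunk yields 64 bytes of tweak
 * data, the 1 MiB buff_kvm cache covers 1 MiB / 64 = 16384 chunks, i.e.
 * 16 GiB of guest storage, between copy_to_user() flushes.
 */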

/**
 * kvm_s390_pv_dump_complete
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		.header.len = sizeof(complete),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate dump area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	*rc = complete.header.rc;
	*rrc = complete.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/*
		 * kvm_s390_pv_dealloc_vm() will also (mem)set
		 * this to false on a reboot or other destroy
		 * operation for this vm.
		 */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
		if (ret)
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}