1 // SPDX-License-Identifier: GPL-2.0
3 * hosting IBM Z kernel virtual machines (s390x)
5 * Copyright IBM Corp. 2008, 2018
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Christian Borntraeger <borntraeger@de.ibm.com>
9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 * Christian Ehrhardt <ehrhardt@de.ibm.com>
11 * Jason J. Herne <jjherne@us.ibm.com>
14 #define KMSG_COMPONENT "kvm-s390"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 #include <linux/compiler.h>
18 #include <linux/err.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/mman.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/random.h>
28 #include <linux/slab.h>
29 #include <linux/timer.h>
30 #include <linux/vmalloc.h>
31 #include <linux/bitmap.h>
32 #include <linux/sched/signal.h>
33 #include <linux/string.h>
35 #include <asm/asm-offsets.h>
36 #include <asm/lowcore.h>
38 #include <asm/pgtable.h>
41 #include <asm/switch_to.h>
44 #include <asm/cpacf.h>
45 #include <asm/timex.h>
50 #define CREATE_TRACE_POINTS
52 #include "trace-s390.h"
54 #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
56 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
57 (KVM_MAX_VCPUS + LOCAL_IRQS))
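/*
 * Sized for a worst case (an assumption, not an architectural bound): one
 * interrupt pending from every other possible vCPU plus the LOCAL_IRQS
 * vcpu-local interrupt types, all queued at once, as transferred by
 * KVM_S390_{GET,SET}_IRQ_STATE.
 */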
59 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
60 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
62 struct kvm_stats_debugfs_item debugfs_entries[] = {
63 { "userspace_handled", VCPU_STAT(exit_userspace) },
64 { "exit_null", VCPU_STAT(exit_null) },
65 { "exit_validity", VCPU_STAT(exit_validity) },
66 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
67 { "exit_external_request", VCPU_STAT(exit_external_request) },
68 { "exit_io_request", VCPU_STAT(exit_io_request) },
69 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
70 { "exit_instruction", VCPU_STAT(exit_instruction) },
71 { "exit_pei", VCPU_STAT(exit_pei) },
72 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
73 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
74 { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
75 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
76 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
77 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
78 { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
79 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
80 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
81 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
82 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
83 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
84 { "deliver_ckc", VCPU_STAT(deliver_ckc) },
85 { "deliver_cputm", VCPU_STAT(deliver_cputm) },
86 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
87 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
88 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
89 { "deliver_virtio", VCPU_STAT(deliver_virtio) },
90 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
91 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
92 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
93 { "deliver_program", VCPU_STAT(deliver_program) },
94 { "deliver_io", VCPU_STAT(deliver_io) },
95 { "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
96 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
97 { "inject_ckc", VCPU_STAT(inject_ckc) },
98 { "inject_cputm", VCPU_STAT(inject_cputm) },
99 { "inject_external_call", VCPU_STAT(inject_external_call) },
100 { "inject_float_mchk", VM_STAT(inject_float_mchk) },
101 { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
102 { "inject_io", VM_STAT(inject_io) },
103 { "inject_mchk", VCPU_STAT(inject_mchk) },
104 { "inject_pfault_done", VM_STAT(inject_pfault_done) },
105 { "inject_program", VCPU_STAT(inject_program) },
106 { "inject_restart", VCPU_STAT(inject_restart) },
107 { "inject_service_signal", VM_STAT(inject_service_signal) },
108 { "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
109 { "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
110 { "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
111 { "inject_virtio", VM_STAT(inject_virtio) },
112 { "instruction_epsw", VCPU_STAT(instruction_epsw) },
113 { "instruction_gs", VCPU_STAT(instruction_gs) },
114 { "instruction_io_other", VCPU_STAT(instruction_io_other) },
115 { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
116 { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
117 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
118 { "instruction_ptff", VCPU_STAT(instruction_ptff) },
119 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
120 { "instruction_sck", VCPU_STAT(instruction_sck) },
121 { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
122 { "instruction_spx", VCPU_STAT(instruction_spx) },
123 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
124 { "instruction_stap", VCPU_STAT(instruction_stap) },
125 { "instruction_iske", VCPU_STAT(instruction_iske) },
126 { "instruction_ri", VCPU_STAT(instruction_ri) },
127 { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
128 { "instruction_sske", VCPU_STAT(instruction_sske) },
129 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
130 { "instruction_essa", VCPU_STAT(instruction_essa) },
131 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
132 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
133 { "instruction_tb", VCPU_STAT(instruction_tb) },
134 { "instruction_tpi", VCPU_STAT(instruction_tpi) },
135 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
136 { "instruction_tsch", VCPU_STAT(instruction_tsch) },
137 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
138 { "instruction_sie", VCPU_STAT(instruction_sie) },
139 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
140 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
141 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
142 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
143 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
144 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
145 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
146 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
147 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
148 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
149 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
150 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
151 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
152 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
153 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
154 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
155 { "instruction_diag_10", VCPU_STAT(diagnose_10) },
156 { "instruction_diag_44", VCPU_STAT(diagnose_44) },
157 { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
158 { "instruction_diag_258", VCPU_STAT(diagnose_258) },
159 { "instruction_diag_308", VCPU_STAT(diagnose_308) },
160 { "instruction_diag_500", VCPU_STAT(diagnose_500) },
161 { "instruction_diag_other", VCPU_STAT(diagnose_other) },
165 struct kvm_s390_tod_clock_ext {
171 /* allow nested virtualization in KVM (if enabled by user space) */
173 module_param(nested, int, S_IRUGO);
174 MODULE_PARM_DESC(nested, "Nested virtualization support");
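/* 0444/S_IRUGO: readable in sysfs, but only settable at module load time. */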
176 /* allow 1m huge page guest backing, if !nested */
178 module_param(hpage, int, 0444);
179 MODULE_PARM_DESC(hpage, "1m huge page backing support");
181 /* maximum percentage of steal time for polling. >100 is treated like 100 */
182 static u8 halt_poll_max_steal = 10;
183 module_param(halt_poll_max_steal, byte, 0644);
184 MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
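/*
 * Presumably consumed by kvm_arch_no_poll() (not shown in this excerpt):
 * once measured steal time reaches this percentage, halt polling is
 * skipped and the halt_no_poll_steal counter above is incremented, so an
 * overcommitted host does not burn cycles polling.
 */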
187 * For now we handle at most 16 double words as this is what the s390 base
188 * kernel handles and stores in the prefix page. If we ever need to go beyond
189 this, it will require code changes, but the external uapi can stay.
191 #define SIZE_INTERNAL 16
194 * Base feature mask that defines default mask for facilities. Consists of the
195 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
197 static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
199 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
200 * and defines the facilities that can be enabled via a cpu model.
202 static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
204 static unsigned long kvm_s390_fac_size(void)
206 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
207 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
208 BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
209 sizeof(S390_lowcore.stfle_fac_list));
211 return SIZE_INTERNAL;
214 /* available cpu features supported by kvm */
215 static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
216 /* available subfunctions indicated via query / "test bit" */
217 static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
219 static struct gmap_notifier gmap_notifier;
220 static struct gmap_notifier vsie_gmap_notifier;
221 debug_info_t *kvm_s390_dbf;
223 /* Section: not file related */
224 int kvm_arch_hardware_enable(void)
226 /* every s390 is virtualization enabled ;-) */
230 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
233 static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
238 * The TOD jumps by delta, we have to compensate this by adding
239 * -delta to the epoch.
243 /* sign-extension - we're adding to signed values below */
248 if (scb->ecd & ECD_MEF) {
249 scb->epdx += delta_idx;
250 if (scb->epoch < delta)
256 * This callback is executed during stop_machine(). All CPUs are therefore
257 * temporarily stopped. In order not to change guest behavior, we have to
258 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
259 * so a CPU won't be stopped while calculating with the epoch.
261 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
265 struct kvm_vcpu *vcpu;
267 unsigned long long *delta = v;
269 list_for_each_entry(kvm, &vm_list, vm_list) {
270 kvm_for_each_vcpu(i, vcpu, kvm) {
271 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
273 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
274 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
276 if (vcpu->arch.cputm_enabled)
277 vcpu->arch.cputm_start += *delta;
278 if (vcpu->arch.vsie_block)
279 kvm_clock_sync_scb(vcpu->arch.vsie_block,
286 static struct notifier_block kvm_clock_notifier = {
287 .notifier_call = kvm_clock_sync,
290 int kvm_arch_hardware_setup(void)
292 gmap_notifier.notifier_call = kvm_gmap_notifier;
293 gmap_register_pte_notifier(&gmap_notifier);
294 vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
295 gmap_register_pte_notifier(&vsie_gmap_notifier);
296 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
297 &kvm_clock_notifier);
301 void kvm_arch_hardware_unsetup(void)
303 gmap_unregister_pte_notifier(&gmap_notifier);
304 gmap_unregister_pte_notifier(&vsie_gmap_notifier);
305 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
306 &kvm_clock_notifier);
309 static void allow_cpu_feat(unsigned long nr)
311 set_bit_inv(nr, kvm_s390_available_cpu_feat);
314 static inline int plo_test_bit(unsigned char nr)
316 register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
320 /* Parameter registers are ignored for "test bit" */
330 static inline void __insn32_query(unsigned int opcode, u8 query[32])
332 register unsigned long r0 asm("0") = 0; /* query function */
333 register unsigned long r1 asm("1") = (unsigned long) query;
336 /* Parameter regs are ignored */
337 " .insn rrf,%[opc] << 16,2,4,6,0\n"
339 : "d" (r0), "a" (r1), [opc] "i" (opcode)
343 #define INSN_SORTL 0xb938
344 #define INSN_DFLTCC 0xb939
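/*
 * Opcodes of SORT LISTS and DEFLATE CONVERSION CALL. __insn32_query()
 * above runs them with GR0 == 0 (query function) so that the 32-byte
 * subfunction bitmap is written to the buffer addressed by GR1.
 */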
346 static void kvm_s390_cpu_feat_init(void)
350 for (i = 0; i < 256; ++i) {
352 kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
355 if (test_facility(28)) /* TOD-clock steering */
356 ptff(kvm_s390_available_subfunc.ptff,
357 sizeof(kvm_s390_available_subfunc.ptff),
360 if (test_facility(17)) { /* MSA */
361 __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
362 kvm_s390_available_subfunc.kmac);
363 __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
364 kvm_s390_available_subfunc.kmc);
365 __cpacf_query(CPACF_KM, (cpacf_mask_t *)
366 kvm_s390_available_subfunc.km);
367 __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
368 kvm_s390_available_subfunc.kimd);
369 __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
370 kvm_s390_available_subfunc.klmd);
372 if (test_facility(76)) /* MSA3 */
373 __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
374 kvm_s390_available_subfunc.pckmo);
375 if (test_facility(77)) { /* MSA4 */
376 __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
377 kvm_s390_available_subfunc.kmctr);
378 __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
379 kvm_s390_available_subfunc.kmf);
380 __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
381 kvm_s390_available_subfunc.kmo);
382 __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
383 kvm_s390_available_subfunc.pcc);
385 if (test_facility(57)) /* MSA5 */
386 __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
387 kvm_s390_available_subfunc.ppno);
389 if (test_facility(146)) /* MSA8 */
390 __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
391 kvm_s390_available_subfunc.kma);
393 if (test_facility(155)) /* MSA9 */
394 __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
395 kvm_s390_available_subfunc.kdsa);
397 if (test_facility(150)) /* SORTL */
398 __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
400 if (test_facility(151)) /* DFLTCC */
401 __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
403 if (MACHINE_HAS_ESOP)
404 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
406 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
407 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
409 if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
410 !test_facility(3) || !nested)
412 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
413 if (sclp.has_64bscao)
414 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
416 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
418 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
420 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
422 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
424 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
426 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
428 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
430 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
431 * all skey handling functions read/set the skey from the PGSTE
432 * instead of the real storage key.
434 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
435 * pages to be detected as preserved although they are resident.
437 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
438 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
440 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
441 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
442 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
444 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
445 * cannot easily shadow the SCA because of the ipte lock.
449 int kvm_arch_init(void *opaque)
453 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
457 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
459 goto out_debug_unreg;
462 kvm_s390_cpu_feat_init();
464 /* Register floating interrupt controller interface. */
465 rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
467 pr_err("A FLIC registration call failed with rc=%d\n", rc);
468 goto out_debug_unreg;
471 rc = kvm_s390_gib_init(GAL_ISC);
473 goto out_gib_destroy;
478 kvm_s390_gib_destroy();
480 debug_unregister(kvm_s390_dbf);
484 void kvm_arch_exit(void)
486 kvm_s390_gib_destroy();
487 debug_unregister(kvm_s390_dbf);
490 /* Section: device related */
491 long kvm_arch_dev_ioctl(struct file *filp,
492 unsigned int ioctl, unsigned long arg)
494 if (ioctl == KVM_S390_ENABLE_SIE)
495 return s390_enable_sie();
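/*
 * Hypothetical userspace sketch (illustration only): the capabilities
 * handled below are probed with KVM_CHECK_EXTENSION, e.g.
 *
 *	int has_user_sigp = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *				  KVM_CAP_S390_USER_SIGP);
 *
 * where a return value > 0 means the capability is available.
 */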
499 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
504 case KVM_CAP_S390_PSW:
505 case KVM_CAP_S390_GMAP:
506 case KVM_CAP_SYNC_MMU:
507 #ifdef CONFIG_KVM_S390_UCONTROL
508 case KVM_CAP_S390_UCONTROL:
510 case KVM_CAP_ASYNC_PF:
511 case KVM_CAP_SYNC_REGS:
512 case KVM_CAP_ONE_REG:
513 case KVM_CAP_ENABLE_CAP:
514 case KVM_CAP_S390_CSS_SUPPORT:
515 case KVM_CAP_IOEVENTFD:
516 case KVM_CAP_DEVICE_CTRL:
517 case KVM_CAP_S390_IRQCHIP:
518 case KVM_CAP_VM_ATTRIBUTES:
519 case KVM_CAP_MP_STATE:
520 case KVM_CAP_IMMEDIATE_EXIT:
521 case KVM_CAP_S390_INJECT_IRQ:
522 case KVM_CAP_S390_USER_SIGP:
523 case KVM_CAP_S390_USER_STSI:
524 case KVM_CAP_S390_SKEYS:
525 case KVM_CAP_S390_IRQ_STATE:
526 case KVM_CAP_S390_USER_INSTR0:
527 case KVM_CAP_S390_CMMA_MIGRATION:
528 case KVM_CAP_S390_AIS:
529 case KVM_CAP_S390_AIS_MIGRATION:
532 case KVM_CAP_S390_HPAGE_1M:
534 if (hpage && !kvm_is_ucontrol(kvm))
537 case KVM_CAP_S390_MEM_OP:
540 case KVM_CAP_NR_VCPUS:
541 case KVM_CAP_MAX_VCPUS:
542 case KVM_CAP_MAX_VCPU_ID:
543 r = KVM_S390_BSCA_CPU_SLOTS;
544 if (!kvm_s390_use_sca_entries())
546 else if (sclp.has_esca && sclp.has_64bscao)
547 r = KVM_S390_ESCA_CPU_SLOTS;
549 case KVM_CAP_S390_COW:
550 r = MACHINE_HAS_ESOP;
552 case KVM_CAP_S390_VECTOR_REGISTERS:
555 case KVM_CAP_S390_RI:
556 r = test_facility(64);
558 case KVM_CAP_S390_GS:
559 r = test_facility(133);
561 case KVM_CAP_S390_BPB:
562 r = test_facility(82);
570 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
571 struct kvm_memory_slot *memslot)
574 gfn_t cur_gfn, last_gfn;
575 unsigned long gaddr, vmaddr;
576 struct gmap *gmap = kvm->arch.gmap;
577 DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
579 /* Loop over all guest segments */
580 cur_gfn = memslot->base_gfn;
581 last_gfn = memslot->base_gfn + memslot->npages;
582 for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
583 gaddr = gfn_to_gpa(cur_gfn);
584 vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
585 if (kvm_is_error_hva(vmaddr))
588 bitmap_zero(bitmap, _PAGE_ENTRIES);
589 gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
590 for (i = 0; i < _PAGE_ENTRIES; i++) {
591 if (test_bit(i, bitmap))
592 mark_page_dirty(kvm, cur_gfn + i);
595 if (fatal_signal_pending(current))
601 /* Section: vm related */
602 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
605 * Get (and clear) the dirty memory log for a memory slot.
607 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
608 struct kvm_dirty_log *log)
612 struct kvm_memslots *slots;
613 struct kvm_memory_slot *memslot;
616 if (kvm_is_ucontrol(kvm))
619 mutex_lock(&kvm->slots_lock);
622 if (log->slot >= KVM_USER_MEM_SLOTS)
625 slots = kvm_memslots(kvm);
626 memslot = id_to_memslot(slots, log->slot);
628 if (!memslot->dirty_bitmap)
631 kvm_s390_sync_dirty_log(kvm, memslot);
632 r = kvm_get_dirty_log(kvm, log, &is_dirty);
636 /* Clear the dirty log */
638 n = kvm_dirty_bitmap_bytes(memslot);
639 memset(memslot->dirty_bitmap, 0, n);
643 mutex_unlock(&kvm->slots_lock);
647 static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
650 struct kvm_vcpu *vcpu;
652 kvm_for_each_vcpu(i, vcpu, kvm) {
653 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
657 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
665 case KVM_CAP_S390_IRQCHIP:
666 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
667 kvm->arch.use_irqchip = 1;
670 case KVM_CAP_S390_USER_SIGP:
671 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
672 kvm->arch.user_sigp = 1;
675 case KVM_CAP_S390_VECTOR_REGISTERS:
676 mutex_lock(&kvm->lock);
677 if (kvm->created_vcpus) {
679 } else if (MACHINE_HAS_VX) {
680 set_kvm_facility(kvm->arch.model.fac_mask, 129);
681 set_kvm_facility(kvm->arch.model.fac_list, 129);
682 if (test_facility(134)) {
683 set_kvm_facility(kvm->arch.model.fac_mask, 134);
684 set_kvm_facility(kvm->arch.model.fac_list, 134);
686 if (test_facility(135)) {
687 set_kvm_facility(kvm->arch.model.fac_mask, 135);
688 set_kvm_facility(kvm->arch.model.fac_list, 135);
690 if (test_facility(148)) {
691 set_kvm_facility(kvm->arch.model.fac_mask, 148);
692 set_kvm_facility(kvm->arch.model.fac_list, 148);
694 if (test_facility(152)) {
695 set_kvm_facility(kvm->arch.model.fac_mask, 152);
696 set_kvm_facility(kvm->arch.model.fac_list, 152);
701 mutex_unlock(&kvm->lock);
702 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
703 r ? "(not available)" : "(success)");
705 case KVM_CAP_S390_RI:
707 mutex_lock(&kvm->lock);
708 if (kvm->created_vcpus) {
710 } else if (test_facility(64)) {
711 set_kvm_facility(kvm->arch.model.fac_mask, 64);
712 set_kvm_facility(kvm->arch.model.fac_list, 64);
715 mutex_unlock(&kvm->lock);
716 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
717 r ? "(not available)" : "(success)");
719 case KVM_CAP_S390_AIS:
720 mutex_lock(&kvm->lock);
721 if (kvm->created_vcpus) {
724 set_kvm_facility(kvm->arch.model.fac_mask, 72);
725 set_kvm_facility(kvm->arch.model.fac_list, 72);
728 mutex_unlock(&kvm->lock);
729 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
730 r ? "(not available)" : "(success)");
732 case KVM_CAP_S390_GS:
734 mutex_lock(&kvm->lock);
735 if (kvm->created_vcpus) {
737 } else if (test_facility(133)) {
738 set_kvm_facility(kvm->arch.model.fac_mask, 133);
739 set_kvm_facility(kvm->arch.model.fac_list, 133);
742 mutex_unlock(&kvm->lock);
743 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
744 r ? "(not available)" : "(success)");
746 case KVM_CAP_S390_HPAGE_1M:
747 mutex_lock(&kvm->lock);
748 if (kvm->created_vcpus)
750 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
754 down_write(&kvm->mm->mmap_sem);
755 kvm->mm->context.allow_gmap_hpage_1m = 1;
756 up_write(&kvm->mm->mmap_sem);
758 * We might have to create fake 4k page
759 * tables. To prevent the hardware from working on
760 * stale PGSTEs, we emulate these instructions.
762 kvm->arch.use_skf = 0;
763 kvm->arch.use_pfmfi = 0;
765 mutex_unlock(&kvm->lock);
766 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
767 r ? "(not available)" : "(success)");
769 case KVM_CAP_S390_USER_STSI:
770 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
771 kvm->arch.user_stsi = 1;
774 case KVM_CAP_S390_USER_INSTR0:
775 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
776 kvm->arch.user_instr0 = 1;
777 icpt_operexc_on_all_vcpus(kvm);
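/*
 * vcpus that already exist get a synchronous request so they start
 * intercepting operation exceptions (instruction 0x0000) right away;
 * vcpus created later pick up user_instr0 during their setup.
 */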
787 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
791 switch (attr->attr) {
792 case KVM_S390_VM_MEM_LIMIT_SIZE:
794 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
795 kvm->arch.mem_limit);
796 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
806 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
810 switch (attr->attr) {
811 case KVM_S390_VM_MEM_ENABLE_CMMA:
816 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
817 mutex_lock(&kvm->lock);
818 if (kvm->created_vcpus)
820 else if (kvm->mm->context.allow_gmap_hpage_1m)
823 kvm->arch.use_cmma = 1;
824 /* Not compatible with cmma. */
825 kvm->arch.use_pfmfi = 0;
828 mutex_unlock(&kvm->lock);
830 case KVM_S390_VM_MEM_CLR_CMMA:
835 if (!kvm->arch.use_cmma)
838 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
839 mutex_lock(&kvm->lock);
840 idx = srcu_read_lock(&kvm->srcu);
841 s390_reset_cmma(kvm->arch.gmap->mm);
842 srcu_read_unlock(&kvm->srcu, idx);
843 mutex_unlock(&kvm->lock);
846 case KVM_S390_VM_MEM_LIMIT_SIZE: {
847 unsigned long new_limit;
849 if (kvm_is_ucontrol(kvm))
852 if (get_user(new_limit, (u64 __user *)attr->addr))
855 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
856 new_limit > kvm->arch.mem_limit)
862 /* gmap_create takes last usable address */
863 if (new_limit != KVM_S390_NO_MEM_LIMIT)
867 mutex_lock(&kvm->lock);
868 if (!kvm->created_vcpus) {
869 /* gmap_create will round the limit up */
870 struct gmap *new = gmap_create(current->mm, new_limit);
875 gmap_remove(kvm->arch.gmap);
877 kvm->arch.gmap = new;
881 mutex_unlock(&kvm->lock);
882 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
883 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
884 (void *) kvm->arch.gmap->asce);
894 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
896 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
898 struct kvm_vcpu *vcpu;
901 kvm_s390_vcpu_block_all(kvm);
903 kvm_for_each_vcpu(i, vcpu, kvm) {
904 kvm_s390_vcpu_crypto_setup(vcpu);
905 /* recreate the shadow crycb by leaving the VSIE handler */
906 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
909 kvm_s390_vcpu_unblock_all(kvm);
912 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
914 mutex_lock(&kvm->lock);
915 switch (attr->attr) {
916 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
917 if (!test_kvm_facility(kvm, 76)) {
918 mutex_unlock(&kvm->lock);
922 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
923 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
924 kvm->arch.crypto.aes_kw = 1;
925 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
927 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
928 if (!test_kvm_facility(kvm, 76)) {
929 mutex_unlock(&kvm->lock);
933 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
934 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
935 kvm->arch.crypto.dea_kw = 1;
936 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
938 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
939 if (!test_kvm_facility(kvm, 76)) {
940 mutex_unlock(&kvm->lock);
943 kvm->arch.crypto.aes_kw = 0;
944 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
945 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
946 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
948 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
949 if (!test_kvm_facility(kvm, 76)) {
950 mutex_unlock(&kvm->lock);
953 kvm->arch.crypto.dea_kw = 0;
954 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
955 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
956 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
958 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
959 if (!ap_instructions_available()) {
960 mutex_unlock(&kvm->lock);
963 kvm->arch.crypto.apie = 1;
965 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
966 if (!ap_instructions_available()) {
967 mutex_unlock(&kvm->lock);
970 kvm->arch.crypto.apie = 0;
973 mutex_unlock(&kvm->lock);
977 kvm_s390_vcpu_crypto_reset_all(kvm);
978 mutex_unlock(&kvm->lock);
982 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
985 struct kvm_vcpu *vcpu;
987 kvm_for_each_vcpu(cx, vcpu, kvm)
988 kvm_s390_sync_request(req, vcpu);
992 * Must be called with kvm->srcu held to avoid races on memslots, and with
993 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
995 static int kvm_s390_vm_start_migration(struct kvm *kvm)
997 struct kvm_memory_slot *ms;
998 struct kvm_memslots *slots;
999 unsigned long ram_pages = 0;
1002 /* migration mode already enabled */
1003 if (kvm->arch.migration_mode)
1005 slots = kvm_memslots(kvm);
1006 if (!slots || !slots->used_slots)
1009 if (!kvm->arch.use_cmma) {
1010 kvm->arch.migration_mode = 1;
1013 /* mark all the pages in active slots as dirty */
1014 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1015 ms = slots->memslots + slotnr;
1017 * The second half of the bitmap is only used on x86,
1018 * and would be wasted otherwise, so we put it to good
1019 * use here to keep track of the state of the storage attributes.
1022 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1023 ram_pages += ms->npages;
1025 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1026 kvm->arch.migration_mode = 1;
1027 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
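/*
 * From here on, dirty CMMA attributes are harvested with
 * KVM_S390_GET_CMMA_BITS (kvm_s390_get_cmma_bits() below), which
 * decrements cmma_dirty_pages as pages are reported.
 */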
1032 * Must be called with kvm->slots_lock to avoid races with ourselves and
1033 * kvm_s390_vm_start_migration.
1035 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1037 /* migration mode already disabled */
1038 if (!kvm->arch.migration_mode)
1040 kvm->arch.migration_mode = 0;
1041 if (kvm->arch.use_cmma)
1042 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1046 static int kvm_s390_vm_set_migration(struct kvm *kvm,
1047 struct kvm_device_attr *attr)
1051 mutex_lock(&kvm->slots_lock);
1052 switch (attr->attr) {
1053 case KVM_S390_VM_MIGRATION_START:
1054 res = kvm_s390_vm_start_migration(kvm);
1056 case KVM_S390_VM_MIGRATION_STOP:
1057 res = kvm_s390_vm_stop_migration(kvm);
1062 mutex_unlock(&kvm->slots_lock);
1067 static int kvm_s390_vm_get_migration(struct kvm *kvm,
1068 struct kvm_device_attr *attr)
1070 u64 mig = kvm->arch.migration_mode;
1072 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1075 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1080 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1082 struct kvm_s390_vm_tod_clock gtod;
1084 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1087 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1089 kvm_s390_set_tod_clock(kvm, &gtod);
1091 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1092 gtod.epoch_idx, gtod.tod);
1097 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1101 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1107 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
1112 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1114 struct kvm_s390_vm_tod_clock gtod = { 0 };
1116 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1120 kvm_s390_set_tod_clock(kvm, &gtod);
1121 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1125 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1132 switch (attr->attr) {
1133 case KVM_S390_VM_TOD_EXT:
1134 ret = kvm_s390_set_tod_ext(kvm, attr);
1136 case KVM_S390_VM_TOD_HIGH:
1137 ret = kvm_s390_set_tod_high(kvm, attr);
1139 case KVM_S390_VM_TOD_LOW:
1140 ret = kvm_s390_set_tod_low(kvm, attr);
1149 static void kvm_s390_get_tod_clock(struct kvm *kvm,
1150 struct kvm_s390_vm_tod_clock *gtod)
1152 struct kvm_s390_tod_clock_ext htod;
1156 get_tod_clock_ext((char *)&htod);
1158 gtod->tod = htod.tod + kvm->arch.epoch;
1159 gtod->epoch_idx = 0;
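/*
 * With the multiple-epoch facility (139), the 8-bit epoch index extends
 * the TOD clock; if adding the guest epoch wrapped around 2^64, the
 * carry has to be propagated into the index as well.
 */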
1160 if (test_kvm_facility(kvm, 139)) {
1161 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1162 if (gtod->tod < htod.tod)
1163 gtod->epoch_idx += 1;
1169 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1171 struct kvm_s390_vm_tod_clock gtod;
1173 memset(&gtod, 0, sizeof(gtod));
1174 kvm_s390_get_tod_clock(kvm, &gtod);
1175 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1178 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1179 gtod.epoch_idx, gtod.tod);
1183 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1187 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1190 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
1195 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1199 gtod = kvm_s390_get_tod_clock_fast(kvm);
1200 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1202 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
1207 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1214 switch (attr->attr) {
1215 case KVM_S390_VM_TOD_EXT:
1216 ret = kvm_s390_get_tod_ext(kvm, attr);
1218 case KVM_S390_VM_TOD_HIGH:
1219 ret = kvm_s390_get_tod_high(kvm, attr);
1221 case KVM_S390_VM_TOD_LOW:
1222 ret = kvm_s390_get_tod_low(kvm, attr);
1231 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1233 struct kvm_s390_vm_cpu_processor *proc;
1234 u16 lowest_ibc, unblocked_ibc;
1237 mutex_lock(&kvm->lock);
1238 if (kvm->created_vcpus) {
1242 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1247 if (!copy_from_user(proc, (void __user *)attr->addr,
1249 kvm->arch.model.cpuid = proc->cpuid;
1250 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1251 unblocked_ibc = sclp.ibc & 0xfff;
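/*
 * sclp.ibc advertises the machine's IBC range: the lowest supported
 * level in bits 16-27 and the highest unblocked level in bits 0-11.
 * The requested value is clamped into that range below.
 */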
1252 if (lowest_ibc && proc->ibc) {
1253 if (proc->ibc > unblocked_ibc)
1254 kvm->arch.model.ibc = unblocked_ibc;
1255 else if (proc->ibc < lowest_ibc)
1256 kvm->arch.model.ibc = lowest_ibc;
1258 kvm->arch.model.ibc = proc->ibc;
1260 memcpy(kvm->arch.model.fac_list, proc->fac_list,
1261 S390_ARCH_FAC_LIST_SIZE_BYTE);
1262 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1263 kvm->arch.model.ibc,
1264 kvm->arch.model.cpuid);
1265 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1266 kvm->arch.model.fac_list[0],
1267 kvm->arch.model.fac_list[1],
1268 kvm->arch.model.fac_list[2]);
1273 mutex_unlock(&kvm->lock);
1277 static int kvm_s390_set_processor_feat(struct kvm *kvm,
1278 struct kvm_device_attr *attr)
1280 struct kvm_s390_vm_cpu_feat data;
1282 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1284 if (!bitmap_subset((unsigned long *) data.feat,
1285 kvm_s390_available_cpu_feat,
1286 KVM_S390_VM_CPU_FEAT_NR_BITS))
1289 mutex_lock(&kvm->lock);
1290 if (kvm->created_vcpus) {
1291 mutex_unlock(&kvm->lock);
1294 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1295 KVM_S390_VM_CPU_FEAT_NR_BITS);
1296 mutex_unlock(&kvm->lock);
1297 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1304 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1305 struct kvm_device_attr *attr)
1307 mutex_lock(&kvm->lock);
1308 if (kvm->created_vcpus) {
1309 mutex_unlock(&kvm->lock);
1313 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1314 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1315 mutex_unlock(&kvm->lock);
1318 mutex_unlock(&kvm->lock);
1320 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1321 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1322 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1323 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1324 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1325 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1326 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1327 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1328 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1329 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1330 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1331 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1332 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1333 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1334 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1335 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1336 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1337 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1338 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1339 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1340 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1341 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1342 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1343 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1344 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1345 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1346 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1347 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1348 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1349 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1350 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1351 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1352 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1353 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1354 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1355 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1356 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1357 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1358 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1359 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1360 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1361 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1364 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1365 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1366 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1367 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1368 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1369 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1370 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1371 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1372 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1373 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1374 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1375 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1376 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1381 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1385 switch (attr->attr) {
1386 case KVM_S390_VM_CPU_PROCESSOR:
1387 ret = kvm_s390_set_processor(kvm, attr);
1389 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1390 ret = kvm_s390_set_processor_feat(kvm, attr);
1392 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1393 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1399 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1401 struct kvm_s390_vm_cpu_processor *proc;
1404 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1409 proc->cpuid = kvm->arch.model.cpuid;
1410 proc->ibc = kvm->arch.model.ibc;
1411 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1412 S390_ARCH_FAC_LIST_SIZE_BYTE);
1413 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1414 kvm->arch.model.ibc,
1415 kvm->arch.model.cpuid);
1416 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1417 kvm->arch.model.fac_list[0],
1418 kvm->arch.model.fac_list[1],
1419 kvm->arch.model.fac_list[2]);
1420 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1427 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1429 struct kvm_s390_vm_cpu_machine *mach;
1432 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1437 get_cpu_id((struct cpuid *) &mach->cpuid);
1438 mach->ibc = sclp.ibc;
1439 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1440 S390_ARCH_FAC_LIST_SIZE_BYTE);
1441 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
1442 sizeof(S390_lowcore.stfle_fac_list));
1443 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1444 kvm->arch.model.ibc,
1445 kvm->arch.model.cpuid);
1446 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1450 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1454 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1461 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1462 struct kvm_device_attr *attr)
1464 struct kvm_s390_vm_cpu_feat data;
1466 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1467 KVM_S390_VM_CPU_FEAT_NR_BITS);
1468 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1470 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1477 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1478 struct kvm_device_attr *attr)
1480 struct kvm_s390_vm_cpu_feat data;
1482 bitmap_copy((unsigned long *) data.feat,
1483 kvm_s390_available_cpu_feat,
1484 KVM_S390_VM_CPU_FEAT_NR_BITS);
1485 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1487 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1494 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1495 struct kvm_device_attr *attr)
1497 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1498 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1501 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1502 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1503 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1504 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1505 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1506 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1507 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1508 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1509 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1510 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1512 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1513 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1515 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1516 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1517 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1518 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1519 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1520 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1521 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1522 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1523 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1524 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1525 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1526 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1527 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1528 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1529 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1530 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1531 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1532 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1533 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1534 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1535 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1536 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1537 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1538 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1539 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1540 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1541 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1542 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1545 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1546 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1547 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1548 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1549 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1550 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1551 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1552 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1553 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1554 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1555 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1556 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1557 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1562 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1563 struct kvm_device_attr *attr)
1565 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1566 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1569 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1570 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1571 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1572 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1573 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1574 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1575 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1576 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1577 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1578 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1579 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1580 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1581 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1582 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1583 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1584 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1585 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1586 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1587 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1588 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1589 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1590 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1591 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1592 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1593 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1594 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1595 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1596 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1597 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1598 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1599 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1600 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1601 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1602 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1603 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1604 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1605 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1606 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1607 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1608 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1609 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1610 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1611 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1612 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1613 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1614 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1615 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1616 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1617 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1618 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1619 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1620 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1621 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1622 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1623 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1624 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1625 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1630 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1634 switch (attr->attr) {
1635 case KVM_S390_VM_CPU_PROCESSOR:
1636 ret = kvm_s390_get_processor(kvm, attr);
1638 case KVM_S390_VM_CPU_MACHINE:
1639 ret = kvm_s390_get_machine(kvm, attr);
1641 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1642 ret = kvm_s390_get_processor_feat(kvm, attr);
1644 case KVM_S390_VM_CPU_MACHINE_FEAT:
1645 ret = kvm_s390_get_machine_feat(kvm, attr);
1647 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1648 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1650 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1651 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1657 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1661 switch (attr->group) {
1662 case KVM_S390_VM_MEM_CTRL:
1663 ret = kvm_s390_set_mem_control(kvm, attr);
1665 case KVM_S390_VM_TOD:
1666 ret = kvm_s390_set_tod(kvm, attr);
1668 case KVM_S390_VM_CPU_MODEL:
1669 ret = kvm_s390_set_cpu_model(kvm, attr);
1671 case KVM_S390_VM_CRYPTO:
1672 ret = kvm_s390_vm_set_crypto(kvm, attr);
1674 case KVM_S390_VM_MIGRATION:
1675 ret = kvm_s390_vm_set_migration(kvm, attr);
1685 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1689 switch (attr->group) {
1690 case KVM_S390_VM_MEM_CTRL:
1691 ret = kvm_s390_get_mem_control(kvm, attr);
1693 case KVM_S390_VM_TOD:
1694 ret = kvm_s390_get_tod(kvm, attr);
1696 case KVM_S390_VM_CPU_MODEL:
1697 ret = kvm_s390_get_cpu_model(kvm, attr);
1699 case KVM_S390_VM_MIGRATION:
1700 ret = kvm_s390_vm_get_migration(kvm, attr);
1710 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1714 switch (attr->group) {
1715 case KVM_S390_VM_MEM_CTRL:
1716 switch (attr->attr) {
1717 case KVM_S390_VM_MEM_ENABLE_CMMA:
1718 case KVM_S390_VM_MEM_CLR_CMMA:
1719 ret = sclp.has_cmma ? 0 : -ENXIO;
1721 case KVM_S390_VM_MEM_LIMIT_SIZE:
1729 case KVM_S390_VM_TOD:
1730 switch (attr->attr) {
1731 case KVM_S390_VM_TOD_LOW:
1732 case KVM_S390_VM_TOD_HIGH:
1740 case KVM_S390_VM_CPU_MODEL:
1741 switch (attr->attr) {
1742 case KVM_S390_VM_CPU_PROCESSOR:
1743 case KVM_S390_VM_CPU_MACHINE:
1744 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1745 case KVM_S390_VM_CPU_MACHINE_FEAT:
1746 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1747 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1755 case KVM_S390_VM_CRYPTO:
1756 switch (attr->attr) {
1757 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1758 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1759 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1760 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1763 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1764 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1765 ret = ap_instructions_available() ? 0 : -ENXIO;
1772 case KVM_S390_VM_MIGRATION:
1783 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1787 int srcu_idx, i, r = 0;
1789 if (args->flags != 0)
1792 /* Is this guest using storage keys? */
1793 if (!mm_uses_skeys(current->mm))
1794 return KVM_S390_GET_SKEYS_NONE;
1796 /* Enforce sane limit on memory allocation */
1797 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1800 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1804 down_read(&current->mm->mmap_sem);
1805 srcu_idx = srcu_read_lock(&kvm->srcu);
1806 for (i = 0; i < args->count; i++) {
1807 hva = gfn_to_hva(kvm, args->start_gfn + i);
1808 if (kvm_is_error_hva(hva)) {
1813 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1817 srcu_read_unlock(&kvm->srcu, srcu_idx);
1818 up_read(&current->mm->mmap_sem);
1821 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1822 sizeof(uint8_t) * args->count);
1831 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1835 int srcu_idx, i, r = 0;
1838 if (args->flags != 0)
1841 /* Enforce sane limit on memory allocation */
1842 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1845 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1849 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1850 sizeof(uint8_t) * args->count);
1856 /* Enable storage key handling for the guest */
1857 r = s390_enable_skey();
1862 down_read(&current->mm->mmap_sem);
1863 srcu_idx = srcu_read_lock(&kvm->srcu);
1864 while (i < args->count) {
1866 hva = gfn_to_hva(kvm, args->start_gfn + i);
1867 if (kvm_is_error_hva(hva)) {
1872 /* Lowest order bit is reserved */
1873 if (keys[i] & 0x01) {
1878 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1880 r = fixup_user_fault(current, current->mm, hva,
1881 FAULT_FLAG_WRITE, &unlocked);
1888 srcu_read_unlock(&kvm->srcu, srcu_idx);
1889 up_read(&current->mm->mmap_sem);
1896 * Base address and length must be sent at the start of each block, therefore
1897 * it's cheaper to send some clean data, as long as it's less than the size of two longs.
1900 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1901 /* for consistency */
1902 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
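/*
 * At one byte per page this means that, on 64 bit, runs of up to 16
 * clean pages are still stored inline rather than starting a new block.
 */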
1905 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1906 * address falls in a hole. In that case the index of one of the memslots
1907 * bordering the hole is returned.
1909 static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1911 int start = 0, end = slots->used_slots;
1912 int slot = atomic_read(&slots->lru_slot);
1913 struct kvm_memory_slot *memslots = slots->memslots;
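/*
 * kvm keeps memslots sorted by descending base_gfn (see also the mem_end
 * computation in kvm_s390_get_cmma() below), so the binary search ends on
 * the lowest index whose base_gfn is not above gfn.
 */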
1915 if (gfn >= memslots[slot].base_gfn &&
1916 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1919 while (start < end) {
1920 slot = start + (end - start) / 2;
1922 if (gfn >= memslots[slot].base_gfn)
1928 if (gfn >= memslots[start].base_gfn &&
1929 gfn < memslots[start].base_gfn + memslots[start].npages) {
1930 atomic_set(&slots->lru_slot, start);
1936 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1937 u8 *res, unsigned long bufsize)
1939 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1942 while (args->count < bufsize) {
1943 hva = gfn_to_hva(kvm, cur_gfn);
1945 * We return an error if the first value was invalid, but we
1946 * return successfully if at least one value was copied.
1948 if (kvm_is_error_hva(hva))
1949 return args->count ? 0 : -EFAULT;
1950 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1952 res[args->count++] = (pgstev >> 24) & 0x43;
1959 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1960 unsigned long cur_gfn)
1962 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1963 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1964 unsigned long ofs = cur_gfn - ms->base_gfn;
1966 if (ms->base_gfn + ms->npages <= cur_gfn) {
1968 /* If we are above the highest slot, wrap around */
1970 slotidx = slots->used_slots - 1;
1972 ms = slots->memslots + slotidx;
1975 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1976 while ((slotidx > 0) && (ofs >= ms->npages)) {
1978 ms = slots->memslots + slotidx;
1979 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1981 return ms->base_gfn + ofs;
1984 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1985 u8 *res, unsigned long bufsize)
1987 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1988 struct kvm_memslots *slots = kvm_memslots(kvm);
1989 struct kvm_memory_slot *ms;
1991 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1992 ms = gfn_to_memslot(kvm, cur_gfn);
1994 args->start_gfn = cur_gfn;
1997 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1998 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2000 while (args->count < bufsize) {
2001 hva = gfn_to_hva(kvm, cur_gfn);
2002 if (kvm_is_error_hva(hva))
2004 /* Decrement only if we actually flipped the bit to 0 */
2005 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2006 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2007 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2009 /* Save the value */
2010 res[args->count++] = (pgstev >> 24) & 0x43;
2011 /* If the next bit is too far away, stop. */
2012 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2014 /* If we reached the previous "next", find the next one */
2015 if (cur_gfn == next_gfn)
2016 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2017 /* Reached the end of memory or of the buffer, stop */
2018 if ((next_gfn >= mem_end) ||
2019 (next_gfn - args->start_gfn >= bufsize))
2022 /* Reached the end of the current memslot, take the next one. */
2023 if (cur_gfn - ms->base_gfn >= ms->npages) {
2024 ms = gfn_to_memslot(kvm, cur_gfn);
2032 /*
2033 * This function searches for the next page with dirty CMMA attributes, and
2034 * saves the attributes in the buffer up to either the end of the buffer or
2035 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2036 * no trailing clean bytes are saved.
2037 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2038 * output buffer will indicate 0 as length.
2039 */
2040 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2041 struct kvm_s390_cmma_log *args)
2042 {
2043 unsigned long bufsize;
2044 int srcu_idx, peek, ret;
2045 u8 *values;
2047 if (!kvm->arch.use_cmma)
2048 return -ENXIO;
2049 /* Invalid/unsupported flags were specified */
2050 if (args->flags & ~KVM_S390_CMMA_PEEK)
2051 return -EINVAL;
2052 /* Migration mode query, and we are not doing a migration */
2053 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2054 if (!peek && !kvm->arch.migration_mode)
2055 return -EINVAL;
2056 /* CMMA is disabled or was not used, or the buffer has length zero */
2057 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2058 if (!bufsize || !kvm->mm->context.uses_cmm) {
2059 memset(args, 0, sizeof(*args));
2060 return 0;
2061 }
2062 /* We are not peeking, and there are no dirty pages */
2063 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2064 memset(args, 0, sizeof(*args));
2065 return 0;
2066 }
2068 values = vmalloc(bufsize);
2069 if (!values)
2070 return -ENOMEM;
2072 down_read(&kvm->mm->mmap_sem);
2073 srcu_idx = srcu_read_lock(&kvm->srcu);
2074 if (peek)
2075 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2076 else
2077 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2078 srcu_read_unlock(&kvm->srcu, srcu_idx);
2079 up_read(&kvm->mm->mmap_sem);
2081 if (kvm->arch.migration_mode)
2082 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2083 else
2084 args->remaining = 0;
2086 if (copy_to_user((void __user *)args->values, values, args->count))
2087 ret = -EFAULT;
2089 vfree(values);
2090 return ret;
2091 }
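/*
 * Added, illustrative only: a hypothetical userspace harvesting loop for
 * KVM_S390_GET_CMMA_BITS during migration. "vm_fd" and "buf" (a buffer of
 * KVM_S390_CMMA_SIZE_MAX bytes) are assumptions of this sketch:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = KVM_S390_CMMA_SIZE_MAX,
 *		.flags = 0,
 *		.values = (__u64) buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log))
 *			break;
 *		-> consume log.count bytes for gfns starting at log.start_gfn
 *		log.start_gfn += log.count;
 *		log.count = KVM_S390_CMMA_SIZE_MAX;
 *	} while (log.remaining);
 */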
2093 /*
2094 * This function sets the CMMA attributes for the given pages. If the input
2095 * buffer has zero length, no action is taken, otherwise the attributes are
2096 * set and the mm->context.uses_cmm flag is set.
2097 */
2098 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2099 const struct kvm_s390_cmma_log *args)
2101 unsigned long hva, mask, pgstev, i;
2102 uint8_t *bits;
2103 int srcu_idx, r = 0;
2105 mask = args->mask;
2107 if (!kvm->arch.use_cmma)
2108 return -ENXIO;
2109 /* invalid/unsupported flags */
2110 if (args->flags != 0)
2111 return -EINVAL;
2112 /* Enforce sane limit on memory allocation */
2113 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2114 return -EINVAL;
2115 /* Nothing to do */
2116 if (args->count == 0)
2117 return 0;
2119 bits = vmalloc(array_size(sizeof(*bits), args->count));
2120 if (!bits)
2121 return -ENOMEM;
2123 r = copy_from_user(bits, (void __user *)args->values, args->count);
2124 if (r) {
2125 r = -EFAULT;
2126 goto out;
2127 }
2129 down_read(&kvm->mm->mmap_sem);
2130 srcu_idx = srcu_read_lock(&kvm->srcu);
2131 for (i = 0; i < args->count; i++) {
2132 hva = gfn_to_hva(kvm, args->start_gfn + i);
2133 if (kvm_is_error_hva(hva)) {
2134 r = -EFAULT;
2135 break;
2136 }
2138 pgstev = bits[i];
2139 pgstev = pgstev << 24;
2140 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2141 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2142 }
2143 srcu_read_unlock(&kvm->srcu, srcu_idx);
2144 up_read(&kvm->mm->mmap_sem);
2146 if (!kvm->mm->context.uses_cmm) {
2147 down_write(&kvm->mm->mmap_sem);
2148 kvm->mm->context.uses_cmm = 1;
2149 up_write(&kvm->mm->mmap_sem);
2150 }
2151 out:
2152 vfree(bits);
2153 return r;
2154 }
2156 long kvm_arch_vm_ioctl(struct file *filp,
2157 unsigned int ioctl, unsigned long arg)
2159 struct kvm *kvm = filp->private_data;
2160 void __user *argp = (void __user *)arg;
2161 struct kvm_device_attr attr;
2162 int r;
2164 switch (ioctl) {
2165 case KVM_S390_INTERRUPT: {
2166 struct kvm_s390_interrupt s390int;
2168 r = -EFAULT;
2169 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2170 break;
2171 r = kvm_s390_inject_vm(kvm, &s390int);
2172 break;
2173 }
2174 case KVM_CREATE_IRQCHIP: {
2175 struct kvm_irq_routing_entry routing;
2177 r = -EINVAL;
2178 if (kvm->arch.use_irqchip) {
2179 /* Set up dummy routing. */
2180 memset(&routing, 0, sizeof(routing));
2181 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2182 }
2183 break;
2184 }
2185 case KVM_SET_DEVICE_ATTR: {
2186 r = -EFAULT;
2187 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2188 break;
2189 r = kvm_s390_vm_set_attr(kvm, &attr);
2190 break;
2191 }
2192 case KVM_GET_DEVICE_ATTR: {
2193 r = -EFAULT;
2194 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2195 break;
2196 r = kvm_s390_vm_get_attr(kvm, &attr);
2197 break;
2198 }
2199 case KVM_HAS_DEVICE_ATTR: {
2200 r = -EFAULT;
2201 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2202 break;
2203 r = kvm_s390_vm_has_attr(kvm, &attr);
2204 break;
2205 }
2206 case KVM_S390_GET_SKEYS: {
2207 struct kvm_s390_skeys args;
2209 r = -EFAULT;
2210 if (copy_from_user(&args, argp,
2211 sizeof(struct kvm_s390_skeys)))
2212 break;
2213 r = kvm_s390_get_skeys(kvm, &args);
2214 break;
2215 }
2216 case KVM_S390_SET_SKEYS: {
2217 struct kvm_s390_skeys args;
2219 r = -EFAULT;
2220 if (copy_from_user(&args, argp,
2221 sizeof(struct kvm_s390_skeys)))
2222 break;
2223 r = kvm_s390_set_skeys(kvm, &args);
2224 break;
2225 }
2226 case KVM_S390_GET_CMMA_BITS: {
2227 struct kvm_s390_cmma_log args;
2229 r = -EFAULT;
2230 if (copy_from_user(&args, argp, sizeof(args)))
2231 break;
2232 mutex_lock(&kvm->slots_lock);
2233 r = kvm_s390_get_cmma_bits(kvm, &args);
2234 mutex_unlock(&kvm->slots_lock);
2235 if (!r) {
2236 r = copy_to_user(argp, &args, sizeof(args));
2237 if (r)
2238 r = -EFAULT;
2239 }
2240 break;
2241 }
2242 case KVM_S390_SET_CMMA_BITS: {
2243 struct kvm_s390_cmma_log args;
2245 r = -EFAULT;
2246 if (copy_from_user(&args, argp, sizeof(args)))
2247 break;
2248 mutex_lock(&kvm->slots_lock);
2249 r = kvm_s390_set_cmma_bits(kvm, &args);
2250 mutex_unlock(&kvm->slots_lock);
2251 break;
2252 }
2253 default:
2254 r = -ENOTTY;
2255 }
2257 return r;
2258 }
2260 static int kvm_s390_apxa_installed(void)
2262 struct ap_config_info info;
2264 if (ap_instructions_available()) {
2265 if (ap_qci(&info) == 0)
2266 return info.apxa;
2267 }
2269 return 0;
2270 }
2272 /*
2273 * The format of the crypto control block (CRYCB) is specified in the 3 low
2274 * order bits of the CRYCB designation (CRYCBD) field as follows:
2275 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2276 * AP extended addressing (APXA) facility are installed.
2277 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2278 * Format 2: Both the APXA and MSAX3 facilities are installed.
2279 */
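/*
 * Added note: the selected format is encoded in the low-order bits of
 * kvm->arch.crypto.crycbd (CRYCB_FORMAT_MASK); format 2 is the only one
 * that uses the larger apcb1 masks, which is why kvm_arch_crypto_set_masks()
 * below copies 32 bytes per mask for format 2 but only 8/2/2 bytes for
 * formats 0 and 1.
 */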
2280 static void kvm_s390_set_crycb_format(struct kvm *kvm)
2282 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2284 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2285 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2287 /* Check whether MSAX3 is installed */
2288 if (!test_kvm_facility(kvm, 76))
2289 return;
2291 if (kvm_s390_apxa_installed())
2292 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2293 else
2294 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2295 }
2297 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2298 unsigned long *aqm, unsigned long *adm)
2300 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2302 mutex_lock(&kvm->lock);
2303 kvm_s390_vcpu_block_all(kvm);
2305 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2306 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
2307 memcpy(crycb->apcb1.apm, apm, 32);
2308 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2309 apm[0], apm[1], apm[2], apm[3]);
2310 memcpy(crycb->apcb1.aqm, aqm, 32);
2311 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2312 aqm[0], aqm[1], aqm[2], aqm[3]);
2313 memcpy(crycb->apcb1.adm, adm, 32);
2314 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2315 adm[0], adm[1], adm[2], adm[3]);
2316 break;
2317 case CRYCB_FORMAT1:
2318 case CRYCB_FORMAT0: /* Fall through both use APCB0 */
2319 memcpy(crycb->apcb0.apm, apm, 8);
2320 memcpy(crycb->apcb0.aqm, aqm, 2);
2321 memcpy(crycb->apcb0.adm, adm, 2);
2322 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2323 apm[0], *((unsigned short *)aqm),
2324 *((unsigned short *)adm));
2325 break;
2326 default: /* Can not happen */
2327 break;
2328 }
2330 /* recreate the shadow crycb for each vcpu */
2331 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2332 kvm_s390_vcpu_unblock_all(kvm);
2333 mutex_unlock(&kvm->lock);
2335 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2337 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2339 mutex_lock(&kvm->lock);
2340 kvm_s390_vcpu_block_all(kvm);
2342 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2343 sizeof(kvm->arch.crypto.crycb->apcb0));
2344 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2345 sizeof(kvm->arch.crypto.crycb->apcb1));
2347 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
2348 /* recreate the shadow crycb for each vcpu */
2349 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2350 kvm_s390_vcpu_unblock_all(kvm);
2351 mutex_unlock(&kvm->lock);
2353 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2355 static u64 kvm_s390_get_initial_cpuid(void)
2356 {
2357 struct cpuid cpuid;
2359 get_cpu_id(&cpuid);
2360 cpuid.version = 0xff;
2361 return *((u64 *) &cpuid);
2364 static void kvm_s390_crypto_init(struct kvm *kvm)
2366 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
2367 kvm_s390_set_crycb_format(kvm);
2369 if (!test_kvm_facility(kvm, 76))
2370 return;
2372 /* Enable AES/DEA protected key functions by default */
2373 kvm->arch.crypto.aes_kw = 1;
2374 kvm->arch.crypto.dea_kw = 1;
2375 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2376 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2377 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2378 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
2381 static void sca_dispose(struct kvm *kvm)
2383 if (kvm->arch.use_esca)
2384 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
2385 else
2386 free_page((unsigned long)(kvm->arch.sca));
2387 kvm->arch.sca = NULL;
2390 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
2392 gfp_t alloc_flags = GFP_KERNEL;
2393 int i, rc;
2394 char debug_name[16];
2395 static unsigned long sca_offset;
2397 rc = -EINVAL;
2398 #ifdef CONFIG_KVM_S390_UCONTROL
2399 if (type & ~KVM_VM_S390_UCONTROL)
2400 goto out_err;
2401 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2402 goto out_err;
2403 #else
2404 if (type)
2405 goto out_err;
2406 #endif
2408 rc = s390_enable_sie();
2409 if (rc)
2410 goto out_err;
2412 rc = -ENOMEM;
2414 if (!sclp.has_64bscao)
2415 alloc_flags |= GFP_DMA;
2416 rwlock_init(&kvm->arch.sca_lock);
2417 /* start with basic SCA */
2418 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2419 if (!kvm->arch.sca)
2420 goto out_err;
2421 spin_lock(&kvm_lock);
2422 sca_offset += 16;
2423 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
2424 sca_offset = 0;
2425 kvm->arch.sca = (struct bsca_block *)
2426 ((char *) kvm->arch.sca + sca_offset);
2427 spin_unlock(&kvm_lock);
2429 sprintf(debug_name, "kvm-%u", current->pid);
2431 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2432 if (!kvm->arch.dbf)
2433 goto out_err;
2435 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
2436 kvm->arch.sie_page2 =
2437 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2438 if (!kvm->arch.sie_page2)
2439 goto out_err;
2441 kvm->arch.sie_page2->kvm = kvm;
2442 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2444 for (i = 0; i < kvm_s390_fac_size(); i++) {
2445 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2446 (kvm_s390_fac_base[i] |
2447 kvm_s390_fac_ext[i]);
2448 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2449 kvm_s390_fac_base[i];
2451 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2453 /* we are always in czam mode - even on pre z14 machines */
2454 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2455 set_kvm_facility(kvm->arch.model.fac_list, 138);
2456 /* we emulate STHYI in kvm */
2457 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2458 set_kvm_facility(kvm->arch.model.fac_list, 74);
2459 if (MACHINE_HAS_TLB_GUEST) {
2460 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2461 set_kvm_facility(kvm->arch.model.fac_list, 147);
2464 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
2465 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
2467 kvm_s390_crypto_init(kvm);
2469 mutex_init(&kvm->arch.float_int.ais_lock);
2470 spin_lock_init(&kvm->arch.float_int.lock);
2471 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2472 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
2473 init_waitqueue_head(&kvm->arch.ipte_wq);
2474 mutex_init(&kvm->arch.ipte_mutex);
2476 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
2477 VM_EVENT(kvm, 3, "vm created with type %lu", type);
2479 if (type & KVM_VM_S390_UCONTROL) {
2480 kvm->arch.gmap = NULL;
2481 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2482 } else {
2483 if (sclp.hamax == U64_MAX)
2484 kvm->arch.mem_limit = TASK_SIZE_MAX;
2485 else
2486 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
2487 sclp.hamax + 1);
2488 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2489 if (!kvm->arch.gmap)
2490 goto out_err;
2491 kvm->arch.gmap->private = kvm;
2492 kvm->arch.gmap->pfault_enabled = 0;
2493 }
2495 kvm->arch.use_pfmfi = sclp.has_pfmfi;
2496 kvm->arch.use_skf = sclp.has_skey;
2497 spin_lock_init(&kvm->arch.start_stop_lock);
2498 kvm_s390_vsie_init(kvm);
2499 kvm_s390_gisa_init(kvm);
2500 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
2502 return 0;
2503 out_err:
2504 free_page((unsigned long)kvm->arch.sie_page2);
2505 debug_unregister(kvm->arch.dbf);
2506 sca_dispose(kvm);
2507 KVM_EVENT(3, "creation of vm failed: %d", rc);
2508 return rc;
2509 }
2511 bool kvm_arch_has_vcpu_debugfs(void)
2512 {
2513 return false;
2514 }
2516 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2517 {
2518 return 0;
2519 }
2521 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2523 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
2524 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
2525 kvm_s390_clear_local_irqs(vcpu);
2526 kvm_clear_async_pf_completion_queue(vcpu);
2527 if (!kvm_is_ucontrol(vcpu->kvm))
2528 sca_del_vcpu(vcpu);
2530 if (kvm_is_ucontrol(vcpu->kvm))
2531 gmap_remove(vcpu->arch.gmap);
2533 if (vcpu->kvm->arch.use_cmma)
2534 kvm_s390_vcpu_unsetup_cmma(vcpu);
2535 free_page((unsigned long)(vcpu->arch.sie_block));
2537 kvm_vcpu_uninit(vcpu);
2538 kmem_cache_free(kvm_vcpu_cache, vcpu);
2541 static void kvm_free_vcpus(struct kvm *kvm)
2544 struct kvm_vcpu *vcpu;
2546 kvm_for_each_vcpu(i, vcpu, kvm)
2547 kvm_arch_vcpu_destroy(vcpu);
2549 mutex_lock(&kvm->lock);
2550 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2551 kvm->vcpus[i] = NULL;
2553 atomic_set(&kvm->online_vcpus, 0);
2554 mutex_unlock(&kvm->lock);
2557 void kvm_arch_destroy_vm(struct kvm *kvm)
2559 kvm_free_vcpus(kvm);
2561 debug_unregister(kvm->arch.dbf);
2562 kvm_s390_gisa_destroy(kvm);
2563 free_page((unsigned long)kvm->arch.sie_page2);
2564 if (!kvm_is_ucontrol(kvm))
2565 gmap_remove(kvm->arch.gmap);
2566 kvm_s390_destroy_adapters(kvm);
2567 kvm_s390_clear_float_irqs(kvm);
2568 kvm_s390_vsie_destroy(kvm);
2569 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
2572 /* Section: vcpu related */
2573 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2575 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
2576 if (!vcpu->arch.gmap)
2577 return -ENOMEM;
2578 vcpu->arch.gmap->private = vcpu->kvm;
2580 return 0;
2581 }
2583 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2585 if (!kvm_s390_use_sca_entries())
2586 return;
2587 read_lock(&vcpu->kvm->arch.sca_lock);
2588 if (vcpu->kvm->arch.use_esca) {
2589 struct esca_block *sca = vcpu->kvm->arch.sca;
2591 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2592 sca->cpu[vcpu->vcpu_id].sda = 0;
2593 } else {
2594 struct bsca_block *sca = vcpu->kvm->arch.sca;
2596 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2597 sca->cpu[vcpu->vcpu_id].sda = 0;
2599 read_unlock(&vcpu->kvm->arch.sca_lock);
2602 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2604 if (!kvm_s390_use_sca_entries()) {
2605 struct bsca_block *sca = vcpu->kvm->arch.sca;
2607 /* we still need the basic sca for the ipte control */
2608 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2609 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2610 return;
2611 }
2612 read_lock(&vcpu->kvm->arch.sca_lock);
2613 if (vcpu->kvm->arch.use_esca) {
2614 struct esca_block *sca = vcpu->kvm->arch.sca;
2616 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2617 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2618 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2619 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2620 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2621 } else {
2622 struct bsca_block *sca = vcpu->kvm->arch.sca;
2624 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2625 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2626 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2627 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2629 read_unlock(&vcpu->kvm->arch.sca_lock);
2632 /* Basic SCA to Extended SCA data copy routines */
2633 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2634 {
2635 d->sda = s->sda;
2636 d->sigp_ctrl.c = s->sigp_ctrl.c;
2637 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2640 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2644 d->ipte_control = s->ipte_control;
2646 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2647 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2650 static int sca_switch_to_extended(struct kvm *kvm)
2652 struct bsca_block *old_sca = kvm->arch.sca;
2653 struct esca_block *new_sca;
2654 struct kvm_vcpu *vcpu;
2655 unsigned int vcpu_idx;
2656 u32 scaoh, scaol;
2658 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2659 if (!new_sca)
2660 return -ENOMEM;
2662 scaoh = (u32)((u64)(new_sca) >> 32);
2663 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2665 kvm_s390_vcpu_block_all(kvm);
2666 write_lock(&kvm->arch.sca_lock);
2668 sca_copy_b_to_e(new_sca, old_sca);
2670 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2671 vcpu->arch.sie_block->scaoh = scaoh;
2672 vcpu->arch.sie_block->scaol = scaol;
2673 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2675 kvm->arch.sca = new_sca;
2676 kvm->arch.use_esca = 1;
2678 write_unlock(&kvm->arch.sca_lock);
2679 kvm_s390_vcpu_unblock_all(kvm);
2681 free_page((unsigned long)old_sca);
2683 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2684 old_sca, kvm->arch.sca);
2686 return 0;
2687 }
2688 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2692 if (!kvm_s390_use_sca_entries()) {
2693 if (id < KVM_MAX_VCPUS)
2694 return true;
2695 return false;
2696 }
2697 if (id < KVM_S390_BSCA_CPU_SLOTS)
2698 return true;
2699 if (!sclp.has_esca || !sclp.has_64bscao)
2700 return false;
2702 mutex_lock(&kvm->lock);
2703 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2704 mutex_unlock(&kvm->lock);
2706 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
2707 }
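/*
 * Added note: a basic SCA (bsca_block) has KVM_S390_BSCA_CPU_SLOTS (64)
 * entries, an extended SCA (esca_block) KVM_S390_ESCA_CPU_SLOTS (248);
 * the switch to the extended format is done lazily here, the first time
 * a CPU id beyond the basic range is requested.
 */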
2709 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2711 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2712 kvm_clear_async_pf_completion_queue(vcpu);
2713 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2714 KVM_SYNC_GPRS |
2715 KVM_SYNC_ACRS |
2716 KVM_SYNC_CRS |
2717 KVM_SYNC_ARCH0 |
2718 KVM_SYNC_PFAULT;
2719 kvm_s390_set_prefix(vcpu, 0);
2720 if (test_kvm_facility(vcpu->kvm, 64))
2721 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2722 if (test_kvm_facility(vcpu->kvm, 82))
2723 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
2724 if (test_kvm_facility(vcpu->kvm, 133))
2725 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2726 if (test_kvm_facility(vcpu->kvm, 156))
2727 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
2728 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2729 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2730 */
2731 if (MACHINE_HAS_VX)
2732 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
2733 else
2734 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
2736 if (kvm_is_ucontrol(vcpu->kvm))
2737 return __kvm_ucontrol_vcpu_init(vcpu);
2739 return 0;
2740 }
2742 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2743 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2745 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2746 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2747 vcpu->arch.cputm_start = get_tod_clock_fast();
2748 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2751 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2752 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2754 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2755 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2756 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2757 vcpu->arch.cputm_start = 0;
2758 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2761 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2762 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2764 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2765 vcpu->arch.cputm_enabled = true;
2766 __start_cpu_timer_accounting(vcpu);
2769 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2770 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2772 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2773 __stop_cpu_timer_accounting(vcpu);
2774 vcpu->arch.cputm_enabled = false;
2777 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2779 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2780 __enable_cpu_timer_accounting(vcpu);
2781 preempt_enable();
2782 }
2784 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2786 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2787 __disable_cpu_timer_accounting(vcpu);
2788 preempt_enable();
2789 }
2791 /* set the cpu timer - may only be called from the VCPU thread itself */
2792 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2794 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2795 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2796 if (vcpu->arch.cputm_enabled)
2797 vcpu->arch.cputm_start = get_tod_clock_fast();
2798 vcpu->arch.sie_block->cputm = cputm;
2799 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2800 preempt_enable();
2801 }
2803 /* update and get the cpu timer - can also be called from other VCPU threads */
2804 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2805 {
2806 unsigned int seq;
2807 __u64 value;
2809 if (unlikely(!vcpu->arch.cputm_enabled))
2810 return vcpu->arch.sie_block->cputm;
2812 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2813 do {
2814 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2816 * If the writer would ever execute a read in the critical
2817 * section, e.g. in irq context, we have a deadlock.
2819 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2820 value = vcpu->arch.sie_block->cputm;
2821 /* if cputm_start is 0, accounting is being started/stopped */
2822 if (likely(vcpu->arch.cputm_start))
2823 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2824 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2825 preempt_enable();
2827 return value;
2828 }
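/*
 * Added note: raw_read_seqcount() returns an odd value while an update is
 * in flight; retrying against (seq & ~1) therefore forces one more
 * iteration in that case instead of returning a value that was sampled
 * concurrently with __start/__stop_cpu_timer_accounting().
 */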
2829 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2832 gmap_enable(vcpu->arch.enabled_gmap);
2833 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
2834 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2835 __start_cpu_timer_accounting(vcpu);
2839 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2842 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2843 __stop_cpu_timer_accounting(vcpu);
2844 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
2845 vcpu->arch.enabled_gmap = gmap_get_enabled();
2846 gmap_disable(vcpu->arch.enabled_gmap);
2850 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2852 /* this equals initial cpu reset in pop, but we don't switch to ESA */
2853 vcpu->arch.sie_block->gpsw.mask = 0UL;
2854 vcpu->arch.sie_block->gpsw.addr = 0UL;
2855 kvm_s390_set_prefix(vcpu, 0);
2856 kvm_s390_set_cpu_timer(vcpu, 0);
2857 vcpu->arch.sie_block->ckc = 0UL;
2858 vcpu->arch.sie_block->todpr = 0;
2859 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2860 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2861 CR0_INTERRUPT_KEY_SUBMASK |
2862 CR0_MEASUREMENT_ALERT_SUBMASK;
2863 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2864 CR14_UNUSED_33 |
2865 CR14_EXTERNAL_DAMAGE_SUBMASK;
2866 /* make sure the new fpc will be lazily loaded */
2867 save_fpu_regs();
2868 current->thread.fpu.fpc = 0;
2869 vcpu->arch.sie_block->gbea = 1;
2870 vcpu->arch.sie_block->pp = 0;
2871 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2872 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2873 kvm_clear_async_pf_completion_queue(vcpu);
2874 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2875 kvm_s390_vcpu_stop(vcpu);
2876 kvm_s390_clear_local_irqs(vcpu);
2879 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
2881 mutex_lock(&vcpu->kvm->lock);
2882 preempt_disable();
2883 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
2884 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
2885 preempt_enable();
2886 mutex_unlock(&vcpu->kvm->lock);
2887 if (!kvm_is_ucontrol(vcpu->kvm)) {
2888 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
2889 sca_add_vcpu(vcpu);
2890 }
2891 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2892 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2893 /* make vcpu_load load the right gmap on the first trigger */
2894 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
2897 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2899 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2900 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2901 return true;
2902 return false;
2903 }
2905 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2907 /* At least one ECC subfunction must be present */
2908 return kvm_has_pckmo_subfunc(kvm, 32) ||
2909 kvm_has_pckmo_subfunc(kvm, 33) ||
2910 kvm_has_pckmo_subfunc(kvm, 34) ||
2911 kvm_has_pckmo_subfunc(kvm, 40) ||
2912 kvm_has_pckmo_subfunc(kvm, 41);
2913 }
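/*
 * Added note (stated from the MSA9 definition, not from this file): the
 * subfunction numbers tested above are the PCKMO function codes for the
 * ECC variants - 32/33/34 encrypt ECC-P256, ECC-P384 and ECC-P521 keys,
 * 40/41 the Ed25519 and Ed448 keys.
 */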
2916 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2917 {
2918 /*
2919 * If the AP instructions are not being interpreted and the MSAX3
2920 * facility is not configured for the guest, there is nothing to set up.
2921 */
2922 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
2923 return;
2925 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2926 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2927 vcpu->arch.sie_block->eca &= ~ECA_APIE;
2928 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
2930 if (vcpu->kvm->arch.crypto.apie)
2931 vcpu->arch.sie_block->eca |= ECA_APIE;
2933 /* Set up protected key support */
2934 if (vcpu->kvm->arch.crypto.aes_kw) {
2935 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2936 /* ecc is also wrapped with AES key */
2937 if (kvm_has_pckmo_ecc(vcpu->kvm))
2938 vcpu->arch.sie_block->ecd |= ECD_ECC;
2941 if (vcpu->kvm->arch.crypto.dea_kw)
2942 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2945 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2947 free_page(vcpu->arch.sie_block->cbrlo);
2948 vcpu->arch.sie_block->cbrlo = 0;
2951 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2953 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2954 if (!vcpu->arch.sie_block->cbrlo)
2955 return -ENOMEM;
2956 return 0;
2957 }
2959 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2961 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2963 vcpu->arch.sie_block->ibc = model->ibc;
2964 if (test_kvm_facility(vcpu->kvm, 7))
2965 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
2968 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2972 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2973 CPUSTAT_SM |
2974 CPUSTAT_STOPPED);
2976 if (test_kvm_facility(vcpu->kvm, 78))
2977 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
2978 else if (test_kvm_facility(vcpu->kvm, 8))
2979 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
2981 kvm_s390_vcpu_setup_model(vcpu);
2983 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2984 if (MACHINE_HAS_ESOP)
2985 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2986 if (test_kvm_facility(vcpu->kvm, 9))
2987 vcpu->arch.sie_block->ecb |= ECB_SRSI;
2988 if (test_kvm_facility(vcpu->kvm, 73))
2989 vcpu->arch.sie_block->ecb |= ECB_TE;
2991 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
2992 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
2993 if (test_kvm_facility(vcpu->kvm, 130))
2994 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2995 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
2996 if (sclp.has_cei)
2997 vcpu->arch.sie_block->eca |= ECA_CEI;
2998 if (sclp.has_ib)
2999 vcpu->arch.sie_block->eca |= ECA_IB;
3000 if (sclp.has_siif)
3001 vcpu->arch.sie_block->eca |= ECA_SII;
3002 if (sclp.has_sigpif)
3003 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3004 if (test_kvm_facility(vcpu->kvm, 129)) {
3005 vcpu->arch.sie_block->eca |= ECA_VX;
3006 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3008 if (test_kvm_facility(vcpu->kvm, 139))
3009 vcpu->arch.sie_block->ecd |= ECD_MEF;
3010 if (test_kvm_facility(vcpu->kvm, 156))
3011 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3012 if (vcpu->arch.sie_block->gd) {
3013 vcpu->arch.sie_block->eca |= ECA_AIV;
3014 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3015 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3017 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3018 | SDNXC;
3019 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3021 if (sclp.has_kss)
3022 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3023 else
3024 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3026 if (vcpu->kvm->arch.use_cmma) {
3027 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3028 if (rc)
3029 return rc;
3030 }
3031 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3032 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3034 vcpu->arch.sie_block->hpid = HPID_KVM;
3036 kvm_s390_vcpu_crypto_setup(vcpu);
3038 return rc;
3039 }
3041 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3042 unsigned int id)
3043 {
3044 struct kvm_vcpu *vcpu;
3045 struct sie_page *sie_page;
3046 int rc = -EINVAL;
3048 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3049 goto out;
3051 rc = -ENOMEM;
3053 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3054 if (!vcpu)
3055 goto out;
3057 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3058 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3059 if (!sie_page)
3060 goto out_free_cpu;
3062 vcpu->arch.sie_block = &sie_page->sie_block;
3063 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3065 /* the real guest size will always be smaller than msl */
3066 vcpu->arch.sie_block->mso = 0;
3067 vcpu->arch.sie_block->msl = sclp.hamax;
3069 vcpu->arch.sie_block->icpua = id;
3070 spin_lock_init(&vcpu->arch.local_int.lock);
3071 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
3072 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3073 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
3074 seqcount_init(&vcpu->arch.cputm_seqcount);
3076 rc = kvm_vcpu_init(vcpu, kvm, id);
3077 if (rc)
3078 goto out_free_sie_block;
3079 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
3080 vcpu->arch.sie_block);
3081 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
3083 return vcpu;
3084 out_free_sie_block:
3085 free_page((unsigned long)(vcpu->arch.sie_block));
3086 out_free_cpu:
3087 kmem_cache_free(kvm_vcpu_cache, vcpu);
3088 out:
3089 return ERR_PTR(rc);
3090 }
3092 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3094 return kvm_s390_vcpu_has_irq(vcpu, 0);
3097 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3099 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3102 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3104 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3105 exit_sie(vcpu);
3106 }
3108 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3110 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3113 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3115 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3116 exit_sie(vcpu);
3117 }
3119 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3121 return atomic_read(&vcpu->arch.sie_block->prog20) &
3122 (PROG_BLOCK_SIE | PROG_REQUEST);
3125 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3127 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3130 /*
3131 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3132 * If the CPU is not running (e.g. waiting as idle) the function will
3133 * return immediately. */
3134 void exit_sie(struct kvm_vcpu *vcpu)
3136 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3137 kvm_s390_vsie_kick(vcpu);
3138 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3139 cpu_relax();
3140 }
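/*
 * Added note: CPUSTAT_STOP_INT makes the SIE instruction intercept on the
 * target CPU, and kvm_s390_vsie_kick() does the same for a CPU currently
 * running a nested (vSIE) guest; the busy loop then waits for PROG_IN_SIE
 * to drop, i.e. for the CPU to actually leave SIE.
 */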
3142 /* Kick a guest cpu out of SIE to process a request synchronously */
3143 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3145 kvm_make_request(req, vcpu);
3146 kvm_s390_vcpu_request(vcpu);
3149 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3150 unsigned long end)
3151 {
3152 struct kvm *kvm = gmap->private;
3153 struct kvm_vcpu *vcpu;
3154 unsigned long prefix;
3157 if (gmap_is_shadow(gmap))
3158 return;
3159 if (start >= 1UL << 31)
3160 /* We are only interested in prefix pages */
3161 return;
3162 kvm_for_each_vcpu(i, vcpu, kvm) {
3163 /* match against both prefix pages */
3164 prefix = kvm_s390_get_prefix(vcpu);
3165 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3166 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3167 start, end);
3168 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
3169 }
3170 }
3171 }
3173 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3175 /* do not poll with more than halt_poll_max_steal percent of steal time */
3176 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3177 halt_poll_max_steal) {
3178 vcpu->stat.halt_no_poll_steal++;
3179 return true;
3180 }
3181 return false;
3182 }
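/*
 * Added note: avg_steal_timer is accounted in TOD clock units and
 * TICK_USEC is the tick length in microseconds; with 4096 TOD units per
 * microsecond (the << 12), the quotient is the steal time as a percentage
 * of one tick, compared against the halt_poll_max_steal module parameter.
 */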
3184 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3186 /* kvm common code refers to this, but never calls it */
3187 BUG();
3188 return 0;
3189 }
3191 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3192 struct kvm_one_reg *reg)
3193 {
3194 int r = -EINVAL;
3196 switch (reg->id & KVM_REG_S390_TYPE_MASK) {
3197 case KVM_REG_S390_TODPR:
3198 r = put_user(vcpu->arch.sie_block->todpr,
3199 (u32 __user *)reg->addr);
3200 break;
3201 case KVM_REG_S390_EPOCHDIFF:
3202 r = put_user(vcpu->arch.sie_block->epoch,
3203 (u64 __user *)reg->addr);
3204 break;
3205 case KVM_REG_S390_CPU_TIMER:
3206 r = put_user(kvm_s390_get_cpu_timer(vcpu),
3207 (u64 __user *)reg->addr);
3208 break;
3209 case KVM_REG_S390_CLOCK_COMP:
3210 r = put_user(vcpu->arch.sie_block->ckc,
3211 (u64 __user *)reg->addr);
3212 break;
3213 case KVM_REG_S390_PFTOKEN:
3214 r = put_user(vcpu->arch.pfault_token,
3215 (u64 __user *)reg->addr);
3216 break;
3217 case KVM_REG_S390_PFCOMPARE:
3218 r = put_user(vcpu->arch.pfault_compare,
3219 (u64 __user *)reg->addr);
3220 break;
3221 case KVM_REG_S390_PFSELECT:
3222 r = put_user(vcpu->arch.pfault_select,
3223 (u64 __user *)reg->addr);
3224 break;
3225 case KVM_REG_S390_PP:
3226 r = put_user(vcpu->arch.sie_block->pp,
3227 (u64 __user *)reg->addr);
3228 break;
3229 case KVM_REG_S390_GBEA:
3230 r = put_user(vcpu->arch.sie_block->gbea,
3231 (u64 __user *)reg->addr);
3232 break;
3233 default:
3234 break;
3235 }
3237 return r;
3238 }
3240 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3241 struct kvm_one_reg *reg)
3242 {
3243 int r = -EINVAL;
3244 __u64 val;
3246 switch (reg->id & KVM_REG_S390_TYPE_MASK) {
3247 case KVM_REG_S390_TODPR:
3248 r = get_user(vcpu->arch.sie_block->todpr,
3249 (u32 __user *)reg->addr);
3250 break;
3251 case KVM_REG_S390_EPOCHDIFF:
3252 r = get_user(vcpu->arch.sie_block->epoch,
3253 (u64 __user *)reg->addr);
3254 break;
3255 case KVM_REG_S390_CPU_TIMER:
3256 r = get_user(val, (u64 __user *)reg->addr);
3257 if (!r)
3258 kvm_s390_set_cpu_timer(vcpu, val);
3259 break;
3260 case KVM_REG_S390_CLOCK_COMP:
3261 r = get_user(vcpu->arch.sie_block->ckc,
3262 (u64 __user *)reg->addr);
3263 break;
3264 case KVM_REG_S390_PFTOKEN:
3265 r = get_user(vcpu->arch.pfault_token,
3266 (u64 __user *)reg->addr);
3267 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3268 kvm_clear_async_pf_completion_queue(vcpu);
3269 break;
3270 case KVM_REG_S390_PFCOMPARE:
3271 r = get_user(vcpu->arch.pfault_compare,
3272 (u64 __user *)reg->addr);
3273 break;
3274 case KVM_REG_S390_PFSELECT:
3275 r = get_user(vcpu->arch.pfault_select,
3276 (u64 __user *)reg->addr);
3277 break;
3278 case KVM_REG_S390_PP:
3279 r = get_user(vcpu->arch.sie_block->pp,
3280 (u64 __user *)reg->addr);
3281 break;
3282 case KVM_REG_S390_GBEA:
3283 r = get_user(vcpu->arch.sie_block->gbea,
3284 (u64 __user *)reg->addr);
3285 break;
3286 default:
3287 break;
3288 }
3290 return r;
3291 }
3293 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3295 kvm_s390_vcpu_initial_reset(vcpu);
3296 return 0;
3297 }
3299 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3300 {
3301 vcpu_load(vcpu);
3302 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
3303 vcpu_put(vcpu);
3304 return 0;
3305 }
3307 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3308 {
3309 vcpu_load(vcpu);
3310 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
3311 vcpu_put(vcpu);
3312 return 0;
3313 }
3315 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3316 struct kvm_sregs *sregs)
3317 {
3318 vcpu_load(vcpu);
3320 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
3321 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
3323 vcpu_put(vcpu);
3324 return 0;
3325 }
3327 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3328 struct kvm_sregs *sregs)
3329 {
3330 vcpu_load(vcpu);
3332 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
3333 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
3335 vcpu_put(vcpu);
3336 return 0;
3337 }
3339 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3340 {
3341 int ret = 0;
3343 vcpu_load(vcpu);
3345 if (test_fp_ctl(fpu->fpc)) {
3346 ret = -EINVAL;
3347 goto out;
3348 }
3349 vcpu->run->s.regs.fpc = fpu->fpc;
3350 if (MACHINE_HAS_VX)
3351 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3352 (freg_t *) fpu->fprs);
3353 else
3354 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
3356 out:
3357 vcpu_put(vcpu);
3358 return ret;
3359 }
3361 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3362 {
3363 vcpu_load(vcpu);
3365 /* make sure we have the latest values */
3366 save_fpu_regs();
3367 if (MACHINE_HAS_VX)
3368 convert_vx_to_fp((freg_t *) fpu->fprs,
3369 (__vector128 *) vcpu->run->s.regs.vrs);
3370 else
3371 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
3372 fpu->fpc = vcpu->run->s.regs.fpc;
3374 vcpu_put(vcpu);
3375 return 0;
3376 }
3378 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3379 {
3380 int rc = 0;
3382 if (!is_vcpu_stopped(vcpu))
3383 rc = -EBUSY;
3384 else {
3385 vcpu->run->psw_mask = psw.mask;
3386 vcpu->run->psw_addr = psw.addr;
3387 }
3388 return rc;
3389 }
3391 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3392 struct kvm_translation *tr)
3394 return -EINVAL; /* not implemented yet */
3397 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3398 KVM_GUESTDBG_USE_HW_BP | \
3399 KVM_GUESTDBG_ENABLE)
3401 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3402 struct kvm_guest_debug *dbg)
3403 {
3404 int rc = 0;
3406 vcpu_load(vcpu);
3408 vcpu->guest_debug = 0;
3409 kvm_s390_clear_bp_data(vcpu);
3411 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3412 rc = -EINVAL;
3413 goto out;
3414 }
3415 if (!sclp.has_gpere) {
3416 rc = -EINVAL;
3417 goto out;
3418 }
3420 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3421 vcpu->guest_debug = dbg->control;
3422 /* enforce guest PER */
3423 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
3425 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3426 rc = kvm_s390_import_bp_data(vcpu, dbg);
3427 } else {
3428 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3429 vcpu->arch.guestdbg.last_bp = 0;
3430 }
3432 if (rc) {
3433 vcpu->guest_debug = 0;
3434 kvm_s390_clear_bp_data(vcpu);
3435 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3436 }
3438 out:
3439 vcpu_put(vcpu);
3440 return rc;
3441 }
3443 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3444 struct kvm_mp_state *mp_state)
3445 {
3446 int ret;
3448 vcpu_load(vcpu);
3450 /* CHECK_STOP and LOAD are not supported yet */
3451 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3452 KVM_MP_STATE_OPERATING;
3454 vcpu_put(vcpu);
3455 return ret;
3456 }
3458 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3459 struct kvm_mp_state *mp_state)
3460 {
3461 int rc = 0;
3463 vcpu_load(vcpu);
3465 /* user space knows about this interface - let it control the state */
3466 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3468 switch (mp_state->mp_state) {
3469 case KVM_MP_STATE_STOPPED:
3470 kvm_s390_vcpu_stop(vcpu);
3471 break;
3472 case KVM_MP_STATE_OPERATING:
3473 kvm_s390_vcpu_start(vcpu);
3474 break;
3475 case KVM_MP_STATE_LOAD:
3476 case KVM_MP_STATE_CHECK_STOP:
3477 /* fall through - CHECK_STOP and LOAD are not supported yet */
3478 default:
3479 rc = -ENXIO;
3480 }
3482 vcpu_put(vcpu);
3483 return rc;
3484 }
3486 static bool ibs_enabled(struct kvm_vcpu *vcpu)
3488 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
3491 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3492 {
3493 retry:
3494 kvm_s390_vcpu_request_handled(vcpu);
3495 if (!kvm_request_pending(vcpu))
3496 return 0;
3497 /*
3498 * We use MMU_RELOAD just to re-arm the ipte notifier for the
3499 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
3500 * This ensures that the ipte instruction for this request has
3501 * already finished. We might race against a second unmapper that
3502 * wants to set the blocking bit. Let's just retry the request loop.
3503 */
3504 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
3505 int rc;
3506 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3507 kvm_s390_get_prefix(vcpu),
3508 PAGE_SIZE * 2, PROT_WRITE);
3509 if (rc) {
3510 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
3511 return rc;
3512 }
3513 goto retry;
3514 }
3516 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3517 vcpu->arch.sie_block->ihcpu = 0xffff;
3518 goto retry;
3519 }
3521 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3522 if (!ibs_enabled(vcpu)) {
3523 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
3524 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
3525 }
3526 goto retry;
3527 }
3529 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3530 if (ibs_enabled(vcpu)) {
3531 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
3532 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
3533 }
3534 goto retry;
3535 }
3537 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3538 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3539 goto retry;
3540 }
3542 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3543 /*
3544 * Disable CMM virtualization; we will emulate the ESSA
3545 * instruction manually, in order to provide additional
3546 * functionalities needed for live migration.
3547 */
3548 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3549 goto retry;
3550 }
3552 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3553 /*
3554 * Re-enable CMM virtualization if CMMA is available and
3555 * CMM has been used.
3556 */
3557 if ((vcpu->kvm->arch.use_cmma) &&
3558 (vcpu->kvm->mm->context.uses_cmm))
3559 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3560 goto retry;
3561 }
3563 /* nothing to do, just clear the request */
3564 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
3565 /* we left the vsie handler, nothing to do, just clear the request */
3566 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
3568 return 0;
3569 }
3571 void kvm_s390_set_tod_clock(struct kvm *kvm,
3572 const struct kvm_s390_vm_tod_clock *gtod)
3574 struct kvm_vcpu *vcpu;
3575 struct kvm_s390_tod_clock_ext htod;
3576 int i;
3578 mutex_lock(&kvm->lock);
3579 preempt_disable();
3581 get_tod_clock_ext((char *)&htod);
3583 kvm->arch.epoch = gtod->tod - htod.tod;
3584 kvm->arch.epdx = 0;
3585 if (test_kvm_facility(kvm, 139)) {
3586 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3587 if (kvm->arch.epoch > gtod->tod)
3588 kvm->arch.epdx -= 1;
3589 }
3591 kvm_s390_vcpu_block_all(kvm);
3592 kvm_for_each_vcpu(i, vcpu, kvm) {
3593 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3594 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3597 kvm_s390_vcpu_unblock_all(kvm);
3598 preempt_enable();
3599 mutex_unlock(&kvm->lock);
3600 }
3602 /**
3603 * kvm_arch_fault_in_page - fault-in guest page if necessary
3604 * @vcpu: The corresponding virtual cpu
3605 * @gpa: Guest physical address
3606 * @writable: Whether the page should be writable or not
3608 * Make sure that a guest page has been faulted-in on the host.
3610 * Return: Zero on success, negative error code otherwise.
3611 */
3612 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
3614 return gmap_fault(vcpu->arch.gmap, gpa,
3615 writable ? FAULT_FLAG_WRITE : 0);
3618 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3619 unsigned long token)
3621 struct kvm_s390_interrupt inti;
3622 struct kvm_s390_irq irq;
3624 if (start_token) {
3625 irq.u.ext.ext_params2 = token;
3626 irq.type = KVM_S390_INT_PFAULT_INIT;
3627 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3628 } else {
3629 inti.type = KVM_S390_INT_PFAULT_DONE;
3630 inti.parm64 = token;
3631 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3632 }
3633 }
3635 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3636 struct kvm_async_pf *work)
3638 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3639 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3642 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3643 struct kvm_async_pf *work)
3645 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3646 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3649 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3650 struct kvm_async_pf *work)
3652 /* s390 will always inject the page directly */
3655 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3657 /*
3658 * s390 will always inject the page directly,
3659 * but we still want check_async_completion to cleanup
3660 */
3661 return true;
3662 }
3664 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3665 {
3666 hva_t hva;
3667 struct kvm_arch_async_pf arch;
3668 int rc;
3670 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3671 return 0;
3672 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3673 vcpu->arch.pfault_compare)
3674 return 0;
3675 if (psw_extint_disabled(vcpu))
3676 return 0;
3677 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3678 return 0;
3679 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
3680 return 0;
3681 if (!vcpu->arch.gmap->pfault_enabled)
3682 return 0;
3684 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3685 hva += current->thread.gmap_addr & ~PAGE_MASK;
3686 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3687 return 0;
3689 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3690 return rc;
3691 }
3693 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
3694 {
3695 int rc, cpuflags;
3697 /*
3698 * On s390 notifications for arriving pages will be delivered directly
3699 * to the guest but the house keeping for completed pfaults is
3700 * handled outside the worker.
3701 */
3702 kvm_check_async_pf_completion(vcpu);
3704 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3705 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
3707 if (need_resched())
3708 schedule();
3710 if (test_cpu_flag(CIF_MCCK_PENDING))
3711 s390_handle_mcck();
3713 if (!kvm_is_ucontrol(vcpu->kvm)) {
3714 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3715 if (rc)
3716 return rc;
3717 }
3719 rc = kvm_s390_handle_requests(vcpu);
3720 if (rc)
3721 return rc;
3723 if (guestdbg_enabled(vcpu)) {
3724 kvm_s390_backup_guest_per_regs(vcpu);
3725 kvm_s390_patch_guest_per_regs(vcpu);
3728 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3730 vcpu->arch.sie_block->icptcode = 0;
3731 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3732 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3733 trace_kvm_s390_sie_enter(vcpu, cpuflags);
3735 return 0;
3736 }
3738 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3740 struct kvm_s390_pgm_info pgm_info = {
3741 .code = PGM_ADDRESSING,
3742 };
3743 u8 opcode, ilen;
3744 int rc;
3746 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3747 trace_kvm_s390_sie_fault(vcpu);
3749 /*
3750 * We want to inject an addressing exception, which is defined as a
3751 * suppressing or terminating exception. However, since we came here
3752 * by a DAT access exception, the PSW still points to the faulting
3753 * instruction since DAT exceptions are nullifying. So we've got
3754 * to look up the current opcode to get the length of the instruction
3755 * to be able to forward the PSW.
3756 */
3757 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
3758 ilen = insn_length(opcode);
3759 if (rc < 0) {
3760 return rc;
3761 } else if (rc) {
3762 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3763 * Forward by arbitrary ilc, injection will take care of
3764 * nullification if necessary.
3765 */
3766 pgm_info = vcpu->arch.pgm;
3767 ilen = 4;
3768 }
3769 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3770 kvm_s390_forward_psw(vcpu, ilen);
3771 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
3774 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3776 struct mcck_volatile_info *mcck_info;
3777 struct sie_page *sie_page;
3779 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3780 vcpu->arch.sie_block->icptcode);
3781 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3783 if (guestdbg_enabled(vcpu))
3784 kvm_s390_restore_guest_per_regs(vcpu);
3786 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3787 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
3789 if (exit_reason == -EINTR) {
3790 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3791 sie_page = container_of(vcpu->arch.sie_block,
3792 struct sie_page, sie_block);
3793 mcck_info = &sie_page->mcck_info;
3794 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3795 return 0;
3796 }
3798 if (vcpu->arch.sie_block->icptcode > 0) {
3799 int rc = kvm_handle_sie_intercept(vcpu);
3801 if (rc != -EOPNOTSUPP)
3802 return rc;
3803 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3804 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3805 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3806 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3807 return -EREMOTE;
3808 } else if (exit_reason != -EFAULT) {
3809 vcpu->stat.exit_null++;
3810 return 0;
3811 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3812 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3813 vcpu->run->s390_ucontrol.trans_exc_code =
3814 current->thread.gmap_addr;
3815 vcpu->run->s390_ucontrol.pgm_code = 0x10;
3816 return -EREMOTE;
3817 } else if (current->thread.gmap_pfault) {
3818 trace_kvm_s390_major_guest_pfault(vcpu);
3819 current->thread.gmap_pfault = 0;
3820 if (kvm_arch_setup_async_pf(vcpu))
3821 return 0;
3822 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
3823 }
3824 return vcpu_post_run_fault_in_sie(vcpu);
3827 static int __vcpu_run(struct kvm_vcpu *vcpu)
3829 int rc, exit_reason;
3831 /*
3832 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3833 * ning the guest), so that memslots (and other stuff) are protected
3834 */
3835 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3837 do {
3838 rc = vcpu_pre_run(vcpu);
3839 if (rc)
3840 break;
3842 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3843 /*
3844 * As PF_VCPU will be used in fault handler, between
3845 * guest_enter and guest_exit should be no uaccess.
3846 */
3847 local_irq_disable();
3848 guest_enter_irqoff();
3849 __disable_cpu_timer_accounting(vcpu);
3850 local_irq_enable();
3851 exit_reason = sie64a(vcpu->arch.sie_block,
3852 vcpu->run->s.regs.gprs);
3853 local_irq_disable();
3854 __enable_cpu_timer_accounting(vcpu);
3855 guest_exit_irqoff();
3856 local_irq_enable();
3857 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3859 rc = vcpu_post_run(vcpu, exit_reason);
3860 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3862 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3863 return rc;
3864 }
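/*
 * Added note: SIE itself accounts the guest cpu timer while the guest
 * runs, so host-side timer accounting is switched off right before
 * sie64a() and back on right after it; both transitions happen with
 * interrupts disabled so no preemption or TOD sync can observe a
 * half-updated accounting state.
 */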
3866 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3868 struct runtime_instr_cb *riccb;
3869 struct gs_cb *gscb;
3871 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
3872 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
3873 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3874 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3875 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3876 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3877 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3878 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
3879 /* some control register changes require a tlb flush */
3880 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3882 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
3883 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
3884 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3885 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3886 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3887 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3889 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3890 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3891 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3892 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
3893 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3894 kvm_clear_async_pf_completion_queue(vcpu);
3897 * If userspace sets the riccb (e.g. after migration) to a valid state,
3898 * we should enable RI here instead of doing the lazy enablement.
3900 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
3901 test_kvm_facility(vcpu->kvm, 64) &&
3902 riccb->v &&
3903 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
3904 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
3905 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
3906 }
3908 * If userspace sets the gscb (e.g. after migration) to non-zero,
3909 * we should enable GS here instead of doing the lazy enablement.
3911 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3912 test_kvm_facility(vcpu->kvm, 133) &&
3913 gscb->gssm &&
3914 !vcpu->arch.gs_enabled) {
3915 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3916 vcpu->arch.sie_block->ecb |= ECB_GS;
3917 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3918 vcpu->arch.gs_enabled = 1;
3920 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3921 test_kvm_facility(vcpu->kvm, 82)) {
3922 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3923 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3925 save_access_regs(vcpu->arch.host_acrs);
3926 restore_access_regs(vcpu->run->s.regs.acrs);
3927 /* save host (userspace) fprs/vrs */
3928 save_fpu_regs();
3929 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3930 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3931 if (MACHINE_HAS_VX)
3932 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3933 else
3934 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3935 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3936 if (test_fp_ctl(current->thread.fpu.fpc))
3937 /* User space provided an invalid FPC, let's clear it */
3938 current->thread.fpu.fpc = 0;
3939 if (MACHINE_HAS_GS) {
3940 preempt_disable();
3941 __ctl_set_bit(2, 4);
3942 if (current->thread.gs_cb) {
3943 vcpu->arch.host_gscb = current->thread.gs_cb;
3944 save_gs_cb(vcpu->arch.host_gscb);
3945 }
3946 if (vcpu->arch.gs_enabled) {
3947 current->thread.gs_cb = (struct gs_cb *)
3948 &vcpu->run->s.regs.gscb;
3949 restore_gs_cb(current->thread.gs_cb);
3950 }
3951 preempt_enable();
3952 }
3953 /* SIE will load etoken directly from SDNX and therefore kvm_run */
3955 kvm_run->kvm_dirty_regs = 0;
3958 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3960 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3961 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3962 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3963 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
3964 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
3965 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3966 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3967 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3968 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3969 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3970 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3971 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
3972 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
3973 save_access_regs(vcpu->run->s.regs.acrs);
3974 restore_access_regs(vcpu->arch.host_acrs);
3975 /* Save guest register state */
3976 save_fpu_regs();
3977 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3978 /* Restore will be done lazily at return */
3979 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3980 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
3981 if (MACHINE_HAS_GS) {
3982 __ctl_set_bit(2, 4);
3983 if (vcpu->arch.gs_enabled)
3984 save_gs_cb(current->thread.gs_cb);
3985 preempt_disable();
3986 current->thread.gs_cb = vcpu->arch.host_gscb;
3987 restore_gs_cb(vcpu->arch.host_gscb);
3988 preempt_enable();
3989 if (!vcpu->arch.host_gscb)
3990 __ctl_clear_bit(2, 4);
3991 vcpu->arch.host_gscb = NULL;
3992 }
3993 /* SIE will save etoken directly into SDNX and therefore kvm_run */
3996 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4000 if (kvm_run->immediate_exit)
4001 return -EINTR;
4003 vcpu_load(vcpu);
4005 if (guestdbg_exit_pending(vcpu)) {
4006 kvm_s390_prepare_debug_exit(vcpu);
4007 rc = 0;
4008 goto out;
4009 }
4011 kvm_sigset_activate(vcpu);
4013 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4014 kvm_s390_vcpu_start(vcpu);
4015 } else if (is_vcpu_stopped(vcpu)) {
4016 pr_err_ratelimited("can't run stopped vcpu %d\n",
4017 vcpu->vcpu_id);
4018 rc = -EINVAL;
4019 goto out;
4020 }
4022 sync_regs(vcpu, kvm_run);
4023 enable_cpu_timer_accounting(vcpu);
4025 might_fault();
4026 rc = __vcpu_run(vcpu);
4028 if (signal_pending(current) && !rc) {
4029 kvm_run->exit_reason = KVM_EXIT_INTR;
4030 rc = -EINTR;
4031 }
4033 if (guestdbg_exit_pending(vcpu) && !rc) {
4034 kvm_s390_prepare_debug_exit(vcpu);
4035 rc = 0;
4036 }
4038 if (rc == -EREMOTE) {
4039 /* userspace support is needed, kvm_run has been prepared */
4040 rc = 0;
4041 }
4043 disable_cpu_timer_accounting(vcpu);
4044 store_regs(vcpu, kvm_run);
4046 kvm_sigset_deactivate(vcpu);
4048 vcpu->stat.exit_userspace++;
4049 out:
4050 vcpu_put(vcpu);
4051 return rc;
4052 }
4054 /*
4055 * store status at address
4056 * we have two special cases:
4057 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4058 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4059 */
4060 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
4062 unsigned char archmode = 1;
4063 freg_t fprs[NUM_FPRS];
4064 unsigned int px;
4065 u64 clkcomp, cputm;
4066 int rc;
4068 px = kvm_s390_get_prefix(vcpu);
4069 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4070 if (write_guest_abs(vcpu, 163, &archmode, 1))
4071 return -EFAULT;
4072 gpa = 0;
4073 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4074 if (write_guest_real(vcpu, 163, &archmode, 1))
4075 return -EFAULT;
4076 gpa = px;
4077 }
4078 gpa -= __LC_FPREGS_SAVE_AREA;
4080 /* manually convert vector registers if necessary */
4081 if (MACHINE_HAS_VX) {
4082 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
4083 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4084 fprs, 128);
4085 } else {
4086 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4087 vcpu->run->s.regs.fprs, 128);
4088 }
4089 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
4090 vcpu->run->s.regs.gprs, 128);
4091 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
4092 &vcpu->arch.sie_block->gpsw, 16);
4093 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
4094 &px, 4);
4095 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
4096 &vcpu->run->s.regs.fpc, 4);
4097 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
4098 &vcpu->arch.sie_block->todpr, 4);
4099 cputm = kvm_s390_get_cpu_timer(vcpu);
4100 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4101 &cputm, 8);
4102 clkcomp = vcpu->arch.sie_block->ckc >> 8;
4103 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
4104 &clkcomp, 8);
4105 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
4106 &vcpu->run->s.regs.acrs, 64);
4107 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
4108 &vcpu->arch.sie_block->gcr, 128);
4109 return rc ? -EFAULT : 0;
4112 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4114 /*
4115 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
4116 * switch in the run ioctl. Let's update our copies before we save
4117 * it into the save area
4118 */
4119 save_fpu_regs();
4120 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4121 save_access_regs(vcpu->run->s.regs.acrs);
4123 return kvm_s390_store_status_unloaded(vcpu, addr);
4126 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4128 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
4129 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
4132 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4135 struct kvm_vcpu *vcpu;
4137 kvm_for_each_vcpu(i, vcpu, kvm) {
4138 __disable_ibs_on_vcpu(vcpu);
4142 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4146 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
4147 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
4150 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4152 int i, online_vcpus, started_vcpus = 0;
4154 if (!is_vcpu_stopped(vcpu))
4157 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
4158 /* Only one cpu at a time may enter/leave the STOPPED state. */
4159 spin_lock(&vcpu->kvm->arch.start_stop_lock);
4160 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4162 for (i = 0; i < online_vcpus; i++) {
4163 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4164 started_vcpus++;
4165 }
4167 if (started_vcpus == 0) {
4168 /* we're the only active VCPU -> speed it up */
4169 __enable_ibs_on_vcpu(vcpu);
4170 } else if (started_vcpus == 1) {
4172 * As we are starting a second VCPU, we have to disable
4173 * the IBS facility on all VCPUs to remove potentially
4174 * outstanding ENABLE requests.
4175 */
4176 __disable_ibs_on_all_vcpus(vcpu->kvm);
4177 }
4179 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
4181 * Another VCPU might have used IBS while we were offline.
4182 * Let's play safe and flush the VCPU at startup.
4184 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4185 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4189 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4191 int i, online_vcpus, started_vcpus = 0;
4192 struct kvm_vcpu *started_vcpu = NULL;
4194 if (is_vcpu_stopped(vcpu))
4195 return;
4197 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
4198 /* Only one cpu at a time may enter/leave the STOPPED state. */
4199 spin_lock(&vcpu->kvm->arch.start_stop_lock);
4200 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4202 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
4203 kvm_s390_clear_stop_irq(vcpu);
4205 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
4206 __disable_ibs_on_vcpu(vcpu);
4208 for (i = 0; i < online_vcpus; i++) {
4209 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4211 started_vcpu = vcpu->kvm->vcpus[i];
4215 if (started_vcpus == 1) {
4217 * As we only have one VCPU left, we want to enable the
4218 * IBS facility for that VCPU to speed it up.
4220 __enable_ibs_on_vcpu(started_vcpu);
4223 spin_unlock(&vcpu->kvm->arch.start_stop_lock);

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
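
/*
 * Illustrative only, not part of the original file: userspace turns on the
 * CSS-support handling through the vCPU's KVM_ENABLE_CAP ioctl; "vcpu_fd"
 * is a hypothetical descriptor from KVM_CREATE_VCPU. cap.flags must be 0,
 * which the zero-initialization below guarantees.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */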

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
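
/*
 * Illustrative only, not part of the original file: a minimal userspace
 * sketch of reading guest memory through KVM_S390_MEM_OP; "vcpu_fd" and
 * "buf" are assumptions of this sketch. Setting
 * KVM_S390_MEMOP_F_CHECK_ONLY instead copies no data and only runs the
 * check_gva_range() access test above.
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,	(guest logical address)
 *		.size  = 4096,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,		(access register number)
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 */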

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}
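
/*
 * Illustrative only, not part of the original file: injecting an
 * emergency-signal interrupt through the modern KVM_S390_IRQ ioctl; the
 * legacy KVM_S390_INTERRUPT case above converts into the same struct
 * before reaching kvm_s390_inject_vcpu(). "vcpu_fd" is hypothetical.
 *
 *	struct kvm_s390_irq irq = {
 *		.type	      = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 0,	(address of the signalling CPU)
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		perror("KVM_S390_IRQ");
 */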

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}
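
/*
 * Illustrative only, not part of the original file: saving the pending
 * local interrupts, e.g. for migration. On success the ioctl returns the
 * number of bytes copied into the buffer. "vcpu_fd" is hypothetical, and
 * the buffer bound here is an arbitrary choice of this sketch
 * (VCPU_IRQS_MAX_BUF above is kernel-internal, so real userspace must
 * pick its own size and retry or fail on -ENOBUFS).
 *
 *	__u8 ibuf[16384];
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)ibuf,
 *		.len = sizeof(ibuf),
 *	};
 *	int n = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 */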

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
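
/*
 * Illustrative only, not part of the original file: for user-controlled
 * ("ucontrol") VMs, userspace may mmap() the vCPU fd at page offset
 * KVM_S390_SIE_PAGE_OFFSET to reach the SIE control block; the fault
 * handler above backs that mapping. "vcpu_fd" and "page_size" are
 * assumptions of this sketch.
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 */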

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas, and it is OK to mmap() and munmap()
	   ranges in this slot at any time after this call. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
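
/*
 * Illustrative only, not part of the original file: a memslot that passes
 * the checks above, with userspace_addr and memory_size both multiples of
 * the 1MB segment size and the total within the VM's memory limit.
 * "vm_fd" is hypothetical and "backing" is assumed to be a 1MB-aligned
 * mmap() of at least 256MB.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot		 = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = 256 << 20,
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */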

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		/* FALLTHROUGH */
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
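
/*
 * Worked example, added for clarity: sclp.hmfai is treated as sixteen
 * 2-bit fields, one per doubleword of the facility list. The shift pair
 * extracts field i as nonhyp_fai (0..3), and the returned mask keeps the
 * 48 - 16 * nonhyp_fai least-significant bits of that doubleword. E.g.
 * nonhyp_fai == 1 gives 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL,
 * so only those facility bits survive the masking in kvm_s390_init() below.
 */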

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");