/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

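/*
 * VCPU_STAT expands to the offset of a counter within struct kvm_vcpu plus
 * the KVM_STAT_VCPU type marker; the common KVM debugfs code uses both to
 * expose each counter below as a file.
 */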
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	/* last_gfn is the first gfn past the slot */
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

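	/*
	 * The guest TOD is kept as an offset (epoch) from the host TOD
	 * clock: store the new offset and kick every vcpu out of SIE so
	 * it picks up the updated epoch.
	 */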
	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

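	/*
	 * Try a physically contiguous allocation first (without warning
	 * on failure) and fall back to vmalloc for large key arrays.
	 */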
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
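	/*
	 * PQAP(QCI): general register 0 takes the function code and
	 * general register 2 the address of the 128-byte config block.
	 * ipm/srl extract the condition code from the PSW; the EX_TABLE
	 * entry resumes at label 1 if the instruction raises a program
	 * check (e.g. when the facility is unavailable), leaving cc = 0
	 * and the zeroed config block in place.
	 */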
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
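	/*
	 * The CRYCB designation carries the address of the crypto control
	 * block; its low-order bits encode the format, with format 2 used
	 * when the APXA facility is installed.
	 */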
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
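	/*
	 * Offset the SCA of each new VM by another 16 bytes within the
	 * first 2k of its page; this staggers the SCA blocks of different
	 * VMs, presumably to spread cache line usage.
	 */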
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum number of facility bits is 16 kbit,
	 * which takes 2 kbyte to store. We therefore need a full page to
	 * hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). The page address has to
	 * fit into 31 bits and be word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
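	/*
	 * Save the host register state and load the guest state. Facility
	 * 129 is the vector facility; since the floating point registers
	 * are embedded in the vector registers, only one of the two
	 * register sets has to be switched.
	 */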
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in the POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

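	/*
	 * Bit 0x80 in ecb2 lets the hardware interpret CMMA (the ESSA
	 * instruction); bit 0x08 (PFMF interpretation) is cleared so that
	 * PFMF still intercepts to the host while CMMA is in use.
	 */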
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	kvm_s390_vcpu_setup_model(vcpu);

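	/*
	 * ecb = 6 sets two execution-control bits (0x04 and 0x02); the
	 * additional 0x10 bit enables interpretation of transactional
	 * execution when the guest is offered both TX facilities
	 * (50: constrained TX, 73: TX).
	 */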
	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
	vcpu->arch.host_vregs = &sie_page->vregs;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
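		/*
		 * The prefix area spans two consecutive 4k pages; clearing
		 * bit 2^12 of the notified address lets the compare match
		 * either of them.
		 */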
1442                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1443                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1444                         kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
1445                         exit_sie_sync(vcpu);
1446                 }
1447         }
1448 }
1449
1450 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1451 {
1452         /* kvm common code refers to this, but never calls it */
1453         BUG();
1454         return 0;
1455 }
1456
1457 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1458                                            struct kvm_one_reg *reg)
1459 {
1460         int r = -EINVAL;
1461
1462         switch (reg->id) {
1463         case KVM_REG_S390_TODPR:
1464                 r = put_user(vcpu->arch.sie_block->todpr,
1465                              (u32 __user *)reg->addr);
1466                 break;
1467         case KVM_REG_S390_EPOCHDIFF:
1468                 r = put_user(vcpu->arch.sie_block->epoch,
1469                              (u64 __user *)reg->addr);
1470                 break;
1471         case KVM_REG_S390_CPU_TIMER:
1472                 r = put_user(vcpu->arch.sie_block->cputm,
1473                              (u64 __user *)reg->addr);
1474                 break;
1475         case KVM_REG_S390_CLOCK_COMP:
1476                 r = put_user(vcpu->arch.sie_block->ckc,
1477                              (u64 __user *)reg->addr);
1478                 break;
1479         case KVM_REG_S390_PFTOKEN:
1480                 r = put_user(vcpu->arch.pfault_token,
1481                              (u64 __user *)reg->addr);
1482                 break;
1483         case KVM_REG_S390_PFCOMPARE:
1484                 r = put_user(vcpu->arch.pfault_compare,
1485                              (u64 __user *)reg->addr);
1486                 break;
1487         case KVM_REG_S390_PFSELECT:
1488                 r = put_user(vcpu->arch.pfault_select,
1489                              (u64 __user *)reg->addr);
1490                 break;
1491         case KVM_REG_S390_PP:
1492                 r = put_user(vcpu->arch.sie_block->pp,
1493                              (u64 __user *)reg->addr);
1494                 break;
1495         case KVM_REG_S390_GBEA:
1496                 r = put_user(vcpu->arch.sie_block->gbea,
1497                              (u64 __user *)reg->addr);
1498                 break;
1499         default:
1500                 break;
1501         }
1502
1503         return r;
1504 }
1505
1506 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1507                                            struct kvm_one_reg *reg)
1508 {
1509         int r = -EINVAL;
1510
1511         switch (reg->id) {
1512         case KVM_REG_S390_TODPR:
1513                 r = get_user(vcpu->arch.sie_block->todpr,
1514                              (u32 __user *)reg->addr);
1515                 break;
1516         case KVM_REG_S390_EPOCHDIFF:
1517                 r = get_user(vcpu->arch.sie_block->epoch,
1518                              (u64 __user *)reg->addr);
1519                 break;
1520         case KVM_REG_S390_CPU_TIMER:
1521                 r = get_user(vcpu->arch.sie_block->cputm,
1522                              (u64 __user *)reg->addr);
1523                 break;
1524         case KVM_REG_S390_CLOCK_COMP:
1525                 r = get_user(vcpu->arch.sie_block->ckc,
1526                              (u64 __user *)reg->addr);
1527                 break;
1528         case KVM_REG_S390_PFTOKEN:
1529                 r = get_user(vcpu->arch.pfault_token,
1530                              (u64 __user *)reg->addr);
1531                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1532                         kvm_clear_async_pf_completion_queue(vcpu);
1533                 break;
1534         case KVM_REG_S390_PFCOMPARE:
1535                 r = get_user(vcpu->arch.pfault_compare,
1536                              (u64 __user *)reg->addr);
1537                 break;
1538         case KVM_REG_S390_PFSELECT:
1539                 r = get_user(vcpu->arch.pfault_select,
1540                              (u64 __user *)reg->addr);
1541                 break;
1542         case KVM_REG_S390_PP:
1543                 r = get_user(vcpu->arch.sie_block->pp,
1544                              (u64 __user *)reg->addr);
1545                 break;
1546         case KVM_REG_S390_GBEA:
1547                 r = get_user(vcpu->arch.sie_block->gbea,
1548                              (u64 __user *)reg->addr);
1549                 break;
1550         default:
1551                 break;
1552         }
1553
1554         return r;
1555 }
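
/*
 * A minimal userspace sketch of driving the handlers above through the
 * generic ONE_REG interface (vcpu_fd is assumed to be an open vcpu file
 * descriptor; error handling is omitted):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	fetches sie_block->cputm
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	writes it back
 */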
1556
1557 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1558 {
1559         kvm_s390_vcpu_initial_reset(vcpu);
1560         return 0;
1561 }
1562
1563 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1564 {
1565         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1566         return 0;
1567 }
1568
1569 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1570 {
1571         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1572         return 0;
1573 }
1574
1575 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1576                                   struct kvm_sregs *sregs)
1577 {
1578         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1579         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1580         restore_access_regs(vcpu->run->s.regs.acrs);
1581         return 0;
1582 }
1583
1584 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1585                                   struct kvm_sregs *sregs)
1586 {
1587         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1588         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1589         return 0;
1590 }
1591
1592 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1593 {
1594         if (test_fp_ctl(fpu->fpc))
1595                 return -EINVAL;
1596         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
1597         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1598         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1599         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1600         return 0;
1601 }
1602
1603 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1604 {
1605         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1606         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1607         return 0;
1608 }
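
/*
 * A userspace sketch of the matching ioctl for the FPU handlers above
 * (vcpu_fd assumed; error handling omitted):
 *
 *	struct kvm_fpu fpu;
 *
 *	ioctl(vcpu_fd, KVM_GET_FPU, &fpu);
 *
 * fpu.fprs[0..15] and fpu.fpc then hold the guest values; KVM_SET_FPU
 * takes the same structure in the other direction.
 */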
1609
1610 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1611 {
1612         int rc = 0;
1613
1614         if (!is_vcpu_stopped(vcpu))
1615                 rc = -EBUSY;
1616         else {
1617                 vcpu->run->psw_mask = psw.mask;
1618                 vcpu->run->psw_addr = psw.addr;
1619         }
1620         return rc;
1621 }
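
/*
 * A userspace sketch of the corresponding KVM_S390_SET_INITIAL_PSW call;
 * the mask sets the addressing-mode bits for 64-bit mode, and the entry
 * address is illustrative:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,
 *		.addr = 0x10000,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 *
 * The call fails with -EBUSY unless the vcpu is stopped.
 */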
1622
1623 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1624                                   struct kvm_translation *tr)
1625 {
1626         return -EINVAL; /* not implemented yet */
1627 }
1628
1629 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1630                               KVM_GUESTDBG_USE_HW_BP | \
1631                               KVM_GUESTDBG_ENABLE)
1632
1633 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1634                                         struct kvm_guest_debug *dbg)
1635 {
1636         int rc = 0;
1637
1638         vcpu->guest_debug = 0;
1639         kvm_s390_clear_bp_data(vcpu);
1640
1641         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1642                 return -EINVAL;
1643
1644         if (dbg->control & KVM_GUESTDBG_ENABLE) {
1645                 vcpu->guest_debug = dbg->control;
1646                 /* enforce guest PER */
1647                 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1648
1649                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1650                         rc = kvm_s390_import_bp_data(vcpu, dbg);
1651         } else {
1652                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1653                 vcpu->arch.guestdbg.last_bp = 0;
1654         }
1655
1656         if (rc) {
1657                 vcpu->guest_debug = 0;
1658                 kvm_s390_clear_bp_data(vcpu);
1659                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1660         }
1661
1662         return rc;
1663 }
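
/*
 * A userspace sketch enabling single-stepping via the handler above
 * (vcpu_fd assumed; error handling omitted):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * A later call without KVM_GUESTDBG_ENABLE clears CPUSTAT_P again.
 */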
1664
1665 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1666                                     struct kvm_mp_state *mp_state)
1667 {
1668         /* CHECK_STOP and LOAD are not supported yet */
1669         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1670                                        KVM_MP_STATE_OPERATING;
1671 }
1672
1673 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1674                                     struct kvm_mp_state *mp_state)
1675 {
1676         int rc = 0;
1677
1678         /* user space knows about this interface - let it control the state */
1679         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1680
1681         switch (mp_state->mp_state) {
1682         case KVM_MP_STATE_STOPPED:
1683                 kvm_s390_vcpu_stop(vcpu);
1684                 break;
1685         case KVM_MP_STATE_OPERATING:
1686                 kvm_s390_vcpu_start(vcpu);
1687                 break;
1688         case KVM_MP_STATE_LOAD:
1689         case KVM_MP_STATE_CHECK_STOP:
1690                 /* fall through - CHECK_STOP and LOAD are not supported yet */
1691         default:
1692                 rc = -ENXIO;
1693         }
1694
1695         return rc;
1696 }
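
/*
 * A userspace sketch of stopping a vcpu through the handler above; note
 * that the first KVM_SET_MP_STATE call also hands cpu state control to
 * userspace (vcpu_fd assumed):
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 */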
1697
1698 bool kvm_s390_cmma_enabled(struct kvm *kvm)
1699 {
1700         if (!MACHINE_IS_LPAR)
1701                 return false;
1702         /* only enable for z10 and later */
1703         if (!MACHINE_HAS_EDAT1)
1704                 return false;
1705         if (!kvm->arch.use_cmma)
1706                 return false;
1707         return true;
1708 }
1709
1710 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1711 {
1712         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1713 }
1714
1715 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1716 {
1717 retry:
1718         s390_vcpu_unblock(vcpu);
1719         /*
1720          * We use MMU_RELOAD just to re-arm the ipte notifier for the
1721          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1722          * This ensures that the ipte instruction for this request has
1723          * already finished. We might race against a second unmapper that
1724          * wants to set the blocking bit. Let's just retry the request loop.
1725          */
1726         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1727                 int rc;
1728                 rc = gmap_ipte_notify(vcpu->arch.gmap,
1729                                       kvm_s390_get_prefix(vcpu),
1730                                       PAGE_SIZE * 2);
1731                 if (rc)
1732                         return rc;
1733                 goto retry;
1734         }
1735
1736         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1737                 vcpu->arch.sie_block->ihcpu = 0xffff;
1738                 goto retry;
1739         }
1740
1741         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1742                 if (!ibs_enabled(vcpu)) {
1743                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1744                         atomic_set_mask(CPUSTAT_IBS,
1745                                         &vcpu->arch.sie_block->cpuflags);
1746                 }
1747                 goto retry;
1748         }
1749
1750         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1751                 if (ibs_enabled(vcpu)) {
1752                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1753                         atomic_clear_mask(CPUSTAT_IBS,
1754                                           &vcpu->arch.sie_block->cpuflags);
1755                 }
1756                 goto retry;
1757         }
1758
1759         /* nothing to do, just clear the request */
1760         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1761
1762         return 0;
1763 }
1764
1765 /**
1766  * kvm_arch_fault_in_page - fault-in guest page if necessary
1767  * @vcpu: The corresponding virtual cpu
1768  * @gpa: Guest physical address
1769  * @writable: Whether the page should be writable or not
1770  *
1771  * Make sure that a guest page has been faulted-in on the host.
1772  *
1773  * Return: Zero on success, negative error code otherwise.
1774  */
1775 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1776 {
1777         return gmap_fault(vcpu->arch.gmap, gpa,
1778                           writable ? FAULT_FLAG_WRITE : 0);
1779 }
1780
1781 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1782                                       unsigned long token)
1783 {
1784         struct kvm_s390_interrupt inti;
1785         struct kvm_s390_irq irq;
1786
1787         if (start_token) {
1788                 irq.u.ext.ext_params2 = token;
1789                 irq.type = KVM_S390_INT_PFAULT_INIT;
1790                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
1791         } else {
1792                 inti.type = KVM_S390_INT_PFAULT_DONE;
1793                 inti.parm64 = token;
1794                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1795         }
1796 }
1797
1798 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1799                                      struct kvm_async_pf *work)
1800 {
1801         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1802         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1803 }
1804
1805 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1806                                  struct kvm_async_pf *work)
1807 {
1808         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1809         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1810 }
1811
1812 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1813                                struct kvm_async_pf *work)
1814 {
1815         /* s390 will always inject the page directly */
1816 }
1817
1818 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1819 {
1820         /*
1821          * s390 will always inject the page directly,
1822          * but we still want check_async_completion to clean up
1823          */
1824         return true;
1825 }
1826
1827 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1828 {
1829         hva_t hva;
1830         struct kvm_arch_async_pf arch;
1831         int rc;
1832
1833         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1834                 return 0;
1835         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1836             vcpu->arch.pfault_compare)
1837                 return 0;
1838         if (psw_extint_disabled(vcpu))
1839                 return 0;
1840         if (kvm_s390_vcpu_has_irq(vcpu, 0))
1841                 return 0;
1842         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1843                 return 0;
1844         if (!vcpu->arch.gmap->pfault_enabled)
1845                 return 0;
1846
1847         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1848         hva += current->thread.gmap_addr & ~PAGE_MASK;
1849         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
1850                 return 0;
1851
1852         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1853         return rc;
1854 }
1855
1856 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1857 {
1858         int rc, cpuflags;
1859
1860         /*
1861          * On s390, notifications for arriving pages are delivered directly
1862          * to the guest, but the housekeeping for completed pfaults is
1863          * handled outside the worker.
1864          */
1865         kvm_check_async_pf_completion(vcpu);
1866
1867         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1868
1869         if (need_resched())
1870                 schedule();
1871
1872         if (test_cpu_flag(CIF_MCCK_PENDING))
1873                 s390_handle_mcck();
1874
1875         if (!kvm_is_ucontrol(vcpu->kvm)) {
1876                 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1877                 if (rc)
1878                         return rc;
1879         }
1880
1881         rc = kvm_s390_handle_requests(vcpu);
1882         if (rc)
1883                 return rc;
1884
1885         if (guestdbg_enabled(vcpu)) {
1886                 kvm_s390_backup_guest_per_regs(vcpu);
1887                 kvm_s390_patch_guest_per_regs(vcpu);
1888         }
1889
1890         vcpu->arch.sie_block->icptcode = 0;
1891         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1892         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1893         trace_kvm_s390_sie_enter(vcpu, cpuflags);
1894
1895         return 0;
1896 }
1897
1898 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1899 {
1900         psw_t *psw = &vcpu->arch.sie_block->gpsw;
1901         u8 opcode;
1902         int rc;
1903
1904         VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1905         trace_kvm_s390_sie_fault(vcpu);
1906
1907         /*
1908          * We want to inject an addressing exception, which is defined as a
1909          * suppressing or terminating exception. However, since we came here
1910          * by a DAT access exception, the PSW still points to the faulting
1911          * instruction since DAT exceptions are nullifying. So we've got
1912          * to look up the current opcode to get the length of the instruction
1913          * to be able to forward the PSW.
1914          */
1915         rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1916         if (rc)
1917                 return kvm_s390_inject_prog_cond(vcpu, rc);
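        /*
         * insn_length() derives the instruction length (2, 4 or 6 bytes)
         * from the two leftmost bits of the opcode; passing the negative
         * length to __rewind_psw() forwards the PSW past the faulting
         * instruction.
         */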
1918         psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1919
1920         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1921 }
1922
1923 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1924 {
1925         int rc = -1;
1926
1927         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1928                    vcpu->arch.sie_block->icptcode);
1929         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1930
1931         if (guestdbg_enabled(vcpu))
1932                 kvm_s390_restore_guest_per_regs(vcpu);
1933
1934         if (exit_reason >= 0) {
1935                 rc = 0;
1936         } else if (kvm_is_ucontrol(vcpu->kvm)) {
1937                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1938                 vcpu->run->s390_ucontrol.trans_exc_code =
1939                                                 current->thread.gmap_addr;
1940                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1941                 rc = -EREMOTE;
1942
1943         } else if (current->thread.gmap_pfault) {
1944                 trace_kvm_s390_major_guest_pfault(vcpu);
1945                 current->thread.gmap_pfault = 0;
1946                 if (kvm_arch_setup_async_pf(vcpu)) {
1947                         rc = 0;
1948                 } else {
1949                         gpa_t gpa = current->thread.gmap_addr;
1950                         rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1951                 }
1952         }
1953
1954         if (rc == -1)
1955                 rc = vcpu_post_run_fault_in_sie(vcpu);
1956
1957         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1958
1959         if (rc == 0) {
1960                 if (kvm_is_ucontrol(vcpu->kvm))
1961                         /* Don't exit for host interrupts. */
1962                         rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1963                 else
1964                         rc = kvm_handle_sie_intercept(vcpu);
1965         }
1966
1967         return rc;
1968 }
1969
1970 static int __vcpu_run(struct kvm_vcpu *vcpu)
1971 {
1972         int rc, exit_reason;
1973
1974         /*
1975          * We try to hold kvm->srcu during most of vcpu_run (except when
1976          * running the guest), so that memslots (and other stuff) are protected.
1977          */
1978         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1979
1980         do {
1981                 rc = vcpu_pre_run(vcpu);
1982                 if (rc)
1983                         break;
1984
1985                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1986                 /*
1987                  * As PF_VCPU will be used in the fault handler, there must
1988                  * be no uaccess between guest_enter and guest_exit.
1989                  */
1990                 preempt_disable();
1991                 kvm_guest_enter();
1992                 preempt_enable();
1993                 exit_reason = sie64a(vcpu->arch.sie_block,
1994                                      vcpu->run->s.regs.gprs);
1995                 kvm_guest_exit();
1996                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1997
1998                 rc = vcpu_post_run(vcpu, exit_reason);
1999         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2000
2001         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2002         return rc;
2003 }
2004
2005 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2006 {
2007         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2008         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2009         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2010                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2011         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2012                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2013                 /* some control register changes require a tlb flush */
2014                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2015         }
2016         if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2017                 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2018                 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2019                 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2020                 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2021                 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2022         }
2023         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2024                 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2025                 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2026                 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2027                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2028                         kvm_clear_async_pf_completion_queue(vcpu);
2029         }
2030         kvm_run->kvm_dirty_regs = 0;
2031 }
2032
2033 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2034 {
2035         kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2036         kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2037         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2038         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2039         kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2040         kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2041         kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2042         kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2043         kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2044         kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2045         kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2046         kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2047 }
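
/*
 * A userspace sketch of the sync/store pair above: values placed in the
 * run structure are pushed into the vcpu on the next KVM_RUN when the
 * matching dirty bit is set (run is the mmap'ed kvm_run of the vcpu;
 * the prefix value is illustrative):
 *
 *	run->s.regs.prefix = 0x20000;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */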
2048
2049 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2050 {
2051         int rc;
2052         sigset_t sigsaved;
2053
2054         if (guestdbg_exit_pending(vcpu)) {
2055                 kvm_s390_prepare_debug_exit(vcpu);
2056                 return 0;
2057         }
2058
2059         if (vcpu->sigset_active)
2060                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2061
2062         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2063                 kvm_s390_vcpu_start(vcpu);
2064         } else if (is_vcpu_stopped(vcpu)) {
2065                 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
2066                                    vcpu->vcpu_id);
2067                 return -EINVAL;
2068         }
2069
2070         sync_regs(vcpu, kvm_run);
2071
2072         might_fault();
2073         rc = __vcpu_run(vcpu);
2074
2075         if (signal_pending(current) && !rc) {
2076                 kvm_run->exit_reason = KVM_EXIT_INTR;
2077                 rc = -EINTR;
2078         }
2079
2080         if (guestdbg_exit_pending(vcpu) && !rc)  {
2081                 kvm_s390_prepare_debug_exit(vcpu);
2082                 rc = 0;
2083         }
2084
2085         if (rc == -EOPNOTSUPP) {
2086                 /* intercept cannot be handled in-kernel, prepare kvm-run */
2087                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
2088                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2089                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
2090                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
2091                 rc = 0;
2092         }
2093
2094         if (rc == -EREMOTE) {
2095                 /* intercept was handled, but userspace support is needed;
2096                  * kvm_run has been prepared by the handler */
2097                 rc = 0;
2098         }
2099
2100         store_regs(vcpu, kvm_run);
2101
2102         if (vcpu->sigset_active)
2103                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2104
2105         vcpu->stat.exit_userspace++;
2106         return rc;
2107 }
2108
2109 /*
2110  * store status at address
2111  * we have two special cases:
2112  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2113  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2114  */
2115 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2116 {
2117         unsigned char archmode = 1;
2118         unsigned int px;
2119         u64 clkcomp;
2120         int rc;
2121
2122         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2123                 if (write_guest_abs(vcpu, 163, &archmode, 1))
2124                         return -EFAULT;
2125                 gpa = SAVE_AREA_BASE;
2126         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2127                 if (write_guest_real(vcpu, 163, &archmode, 1))
2128                         return -EFAULT;
2129                 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2130         }
2131         rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2132                              vcpu->arch.guest_fpregs.fprs, 128);
2133         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2134                               vcpu->run->s.regs.gprs, 128);
2135         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2136                               &vcpu->arch.sie_block->gpsw, 16);
2137         px = kvm_s390_get_prefix(vcpu);
2138         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2139                               &px, 4);
2140         rc |= write_guest_abs(vcpu,
2141                               gpa + offsetof(struct save_area, fp_ctrl_reg),
2142                               &vcpu->arch.guest_fpregs.fpc, 4);
2143         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2144                               &vcpu->arch.sie_block->todpr, 4);
2145         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2146                               &vcpu->arch.sie_block->cputm, 8);
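        /* the save area holds only bits 0-55 of the clock comparator */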
2147         clkcomp = vcpu->arch.sie_block->ckc >> 8;
2148         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2149                               &clkcomp, 8);
2150         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2151                               &vcpu->run->s.regs.acrs, 64);
2152         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2153                               &vcpu->arch.sie_block->gcr, 128);
2154         return rc ? -EFAULT : 0;
2155 }
2156
2157 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2158 {
2159         /*
2160          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2161          * copying in vcpu load/put. Let's update our copies before we save
2162          * them into the save area.
2163          */
2164         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2165         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2166         save_access_regs(vcpu->run->s.regs.acrs);
2167
2168         return kvm_s390_store_status_unloaded(vcpu, addr);
2169 }
2170
2171 /*
2172  * store additional status at address
2173  */
2174 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2175                                         unsigned long gpa)
2176 {
2177         /* Only bits 0-53 are used for address formation */
2178         if (!(gpa & ~0x3ff))
2179                 return 0;
2180
2181         return write_guest_abs(vcpu, gpa & ~0x3ff,
2182                                (void *)&vcpu->run->s.regs.vrs, 512);
2183 }
2184
2185 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2186 {
2187         if (!test_kvm_facility(vcpu->kvm, 129))
2188                 return 0;
2189
2190         /*
2191          * The guest VXRS are in the host VXRS due to the lazy
2192          * copying in vcpu load/put. Let's update our copies before we save
2193          * them into the save area.
2194          */
2195         save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2196
2197         return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2198 }
2199
2200 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2201 {
2202         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2203         kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
2204         exit_sie_sync(vcpu);
2205 }
2206
2207 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2208 {
2209         unsigned int i;
2210         struct kvm_vcpu *vcpu;
2211
2212         kvm_for_each_vcpu(i, vcpu, kvm) {
2213                 __disable_ibs_on_vcpu(vcpu);
2214         }
2215 }
2216
2217 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2218 {
2219         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2220         kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2221         exit_sie_sync(vcpu);
2222 }
2223
2224 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2225 {
2226         int i, online_vcpus, started_vcpus = 0;
2227
2228         if (!is_vcpu_stopped(vcpu))
2229                 return;
2230
2231         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2232         /* Only one cpu at a time may enter/leave the STOPPED state. */
2233         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2234         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2235
2236         for (i = 0; i < online_vcpus; i++) {
2237                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2238                         started_vcpus++;
2239         }
2240
2241         if (started_vcpus == 0) {
2242                 /* we're the only active VCPU -> speed it up */
2243                 __enable_ibs_on_vcpu(vcpu);
2244         } else if (started_vcpus == 1) {
2245                 /*
2246                  * As we are starting a second VCPU, we have to disable
2247                  * the IBS facility on all VCPUs to remove potentially
2248          * outstanding ENABLE requests.
2249                  */
2250                 __disable_ibs_on_all_vcpus(vcpu->kvm);
2251         }
2252
2253         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2254         /*
2255          * Another VCPU might have used IBS while we were offline.
2256          * Let's play safe and flush the VCPU at startup.
2257          */
2258         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2259         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2260         return;
2261 }
2262
2263 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2264 {
2265         int i, online_vcpus, started_vcpus = 0;
2266         struct kvm_vcpu *started_vcpu = NULL;
2267
2268         if (is_vcpu_stopped(vcpu))
2269                 return;
2270
2271         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2272         /* Only one cpu at a time may enter/leave the STOPPED state. */
2273         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2274         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2275
2276         /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2277         kvm_s390_clear_stop_irq(vcpu);
2278
2279         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2280         __disable_ibs_on_vcpu(vcpu);
2281
2282         for (i = 0; i < online_vcpus; i++) {
2283                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2284                         started_vcpus++;
2285                         started_vcpu = vcpu->kvm->vcpus[i];
2286                 }
2287         }
2288
2289         if (started_vcpus == 1) {
2290                 /*
2291                  * As we only have one VCPU left, we want to enable the
2292                  * IBS facility for that VCPU to speed it up.
2293                  */
2294                 __enable_ibs_on_vcpu(started_vcpu);
2295         }
2296
2297         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2298         return;
2299 }
2300
2301 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2302                                      struct kvm_enable_cap *cap)
2303 {
2304         int r;
2305
2306         if (cap->flags)
2307                 return -EINVAL;
2308
2309         switch (cap->cap) {
2310         case KVM_CAP_S390_CSS_SUPPORT:
2311                 if (!vcpu->kvm->arch.css_support) {
2312                         vcpu->kvm->arch.css_support = 1;
2313                         trace_kvm_s390_enable_css(vcpu->kvm);
2314                 }
2315                 r = 0;
2316                 break;
2317         default:
2318                 r = -EINVAL;
2319                 break;
2320         }
2321         return r;
2322 }
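
/*
 * A userspace sketch of the only per-vcpu capability handled above
 * (vcpu_fd assumed; flags must be zero):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */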
2323
2324 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2325                                   struct kvm_s390_mem_op *mop)
2326 {
2327         void __user *uaddr = (void __user *)mop->buf;
2328         void *tmpbuf = NULL;
2329         int r, srcu_idx;
2330         const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2331                                     | KVM_S390_MEMOP_F_CHECK_ONLY;
2332
2333         if (mop->flags & ~supported_flags)
2334                 return -EINVAL;
2335
2336         if (mop->size > MEM_OP_MAX_SIZE)
2337                 return -E2BIG;
2338
2339         if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2340                 tmpbuf = vmalloc(mop->size);
2341                 if (!tmpbuf)
2342                         return -ENOMEM;
2343         }
2344
2345         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2346
2347         switch (mop->op) {
2348         case KVM_S390_MEMOP_LOGICAL_READ:
2349                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2350                         r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2351                         break;
2352                 }
2353                 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2354                 if (r == 0) {
2355                         if (copy_to_user(uaddr, tmpbuf, mop->size))
2356                                 r = -EFAULT;
2357                 }
2358                 break;
2359         case KVM_S390_MEMOP_LOGICAL_WRITE:
2360                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2361                         r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2362                         break;
2363                 }
2364                 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2365                         r = -EFAULT;
2366                         break;
2367                 }
2368                 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2369                 break;
2370         default:
2371                 r = -EINVAL;
2372         }
2373
2374         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2375
2376         if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2377                 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2378
2379         vfree(tmpbuf);
2380         return r;
2381 }
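
/*
 * A userspace sketch of a logical read through kvm_s390_guest_mem_op()
 * (vcpu_fd assumed; gaddr is illustrative, error handling omitted):
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY in mop.flags only the access check
 * is performed and no data is copied.
 */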
2382
2383 long kvm_arch_vcpu_ioctl(struct file *filp,
2384                          unsigned int ioctl, unsigned long arg)
2385 {
2386         struct kvm_vcpu *vcpu = filp->private_data;
2387         void __user *argp = (void __user *)arg;
2388         int idx;
2389         long r;
2390
2391         switch (ioctl) {
2392         case KVM_S390_INTERRUPT: {
2393                 struct kvm_s390_interrupt s390int;
2394                 struct kvm_s390_irq s390irq;
2395
2396                 r = -EFAULT;
2397                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2398                         break;
2399                 if (s390int_to_s390irq(&s390int, &s390irq))
2400                         return -EINVAL;
2401                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2402                 break;
2403         }
2404         case KVM_S390_STORE_STATUS:
2405                 idx = srcu_read_lock(&vcpu->kvm->srcu);
2406                 r = kvm_s390_vcpu_store_status(vcpu, arg);
2407                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2408                 break;
2409         case KVM_S390_SET_INITIAL_PSW: {
2410                 psw_t psw;
2411
2412                 r = -EFAULT;
2413                 if (copy_from_user(&psw, argp, sizeof(psw)))
2414                         break;
2415                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2416                 break;
2417         }
2418         case KVM_S390_INITIAL_RESET:
2419                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2420                 break;
2421         case KVM_SET_ONE_REG:
2422         case KVM_GET_ONE_REG: {
2423                 struct kvm_one_reg reg;
2424                 r = -EFAULT;
2425                 if (copy_from_user(&reg, argp, sizeof(reg)))
2426                         break;
2427                 if (ioctl == KVM_SET_ONE_REG)
2428                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2429                 else
2430                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2431                 break;
2432         }
2433 #ifdef CONFIG_KVM_S390_UCONTROL
2434         case KVM_S390_UCAS_MAP: {
2435                 struct kvm_s390_ucas_mapping ucasmap;
2436
2437                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2438                         r = -EFAULT;
2439                         break;
2440                 }
2441
2442                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2443                         r = -EINVAL;
2444                         break;
2445                 }
2446
2447                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2448                                      ucasmap.vcpu_addr, ucasmap.length);
2449                 break;
2450         }
2451         case KVM_S390_UCAS_UNMAP: {
2452                 struct kvm_s390_ucas_mapping ucasmap;
2453
2454                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2455                         r = -EFAULT;
2456                         break;
2457                 }
2458
2459                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2460                         r = -EINVAL;
2461                         break;
2462                 }
2463
2464                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2465                         ucasmap.length);
2466                 break;
2467         }
2468 #endif
2469         case KVM_S390_VCPU_FAULT: {
2470                 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2471                 break;
2472         }
2473         case KVM_ENABLE_CAP:
2474         {
2475                 struct kvm_enable_cap cap;
2476                 r = -EFAULT;
2477                 if (copy_from_user(&cap, argp, sizeof(cap)))
2478                         break;
2479                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2480                 break;
2481         }
2482         case KVM_S390_MEM_OP: {
2483                 struct kvm_s390_mem_op mem_op;
2484
2485                 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2486                         r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2487                 else
2488                         r = -EFAULT;
2489                 break;
2490         }
2491         default:
2492                 r = -ENOTTY;
2493         }
2494         return r;
2495 }
2496
2497 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2498 {
2499 #ifdef CONFIG_KVM_S390_UCONTROL
2500         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2501                  && (kvm_is_ucontrol(vcpu->kvm))) {
2502                 vmf->page = virt_to_page(vcpu->arch.sie_block);
2503                 get_page(vmf->page);
2504                 return 0;
2505         }
2506 #endif
2507         return VM_FAULT_SIGBUS;
2508 }
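
/*
 * A userspace sketch of mapping the sie_block of a ucontrol vcpu via
 * the fault handler above (vcpu_fd assumed; PAGE_SIZE is 4096 on s390):
 *
 *	void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */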
2509
2510 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2511                             unsigned long npages)
2512 {
2513         return 0;
2514 }
2515
2516 /* Section: memory related */
2517 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2518                                    struct kvm_memory_slot *memslot,
2519                                    struct kvm_userspace_memory_region *mem,
2520                                    enum kvm_mr_change change)
2521 {
2522         /* A few sanity checks. Memory slots have to start and end at a
2523            segment boundary (1MB). The memory in userland may be fragmented
2524            into various different vmas. It is okay to mmap() and munmap()
2525            stuff in this slot after doing this call at any time */
2526
2527         if (mem->userspace_addr & 0xffffful)
2528                 return -EINVAL;
2529
2530         if (mem->memory_size & 0xffffful)
2531                 return -EINVAL;
2532
2533         return 0;
2534 }
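
/*
 * A userspace sketch of a region that passes the checks above: both the
 * userspace address and the size must be 1MB aligned (vm_fd assumed;
 * backing is a sufficiently large, 1MB-aligned mapping):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,
 *		.userspace_addr  = (__u64)backing,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */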
2535
2536 void kvm_arch_commit_memory_region(struct kvm *kvm,
2537                                 struct kvm_userspace_memory_region *mem,
2538                                 const struct kvm_memory_slot *old,
2539                                 enum kvm_mr_change change)
2540 {
2541         int rc;
2542
2543         /* If the basics of the memslot do not change, we do not want
2544          * to update the gmap. Every update causes several unnecessary
2545          * segment translation exceptions. This is usually handled just
2546          * fine by the normal fault handler + gmap, but it will also
2547          * cause faults on the prefix page of running guest CPUs.
2548          */
2549         if (old->userspace_addr == mem->userspace_addr &&
2550             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2551             old->npages * PAGE_SIZE == mem->memory_size)
2552                 return;
2553
2554         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2555                 mem->guest_phys_addr, mem->memory_size);
2556         if (rc)
2557                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2558         return;
2559 }
2560
2561 static int __init kvm_s390_init(void)
2562 {
2563         return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2564 }
2565
2566 static void __exit kvm_s390_exit(void)
2567 {
2568         kvm_exit();
2569 }
2570
2571 module_init(kvm_s390_init);
2572 module_exit(kvm_s390_exit);
2573
2574 /*
2575  * Enable autoloading of the kvm module.
2576  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2577  * since x86 takes a different approach.
2578  */
2579 #include <linux/miscdevice.h>
2580 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2581 MODULE_ALIAS("devname:kvm");