// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

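/*
 * Atomically mark an external call pending in this VCPU's (E)SCA
 * entry. The cmpxchg() below only succeeds if the "c" bit was still
 * clear in the old value; losing the race means another external call
 * is already pending and -EBUSY is returned.
 */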
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

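/*
 * A clock comparator interrupt is pending once the guest TOD clock has
 * passed the CKC value. With the sign control bit in CR0 set (used by
 * the multiple-epoch facility) the comparison is signed, otherwise it
 * is a plain unsigned 64-bit comparison.
 */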
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;

        if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
                if ((s64)ckc >= (s64)now)
                        return 0;
        } else if (ckc >= now) {
                return 0;
        }
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

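/*
 * An (adapter) I/O interruption word has bit 0 (0x80000000) set and
 * carries the interruption subclass (ISC) in bits 2-4 (mask
 * 0x38000000); the two helpers below convert between the ISC number
 * and the interruption word.
 */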
static inline u32 isc_to_int_word(u8 isc)
{
        return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
                vcpu->arch.local_int.pending_irqs;
}

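/*
 * Fold the GISA interruption pending mask (ipm) into the pending-irq
 * word: after the shift, GISC 7 lands on IRQ_PEND_IO_ISC_7 and GISC 0
 * (the highest-priority subclass) on IRQ_PEND_IO_ISC_0, matching
 * isc_to_irq_type() below.
 */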
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        return pending_irqs_no_gisa(vcpu) |
                kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
        return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
        return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (isc_to_irq_type(i)));

        return active_mask;
}

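/*
 * Reduce the set of pending interrupts to those that are actually
 * deliverable right now: mask whole classes disabled by the guest PSW
 * and individual subclasses disabled in CR0 (external call 0x2000,
 * emergency signal 0x4000, clock comparator 0x800, CPU timer 0x400,
 * service signal 0x200) or in CR6 (I/O subclass mask).
 */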
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /*
         * Check both the floating and the local interrupts' cr14,
         * because IRQ_PEND_MCHK_REP could be set in either case.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
           (vcpu->kvm->arch.float_int.mchk.cr14 |
           vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
        set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
        clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
                                      CPUSTAT_STOP_INT);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        vcpu->stat.deliver_cputm++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        vcpu->stat.deliver_ckc++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

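/*
 * Write the complete machine check interruption information into the
 * guest: the general interruption data and register save areas in the
 * lowcore, plus the vector and guarded storage state in the extended
 * save area when the corresponding facilities are available and the
 * MCESA address is valid.
 */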
static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
        if (test_kvm_facility(vcpu->kvm, 133)) {
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                vcpu->stat.deliver_machine_check++;
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

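/*
 * Deliver a program interrupt: depending on the interruption code,
 * different lowcore fields (translation exception code, access IDs,
 * monitor data, ...) are filled in, the PSW is rewound for nullifying
 * conditions, and the ILC and interruption code are stored before the
 * PSW swap.
 */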
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bits 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

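/*
 * Store the I/O interruption parameters (subchannel ID/number,
 * interruption parameter and word) in the lowcore and perform the PSW
 * swap for an I/O interrupt.
 */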
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
        int rc;

        rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw,
                             sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

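/*
 * Deliver an I/O interrupt for the given ISC: prefer a queued
 * interrupt from the floating list; if none is queued, fall back to a
 * GISA-pending adapter interrupt that was not delivered while in SIE
 * context.
 */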
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti = NULL;
        struct kvm_s390_io_info io;
        u32 isc;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc = irq_type_to_isc(irq_type);
        isc_list = &fi->lists[isc];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                                   inti->io.subchannel_id >> 8,
                                   inti->io.subchannel_id >> 1 & 0x3,
                                   inti->io.subchannel_nr);

                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = __do_deliver_io(vcpu, &(inti->io));
                kfree(inti);
                goto out;
        }

        if (vcpu->kvm->arch.gisa &&
            kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
                /*
                 * in case an adapter interrupt was not delivered
                 * in SIE context KVM will handle the delivery
                 */
                VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
                memset(&io, 0, sizeof(io));
                io.io_int_word = isc_to_int_word(isc);
                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                        KVM_S390_INT_IO(1, 0, 0, 0),
                        ((__u32)io.subchannel_id << 16) |
                        io.subchannel_nr,
                        ((__u64)io.io_int_parm << 32) |
                        io.io_int_word);
                rc = __do_deliver_io(vcpu, &io);
        }
out:
        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

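/*
 * Compute how long (in ns) the vcpu may sleep before the clock
 * comparator or the CPU timer fires, returning 0 when one of them has
 * already expired; the result caps the hrtimer used by
 * kvm_s390_handle_wait() below.
 */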
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;
        u64 cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
                        if ((s64)now < (s64)ckc)
                                sltime = tod_to_ns((s64)ckc - (s64)now);
                } else if (now < ckc) {
                        sltime = tod_to_ns(ckc - now);
                }
                /* already expired */
                if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        /*
         * We cannot move this into the if, as the CPU might already be
         * in kvm_vcpu_block without having the waitqueue set (polling)
         */
        vcpu->valid_wakeup = true;
        /*
         * This is mostly to document that the read in swait_active()
         * could be moved before other stores, leading to subtle races.
         * All current users do not store or use an atomic-like update.
         */
        smp_mb__after_atomic();
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
        /*
         * The VCPU might not be sleeping but is executing the VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

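/*
 * Deliver all deliverable pending interrupts, one at a time. The
 * IRQ_PEND_* bits are ordered in reverse priority, so find_last_bit()
 * always picks the highest-priority deliverable interrupt first.
 */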
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (ckc_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        /* pending cpu timer conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the reverse order of interrupt priority */
                irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
                switch (irq_type) {
                case IRQ_PEND_IO_ISC_0:
                case IRQ_PEND_IO_ISC_1:
                case IRQ_PEND_IO_ISC_2:
                case IRQ_PEND_IO_ISC_3:
                case IRQ_PEND_IO_ISC_4:
                case IRQ_PEND_IO_ISC_5:
                case IRQ_PEND_IO_ISC_6:
                case IRQ_PEND_IO_ISC_7:
                        rc = __deliver_io(vcpu, irq_type);
                        break;
                case IRQ_PEND_MCHK_EX:
                case IRQ_PEND_MCHK_REP:
                        rc = __deliver_machine_check(vcpu);
                        break;
                case IRQ_PEND_PROG:
                        rc = __deliver_prog(vcpu);
                        break;
                case IRQ_PEND_EXT_EMERGENCY:
                        rc = __deliver_emergency_signal(vcpu);
                        break;
                case IRQ_PEND_EXT_EXTERNAL:
                        rc = __deliver_external_call(vcpu);
                        break;
                case IRQ_PEND_EXT_CLOCK_COMP:
                        rc = __deliver_ckc(vcpu);
                        break;
                case IRQ_PEND_EXT_CPU_TIMER:
                        rc = __deliver_cpu_timer(vcpu);
                        break;
                case IRQ_PEND_RESTART:
                        rc = __deliver_restart(vcpu);
                        break;
                case IRQ_PEND_SET_PREFIX:
                        rc = __deliver_set_prefix(vcpu);
                        break;
                case IRQ_PEND_PFAULT_INIT:
                        rc = __deliver_pfault_init(vcpu);
                        break;
                case IRQ_PEND_EXT_SERVICE:
                        rc = __deliver_service(vcpu);
                        break;
                case IRQ_PEND_PFAULT_DONE:
                        rc = __deliver_pfault_done(vcpu);
                        break;
                case IRQ_PEND_VIRTIO:
                        rc = __deliver_virtio(vcpu);
                        break;
                default:
                        WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
                        clear_bit(irq_type, &li->pending_irqs);
                }
        }

        set_intercept_indicators(vcpu);

        return rc;
}

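/*
 * Merge a new program interrupt into any interrupt that is already
 * pending: a pure PER event only updates the PER fields, a non-PER
 * code only the non-PER fields, so that a PER event and an ordinary
 * program interrupt can be combined into a single delivery.
 */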
1270 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1271 {
1272         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1273
1274         vcpu->stat.inject_program++;
1275         VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1276         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1277                                    irq->u.pgm.code, 0);
1278
1279         if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1280                 /* auto detection if no valid ILC was given */
1281                 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1282                 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1283                 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1284         }
1285
1286         if (irq->u.pgm.code == PGM_PER) {
1287                 li->irq.pgm.code |= PGM_PER;
1288                 li->irq.pgm.flags = irq->u.pgm.flags;
1289                 /* only modify PER related information */
1290                 li->irq.pgm.per_address = irq->u.pgm.per_address;
1291                 li->irq.pgm.per_code = irq->u.pgm.per_code;
1292                 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1293                 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1294         } else if (!(irq->u.pgm.code & PGM_PER)) {
1295                 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1296                                    irq->u.pgm.code;
1297                 li->irq.pgm.flags = irq->u.pgm.flags;
1298                 /* only modify non-PER information */
1299                 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1300                 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1301                 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1302                 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1303                 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1304                 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1305         } else {
1306                 li->irq.pgm = irq->u.pgm;
1307         }
1308         set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1309         return 0;
1310 }
1311
1312 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1313 {
1314         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1315
1316         vcpu->stat.inject_pfault_init++;
1317         VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1318                    irq->u.ext.ext_params2);
1319         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1320                                    irq->u.ext.ext_params,
1321                                    irq->u.ext.ext_params2);
1322
1323         li->irq.ext = irq->u.ext;
1324         set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1325         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1326         return 0;
1327 }
1328
1329 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1330 {
1331         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1332         struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1333         uint16_t src_id = irq->u.extcall.code;
1334
1335         vcpu->stat.inject_external_call++;
1336         VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1337                    src_id);
1338         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1339                                    src_id, 0);
1340
1341         /* sending vcpu invalid */
1342         if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1343                 return -EINVAL;
1344
1345         if (sclp.has_sigpif)
1346                 return sca_inject_ext_call(vcpu, src_id);
1347
1348         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1349                 return -EBUSY;
1350         *extcall = irq->u.extcall;
1351         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1352         return 0;
1353 }
1354
1355 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1356 {
1357         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1358         struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1359
1360         vcpu->stat.inject_set_prefix++;
1361         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1362                    irq->u.prefix.address);
1363         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1364                                    irq->u.prefix.address, 0);
1365
1366         if (!is_vcpu_stopped(vcpu))
1367                 return -EBUSY;
1368
1369         *prefix = irq->u.prefix;
1370         set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1371         return 0;
1372 }
1373
1374 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1375 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1376 {
1377         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1378         struct kvm_s390_stop_info *stop = &li->irq.stop;
1379         int rc = 0;
1380
1381         vcpu->stat.inject_stop_signal++;
1382         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1383
1384         if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1385                 return -EINVAL;
1386
1387         if (is_vcpu_stopped(vcpu)) {
1388                 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1389                         rc = kvm_s390_store_status_unloaded(vcpu,
1390                                                 KVM_S390_STORE_STATUS_NOADDR);
1391                 return rc;
1392         }
1393
1394         if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1395                 return -EBUSY;
1396         stop->flags = irq->u.stop.flags;
1397         kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1398         return 0;
1399 }
1400
1401 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
1402                                  struct kvm_s390_irq *irq)
1403 {
1404         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1405
1406         vcpu->stat.inject_restart++;
1407         VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1408         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1409
1410         set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1411         return 0;
1412 }
1413
1414 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1415                                    struct kvm_s390_irq *irq)
1416 {
1417         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1418
1419         vcpu->stat.inject_emergency_signal++;
1420         VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1421                    irq->u.emerg.code);
1422         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1423                                    irq->u.emerg.code, 0);
1424
1425         /* reject if the sending vcpu does not exist */
1426         if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1427                 return -EINVAL;
1428
1429         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1430         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1431         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1432         return 0;
1433 }
1434
1435 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1436 {
1437         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1438         struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1439
1440         vcpu->stat.inject_mchk++;
1441         VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1442                    irq->u.mchk.mcic);
1443         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1444                                    irq->u.mchk.mcic);
1445
1446         /*
1447          * Because repressible machine checks can be indicated along with
1448          * exigent machine checks (PoP, Chapter 11, Interruption action),
1449          * we need to combine cr14, mcic and the external damage code.
1450          * The failing storage address and the logout area should not be
1451          * OR'ed together; we just indicate the last occurrence of the
1452          * corresponding machine check.
1453          */
1454         mchk->cr14 |= irq->u.mchk.cr14;
1455         mchk->mcic |= irq->u.mchk.mcic;
1456         mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1457         mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1458         memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1459                sizeof(mchk->fixed_logout));
1460         if (mchk->mcic & MCHK_EX_MASK)
1461                 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1462         else if (mchk->mcic & MCHK_REP_MASK)
1463                 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
1464         return 0;
1465 }
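/*
 * Editor's note, a worked example of the rule above: if a repressible
 * machine check arrives while another one is still pending, its cr14,
 * mcic and external damage bits are OR'ed into the pending record, but
 * its failing storage address simply replaces the previous one.
 */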
1466
1467 static int __inject_ckc(struct kvm_vcpu *vcpu)
1468 {
1469         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1470
1471         vcpu->stat.inject_ckc++;
1472         VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1473         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1474                                    0, 0);
1475
1476         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1477         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1478         return 0;
1479 }
1480
1481 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1482 {
1483         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1484
1485         vcpu->stat.inject_cputm++;
1486         VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1487         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1488                                    0, 0);
1489
1490         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1491         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1492         return 0;
1493 }
1494
1495 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1496                                                   int isc, u32 schid)
1497 {
1498         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1499         struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1500         struct kvm_s390_interrupt_info *iter;
1501         u16 id = (schid & 0xffff0000U) >> 16;
1502         u16 nr = schid & 0x0000ffffU;
1503
1504         spin_lock(&fi->lock);
1505         list_for_each_entry(iter, isc_list, list) {
1506                 if (schid && (id != iter->io.subchannel_id ||
1507                               nr != iter->io.subchannel_nr))
1508                         continue;
1509                 /* found an appropriate entry */
1510                 list_del_init(&iter->list);
1511                 fi->counters[FIRQ_CNTR_IO] -= 1;
1512                 if (list_empty(isc_list))
1513                         clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1514                 spin_unlock(&fi->lock);
1515                 return iter;
1516         }
1517         spin_unlock(&fi->lock);
1518         return NULL;
1519 }
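/*
 * Editor's note: the schid argument packs both halves of the subchannel
 * identification word. For example, schid = (1 << 16) | 3 selects
 * subchannel_id 1 and subchannel_nr 3, matching the id/nr split above,
 * while schid == 0 matches any entry on the isc list.
 */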
1520
1521 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1522                                                       u64 isc_mask, u32 schid)
1523 {
1524         struct kvm_s390_interrupt_info *inti = NULL;
1525         int isc;
1526
1527         for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1528                 if (isc_mask & isc_to_isc_bits(isc))
1529                         inti = get_io_int(kvm, isc, schid);
1530         }
1531         return inti;
1532 }
1533
1534 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1535 {
1536         unsigned long active_mask;
1537         int isc;
1538
1539         if (schid)
1540                 goto out;
1541         if (!kvm->arch.gisa)
1542                 goto out;
1543
1544         active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
1545         while (active_mask) {
1546                 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1547                 if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
1548                         return isc;
1549                 clear_bit_inv(isc, &active_mask);
1550         }
1551 out:
1552         return -EINVAL;
1553 }
1554
1555 /*
1556  * Dequeue and return an I/O interrupt matching any of the interruption
1557  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1558  * Take into account the interrupts pending in the interrupt list and in GISA.
1559  *
1560  * Note that for a guest that does not enable I/O interrupts
1561  * but relies on TPI, a flood of classic interrupts may starve
1562  * out adapter interrupts on the same isc. Linux does not do
1563  * that, and it is possible to work around the issue by configuring
1564  * different iscs for classic and adapter interrupts in the guest,
1565  * but we may want to revisit this in the future.
1566  */
1567 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1568                                                     u64 isc_mask, u32 schid)
1569 {
1570         struct kvm_s390_interrupt_info *inti, *tmp_inti;
1571         int isc;
1572
1573         inti = get_top_io_int(kvm, isc_mask, schid);
1574
1575         isc = get_top_gisa_isc(kvm, isc_mask, schid);
1576         if (isc < 0)
1577                 /* no AI in GISA */
1578                 goto out;
1579
1580         if (!inti)
1581                 /* AI in GISA but no classical IO int */
1582                 goto gisa_out;
1583
1584         /* both types of interrupts present */
1585         if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1586                 /* classical IO int with higher priority */
1587                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1588                 goto out;
1589         }
1590 gisa_out:
1591         tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1592         if (tmp_inti) {
1593                 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1594                 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1595                 if (inti)
1596                         kvm_s390_reinject_io_int(kvm, inti);
1597                 inti = tmp_inti;
1598         } else
1599                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1600 out:
1601         return inti;
1602 }
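/*
 * Editor's note: isc_mask follows the CR6 bit layout; the callers above
 * build per-isc bits with isc_to_isc_bits(), and clear_io_irq() below
 * uses 0xffUL << 24 to cover all iscs. A schid of 0 means "match any
 * subchannel".
 */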
1603
1604 #define SCCB_MASK 0xFFFFFFF8
1605 #define SCCB_EVENT_PENDING 0x3
1606
1607 static int __inject_service(struct kvm *kvm,
1608                              struct kvm_s390_interrupt_info *inti)
1609 {
1610         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1611
1612         kvm->stat.inject_service_signal++;
1613         spin_lock(&fi->lock);
1614         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1615         /*
1616          * Early versions of the QEMU s390 bios will inject several
1617          * service interrupts, one after another, without handling the
1618          * condition code indicating busy.
1619          * We will silently ignore those superfluous sccb values.
1620          * A future version of QEMU will take care of serializing
1621          * servc requests.
1622          */
1623         if (fi->srv_signal.ext_params & SCCB_MASK)
1624                 goto out;
1625         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1626         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1627 out:
1628         spin_unlock(&fi->lock);
1629         kfree(inti);
1630         return 0;
1631 }
1632
1633 static int __inject_virtio(struct kvm *kvm,
1634                             struct kvm_s390_interrupt_info *inti)
1635 {
1636         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1637
1638         kvm->stat.inject_virtio++;
1639         spin_lock(&fi->lock);
1640         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1641                 spin_unlock(&fi->lock);
1642                 return -EBUSY;
1643         }
1644         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1645         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1646         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1647         spin_unlock(&fi->lock);
1648         return 0;
1649 }
1650
1651 static int __inject_pfault_done(struct kvm *kvm,
1652                                  struct kvm_s390_interrupt_info *inti)
1653 {
1654         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1655
1656         kvm->stat.inject_pfault_done++;
1657         spin_lock(&fi->lock);
1658         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1659                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1660                 spin_unlock(&fi->lock);
1661                 return -EBUSY;
1662         }
1663         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1664         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1665         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1666         spin_unlock(&fi->lock);
1667         return 0;
1668 }
1669
1670 #define CR_PENDING_SUBCLASS 28
1671 static int __inject_float_mchk(struct kvm *kvm,
1672                                 struct kvm_s390_interrupt_info *inti)
1673 {
1674         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1675
1676         kvm->stat.inject_float_mchk++;
1677         spin_lock(&fi->lock);
1678         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1679         fi->mchk.mcic |= inti->mchk.mcic;
1680         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1681         spin_unlock(&fi->lock);
1682         kfree(inti);
1683         return 0;
1684 }
1685
1686 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1687 {
1688         struct kvm_s390_float_interrupt *fi;
1689         struct list_head *list;
1690         int isc;
1691
1692         kvm->stat.inject_io++;
1693         isc = int_word_to_isc(inti->io.io_int_word);
1694
1695         if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
1696                 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1697                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1698                 kfree(inti);
1699                 return 0;
1700         }
1701
1702         fi = &kvm->arch.float_int;
1703         spin_lock(&fi->lock);
1704         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1705                 spin_unlock(&fi->lock);
1706                 return -EBUSY;
1707         }
1708         fi->counters[FIRQ_CNTR_IO] += 1;
1709
1710         if (inti->type & KVM_S390_INT_IO_AI_MASK)
1711                 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1712         else
1713                 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1714                         inti->io.subchannel_id >> 8,
1715                         inti->io.subchannel_id >> 1 & 0x3,
1716                         inti->io.subchannel_nr);
1717         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1718         list_add_tail(&inti->list, list);
1719         set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1720         spin_unlock(&fi->lock);
1721         return 0;
1722 }
1723
1724 /*
1725  * Find a destination VCPU for a floating irq and kick it.
1726  */
1727 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1728 {
1729         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1730         struct kvm_vcpu *dst_vcpu;
1731         int sigcpu, online_vcpus, nr_tries = 0;
1732
1733         online_vcpus = atomic_read(&kvm->online_vcpus);
1734         if (!online_vcpus)
1735                 return;
1736
1737         /* find idle VCPUs first, then round robin */
1738         sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1739         if (sigcpu == online_vcpus) {
1740                 do {
1741                         sigcpu = fi->next_rr_cpu;
1742                         fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1743                         /* avoid endless loops if all vcpus are stopped */
1744                         if (nr_tries++ >= online_vcpus)
1745                                 return;
1746                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1747         }
1748         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1749
1750         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1751         switch (type) {
1752         case KVM_S390_MCHK:
1753                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1754                 break;
1755         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1756                 if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
1757                         kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1758                 break;
1759         default:
1760                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1761                 break;
1762         }
1763         kvm_s390_vcpu_wakeup(dst_vcpu);
1764 }
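/*
 * Editor's note on the selection above: with, say, four online vcpus and
 * an empty idle_mask, find_first_bit() returns online_vcpus (4), so the
 * round-robin path runs, advancing fi->next_rr_cpu modulo 4 and giving
 * up after one full round if every vcpu is stopped.
 */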
1765
1766 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1767 {
1768         u64 type = READ_ONCE(inti->type);
1769         int rc;
1770
1771         switch (type) {
1772         case KVM_S390_MCHK:
1773                 rc = __inject_float_mchk(kvm, inti);
1774                 break;
1775         case KVM_S390_INT_VIRTIO:
1776                 rc = __inject_virtio(kvm, inti);
1777                 break;
1778         case KVM_S390_INT_SERVICE:
1779                 rc = __inject_service(kvm, inti);
1780                 break;
1781         case KVM_S390_INT_PFAULT_DONE:
1782                 rc = __inject_pfault_done(kvm, inti);
1783                 break;
1784         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1785                 rc = __inject_io(kvm, inti);
1786                 break;
1787         default:
1788                 rc = -EINVAL;
1789         }
1790         if (rc)
1791                 return rc;
1792
1793         __floating_irq_kick(kvm, type);
1794         return 0;
1795 }
1796
1797 int kvm_s390_inject_vm(struct kvm *kvm,
1798                        struct kvm_s390_interrupt *s390int)
1799 {
1800         struct kvm_s390_interrupt_info *inti;
1801         int rc;
1802
1803         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1804         if (!inti)
1805                 return -ENOMEM;
1806
1807         inti->type = s390int->type;
1808         switch (inti->type) {
1809         case KVM_S390_INT_VIRTIO:
1810                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1811                          s390int->parm, s390int->parm64);
1812                 inti->ext.ext_params = s390int->parm;
1813                 inti->ext.ext_params2 = s390int->parm64;
1814                 break;
1815         case KVM_S390_INT_SERVICE:
1816                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1817                 inti->ext.ext_params = s390int->parm;
1818                 break;
1819         case KVM_S390_INT_PFAULT_DONE:
1820                 inti->ext.ext_params2 = s390int->parm64;
1821                 break;
1822         case KVM_S390_MCHK:
1823                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1824                          s390int->parm64);
1825                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1826                 inti->mchk.mcic = s390int->parm64;
1827                 break;
1828         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1829                 inti->io.subchannel_id = s390int->parm >> 16;
1830                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1831                 inti->io.io_int_parm = s390int->parm64 >> 32;
1832                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1833                 break;
1834         default:
1835                 kfree(inti);
1836                 return -EINVAL;
1837         }
1838         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1839                                  2);
1840
1841         rc = __inject_vm(kvm, inti);
1842         if (rc)
1843                 kfree(inti);
1844         return rc;
1845 }
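/*
 * Editor's sketch (not part of the original file): a minimal userspace
 * use of this path through the KVM_S390_INTERRUPT vm ioctl. "vm_fd" and
 * "sccb_addr" are assumed to exist in the caller:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = sccb_addr,	// guest-absolute SCCB address
 *	};
 *
 *	rc = ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */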
1846
1847 int kvm_s390_reinject_io_int(struct kvm *kvm,
1848                               struct kvm_s390_interrupt_info *inti)
1849 {
1850         return __inject_vm(kvm, inti);
1851 }
1852
1853 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1854                        struct kvm_s390_irq *irq)
1855 {
1856         irq->type = s390int->type;
1857         switch (irq->type) {
1858         case KVM_S390_PROGRAM_INT:
1859                 if (s390int->parm & 0xffff0000)
1860                         return -EINVAL;
1861                 irq->u.pgm.code = s390int->parm;
1862                 break;
1863         case KVM_S390_SIGP_SET_PREFIX:
1864                 irq->u.prefix.address = s390int->parm;
1865                 break;
1866         case KVM_S390_SIGP_STOP:
1867                 irq->u.stop.flags = s390int->parm;
1868                 break;
1869         case KVM_S390_INT_EXTERNAL_CALL:
1870                 if (s390int->parm & 0xffff0000)
1871                         return -EINVAL;
1872                 irq->u.extcall.code = s390int->parm;
1873                 break;
1874         case KVM_S390_INT_EMERGENCY:
1875                 if (s390int->parm & 0xffff0000)
1876                         return -EINVAL;
1877                 irq->u.emerg.code = s390int->parm;
1878                 break;
1879         case KVM_S390_MCHK:
1880                 irq->u.mchk.mcic = s390int->parm64;
1881                 break;
1882         }
1883         return 0;
1884 }
1885
1886 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1887 {
1888         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1889
1890         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1891 }
1892
1893 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1894 {
1895         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1896
1897         spin_lock(&li->lock);
1898         li->irq.stop.flags = 0;
1899         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1900         spin_unlock(&li->lock);
1901 }
1902
1903 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1904 {
1905         int rc;
1906
1907         switch (irq->type) {
1908         case KVM_S390_PROGRAM_INT:
1909                 rc = __inject_prog(vcpu, irq);
1910                 break;
1911         case KVM_S390_SIGP_SET_PREFIX:
1912                 rc = __inject_set_prefix(vcpu, irq);
1913                 break;
1914         case KVM_S390_SIGP_STOP:
1915                 rc = __inject_sigp_stop(vcpu, irq);
1916                 break;
1917         case KVM_S390_RESTART:
1918                 rc = __inject_sigp_restart(vcpu, irq);
1919                 break;
1920         case KVM_S390_INT_CLOCK_COMP:
1921                 rc = __inject_ckc(vcpu);
1922                 break;
1923         case KVM_S390_INT_CPU_TIMER:
1924                 rc = __inject_cpu_timer(vcpu);
1925                 break;
1926         case KVM_S390_INT_EXTERNAL_CALL:
1927                 rc = __inject_extcall(vcpu, irq);
1928                 break;
1929         case KVM_S390_INT_EMERGENCY:
1930                 rc = __inject_sigp_emergency(vcpu, irq);
1931                 break;
1932         case KVM_S390_MCHK:
1933                 rc = __inject_mchk(vcpu, irq);
1934                 break;
1935         case KVM_S390_INT_PFAULT_INIT:
1936                 rc = __inject_pfault_init(vcpu, irq);
1937                 break;
1938         case KVM_S390_INT_VIRTIO:
1939         case KVM_S390_INT_SERVICE:
1940         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1941         default:
1942                 rc = -EINVAL;
1943         }
1944
1945         return rc;
1946 }
1947
1948 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1949 {
1950         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1951         int rc;
1952
1953         spin_lock(&li->lock);
1954         rc = do_inject_vcpu(vcpu, irq);
1955         spin_unlock(&li->lock);
1956         if (!rc)
1957                 kvm_s390_vcpu_wakeup(vcpu);
1958         return rc;
1959 }
1960
1961 static inline void clear_irq_list(struct list_head *_list)
1962 {
1963         struct kvm_s390_interrupt_info *inti, *n;
1964
1965         list_for_each_entry_safe(inti, n, _list, list) {
1966                 list_del(&inti->list);
1967                 kfree(inti);
1968         }
1969 }
1970
1971 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1972                        struct kvm_s390_irq *irq)
1973 {
1974         irq->type = inti->type;
1975         switch (inti->type) {
1976         case KVM_S390_INT_PFAULT_INIT:
1977         case KVM_S390_INT_PFAULT_DONE:
1978         case KVM_S390_INT_VIRTIO:
1979                 irq->u.ext = inti->ext;
1980                 break;
1981         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1982                 irq->u.io = inti->io;
1983                 break;
1984         }
1985 }
1986
1987 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1988 {
1989         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1990         int i;
1991
1992         spin_lock(&fi->lock);
1993         fi->pending_irqs = 0;
1994         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1995         memset(&fi->mchk, 0, sizeof(fi->mchk));
1996         for (i = 0; i < FIRQ_LIST_COUNT; i++)
1997                 clear_irq_list(&fi->lists[i]);
1998         for (i = 0; i < FIRQ_MAX_COUNT; i++)
1999                 fi->counters[i] = 0;
2000         spin_unlock(&fi->lock);
2001         kvm_s390_gisa_clear(kvm);
2002 }
2003
2004 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2005 {
2006         struct kvm_s390_interrupt_info *inti;
2007         struct kvm_s390_float_interrupt *fi;
2008         struct kvm_s390_irq *buf;
2009         struct kvm_s390_irq *irq;
2010         int max_irqs;
2011         int ret = 0;
2012         int n = 0;
2013         int i;
2014
2015         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2016                 return -EINVAL;
2017
2018         /*
2019          * We are already using -ENOMEM to signal userspace that it may
2020          * retry with a bigger buffer (see the sketch after this function),
2021          * so we need to use something else for this case.
2022          */
2023         buf = vzalloc(len);
2024         if (!buf)
2025                 return -ENOBUFS;
2026
2027         max_irqs = len / sizeof(struct kvm_s390_irq);
2028
2029         if (kvm->arch.gisa &&
2030             kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
2031                 for (i = 0; i <= MAX_ISC; i++) {
2032                         if (n == max_irqs) {
2033                                 /* signal userspace to try again */
2034                                 ret = -ENOMEM;
2035                                 goto out_nolock;
2036                         }
2037                         if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
2038                                 irq = (struct kvm_s390_irq *) &buf[n];
2039                                 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2040                                 irq->u.io.io_int_word = isc_to_int_word(i);
2041                                 n++;
2042                         }
2043                 }
2044         }
2045         fi = &kvm->arch.float_int;
2046         spin_lock(&fi->lock);
2047         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2048                 list_for_each_entry(inti, &fi->lists[i], list) {
2049                         if (n == max_irqs) {
2050                                 /* signal userspace to try again */
2051                                 ret = -ENOMEM;
2052                                 goto out;
2053                         }
2054                         inti_to_irq(inti, &buf[n]);
2055                         n++;
2056                 }
2057         }
2058         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
2059                 if (n == max_irqs) {
2060                         /* signal userspace to try again */
2061                         ret = -ENOMEM;
2062                         goto out;
2063                 }
2064                 irq = (struct kvm_s390_irq *) &buf[n];
2065                 irq->type = KVM_S390_INT_SERVICE;
2066                 irq->u.ext = fi->srv_signal;
2067                 n++;
2068         }
2069         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2070                 if (n == max_irqs) {
2071                         /* signal userspace to try again */
2072                         ret = -ENOMEM;
2073                         goto out;
2074                 }
2075                 irq = (struct kvm_s390_irq *) &buf[n];
2076                 irq->type = KVM_S390_MCHK;
2077                 irq->u.mchk = fi->mchk;
2078                 n++;
2079         }
2080
2081 out:
2082         spin_unlock(&fi->lock);
2083 out_nolock:
2084         if (!ret && n > 0) {
2085                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2086                         ret = -EFAULT;
2087         }
2088         vfree(buf);
2089
2090         return ret < 0 ? ret : n;
2091 }
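/*
 * Editor's sketch of the retry contract above, with hypothetical
 * userspace names (flic_fd, buf, nbytes); a non-negative return value is
 * the number of interrupts stored:
 *
 *	__u64 nbytes = 64 * sizeof(struct kvm_s390_irq);
 *	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
 *	void *buf = NULL;
 *	int rc;
 *
 *	do {
 *		free(buf);
 *		buf = malloc(nbytes);
 *		attr.attr = nbytes;
 *		attr.addr = (__u64)(unsigned long)buf;
 *		rc = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *		nbytes *= 2;	// -ENOMEM means "retry with a bigger buffer"
 *	} while (rc < 0 && errno == ENOMEM &&
 *		 nbytes <= KVM_S390_FLIC_MAX_BUFFER);
 */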
2092
2093 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2094 {
2095         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2096         struct kvm_s390_ais_all ais;
2097
2098         if (attr->attr < sizeof(ais))
2099                 return -EINVAL;
2100
2101         if (!test_kvm_facility(kvm, 72))
2102                 return -ENOTSUPP;
2103
2104         mutex_lock(&fi->ais_lock);
2105         ais.simm = fi->simm;
2106         ais.nimm = fi->nimm;
2107         mutex_unlock(&fi->ais_lock);
2108
2109         if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2110                 return -EFAULT;
2111
2112         return 0;
2113 }
2114
2115 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2116 {
2117         int r;
2118
2119         switch (attr->group) {
2120         case KVM_DEV_FLIC_GET_ALL_IRQS:
2121                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2122                                           attr->attr);
2123                 break;
2124         case KVM_DEV_FLIC_AISM_ALL:
2125                 r = flic_ais_mode_get_all(dev->kvm, attr);
2126                 break;
2127         default:
2128                 r = -EINVAL;
2129         }
2130
2131         return r;
2132 }
2133
2134 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2135                                      u64 addr)
2136 {
2137         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2138         void *target = NULL;
2139         void __user *source;
2140         u64 size;
2141
2142         if (get_user(inti->type, (u64 __user *)addr))
2143                 return -EFAULT;
2144
2145         switch (inti->type) {
2146         case KVM_S390_INT_PFAULT_INIT:
2147         case KVM_S390_INT_PFAULT_DONE:
2148         case KVM_S390_INT_VIRTIO:
2149         case KVM_S390_INT_SERVICE:
2150                 target = (void *) &inti->ext;
2151                 source = &uptr->u.ext;
2152                 size = sizeof(inti->ext);
2153                 break;
2154         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2155                 target = (void *) &inti->io;
2156                 source = &uptr->u.io;
2157                 size = sizeof(inti->io);
2158                 break;
2159         case KVM_S390_MCHK:
2160                 target = (void *) &inti->mchk;
2161                 source = &uptr->u.mchk;
2162                 size = sizeof(inti->mchk);
2163                 break;
2164         default:
2165                 return -EINVAL;
2166         }
2167
2168         if (copy_from_user(target, source, size))
2169                 return -EFAULT;
2170
2171         return 0;
2172 }
2173
2174 static int enqueue_floating_irq(struct kvm_device *dev,
2175                                 struct kvm_device_attr *attr)
2176 {
2177         struct kvm_s390_interrupt_info *inti = NULL;
2178         int r = 0;
2179         int len = attr->attr;
2180
2181         if (len % sizeof(struct kvm_s390_irq) != 0)
2182                 return -EINVAL;
2183         else if (len > KVM_S390_FLIC_MAX_BUFFER)
2184                 return -EINVAL;
2185
2186         while (len >= sizeof(struct kvm_s390_irq)) {
2187                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2188                 if (!inti)
2189                         return -ENOMEM;
2190
2191                 r = copy_irq_from_user(inti, attr->addr);
2192                 if (r) {
2193                         kfree(inti);
2194                         return r;
2195                 }
2196                 r = __inject_vm(dev->kvm, inti);
2197                 if (r) {
2198                         kfree(inti);
2199                         return r;
2200                 }
2201                 len -= sizeof(struct kvm_s390_irq);
2202                 attr->addr += sizeof(struct kvm_s390_irq);
2203         }
2204
2205         return r;
2206 }
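/*
 * Editor's sketch, assuming "flic_fd" was obtained via KVM_CREATE_DEVICE
 * with type KVM_DEV_TYPE_FLIC: enqueueing one floating interrupt.
 * attr.attr carries the buffer length and must be a multiple of
 * sizeof(struct kvm_s390_irq).
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.u.ext.ext_params2 = cookie,	// hypothetical parameter
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr = sizeof(irq),
 *		.addr = (__u64)(unsigned long)&irq,
 *	};
 *
 *	rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */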
2207
2208 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2209 {
2210         if (id >= MAX_S390_IO_ADAPTERS)
2211                 return NULL;
2212         return kvm->arch.adapters[id];
2213 }
2214
2215 static int register_io_adapter(struct kvm_device *dev,
2216                                struct kvm_device_attr *attr)
2217 {
2218         struct s390_io_adapter *adapter;
2219         struct kvm_s390_io_adapter adapter_info;
2220
2221         if (copy_from_user(&adapter_info,
2222                            (void __user *)attr->addr, sizeof(adapter_info)))
2223                 return -EFAULT;
2224
2225         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
2226             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
2227                 return -EINVAL;
2228
2229         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2230         if (!adapter)
2231                 return -ENOMEM;
2232
2233         INIT_LIST_HEAD(&adapter->maps);
2234         init_rwsem(&adapter->maps_lock);
2235         atomic_set(&adapter->nr_maps, 0);
2236         adapter->id = adapter_info.id;
2237         adapter->isc = adapter_info.isc;
2238         adapter->maskable = adapter_info.maskable;
2239         adapter->masked = false;
2240         adapter->swap = adapter_info.swap;
2241         adapter->suppressible = (adapter_info.flags) &
2242                                 KVM_S390_ADAPTER_SUPPRESSIBLE;
2243         dev->kvm->arch.adapters[adapter->id] = adapter;
2244
2245         return 0;
2246 }
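/*
 * Editor's sketch of the matching userspace call; all field values are
 * placeholders:
 *
 *	struct kvm_s390_io_adapter adapter_info = {
 *		.id = 0,
 *		.isc = 2,
 *		.maskable = 1,
 *		.flags = KVM_S390_ADAPTER_SUPPRESSIBLE,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr = (__u64)(unsigned long)&adapter_info,
 *	};
 *
 *	rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */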
2247
2248 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2249 {
2250         int ret;
2251         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2252
2253         if (!adapter || !adapter->maskable)
2254                 return -EINVAL;
2255         ret = adapter->masked;
2256         adapter->masked = masked;
2257         return ret;
2258 }
2259
2260 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2261 {
2262         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2263         struct s390_map_info *map;
2264         int ret;
2265
2266         if (!adapter || !addr)
2267                 return -EINVAL;
2268
2269         map = kzalloc(sizeof(*map), GFP_KERNEL);
2270         if (!map) {
2271                 ret = -ENOMEM;
2272                 goto out;
2273         }
2274         INIT_LIST_HEAD(&map->list);
2275         map->guest_addr = addr;
2276         map->addr = gmap_translate(kvm->arch.gmap, addr);
2277         if (map->addr == -EFAULT) {
2278                 ret = -EFAULT;
2279                 goto out;
2280         }
2281         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2282         if (ret < 0)
2283                 goto out;
2284         BUG_ON(ret != 1);
2285         down_write(&adapter->maps_lock);
2286         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2287                 list_add_tail(&map->list, &adapter->maps);
2288                 ret = 0;
2289         } else {
2290                 put_page(map->page);
2291                 ret = -EINVAL;
2292         }
2293         up_write(&adapter->maps_lock);
2294 out:
2295         if (ret)
2296                 kfree(map);
2297         return ret;
2298 }
2299
2300 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2301 {
2302         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2303         struct s390_map_info *map, *tmp;
2304         int found = 0;
2305
2306         if (!adapter || !addr)
2307                 return -EINVAL;
2308
2309         down_write(&adapter->maps_lock);
2310         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2311                 if (map->guest_addr == addr) {
2312                         found = 1;
2313                         atomic_dec(&adapter->nr_maps);
2314                         list_del(&map->list);
2315                         put_page(map->page);
2316                         kfree(map);
2317                         break;
2318                 }
2319         }
2320         up_write(&adapter->maps_lock);
2321
2322         return found ? 0 : -EINVAL;
2323 }
2324
2325 void kvm_s390_destroy_adapters(struct kvm *kvm)
2326 {
2327         int i;
2328         struct s390_map_info *map, *tmp;
2329
2330         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2331                 if (!kvm->arch.adapters[i])
2332                         continue;
2333                 list_for_each_entry_safe(map, tmp,
2334                                          &kvm->arch.adapters[i]->maps, list) {
2335                         list_del(&map->list);
2336                         put_page(map->page);
2337                         kfree(map);
2338                 }
2339                 kfree(kvm->arch.adapters[i]);
2340         }
2341 }
2342
2343 static int modify_io_adapter(struct kvm_device *dev,
2344                              struct kvm_device_attr *attr)
2345 {
2346         struct kvm_s390_io_adapter_req req;
2347         struct s390_io_adapter *adapter;
2348         int ret;
2349
2350         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2351                 return -EFAULT;
2352
2353         adapter = get_io_adapter(dev->kvm, req.id);
2354         if (!adapter)
2355                 return -EINVAL;
2356         switch (req.type) {
2357         case KVM_S390_IO_ADAPTER_MASK:
2358                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2359                 if (ret > 0)
2360                         ret = 0;
2361                 break;
2362         case KVM_S390_IO_ADAPTER_MAP:
2363                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2364                 break;
2365         case KVM_S390_IO_ADAPTER_UNMAP:
2366                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2367                 break;
2368         default:
2369                 ret = -EINVAL;
2370         }
2371
2372         return ret;
2373 }
2374
2375 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2377 {
2378         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2379         u32 schid;
2380
2381         if (attr->flags)
2382                 return -EINVAL;
2383         if (attr->attr != sizeof(schid))
2384                 return -EINVAL;
2385         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2386                 return -EFAULT;
2387         if (!schid)
2388                 return -EINVAL;
2389         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2390         /*
2391          * If userspace is conforming to the architecture, we can have at most
2392          * one pending I/O interrupt per subchannel, so this is effectively a
2393          * clear all.
2394          */
2395         return 0;
2396 }
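/*
 * Editor's sketch: clearing the pending I/O interrupt of one subchannel,
 * using the schid packing described at get_io_int():
 *
 *	__u32 schid = (1 << 16) | 3;	// subchannel id 1, number 3
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
 *		.attr = sizeof(schid),
 *		.addr = (__u64)(unsigned long)&schid,
 *	};
 *
 *	rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */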
2397
2398 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2399 {
2400         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2401         struct kvm_s390_ais_req req;
2402         int ret = 0;
2403
2404         if (!test_kvm_facility(kvm, 72))
2405                 return -ENOTSUPP;
2406
2407         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2408                 return -EFAULT;
2409
2410         if (req.isc > MAX_ISC)
2411                 return -EINVAL;
2412
2413         trace_kvm_s390_modify_ais_mode(req.isc,
2414                                        (fi->simm & AIS_MODE_MASK(req.isc)) ?
2415                                        (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2416                                        2 : KVM_S390_AIS_MODE_SINGLE :
2417                                        KVM_S390_AIS_MODE_ALL, req.mode);
2418
2419         mutex_lock(&fi->ais_lock);
2420         switch (req.mode) {
2421         case KVM_S390_AIS_MODE_ALL:
2422                 fi->simm &= ~AIS_MODE_MASK(req.isc);
2423                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2424                 break;
2425         case KVM_S390_AIS_MODE_SINGLE:
2426                 fi->simm |= AIS_MODE_MASK(req.isc);
2427                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2428                 break;
2429         default:
2430                 ret = -EINVAL;
2431         }
2432         mutex_unlock(&fi->ais_lock);
2433
2434         return ret;
2435 }
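/*
 * Editor's sketch: putting ISC 5 into single-interruption mode (this
 * requires facility 72, otherwise the kernel returns -ENOTSUPP as above):
 *
 *	struct kvm_s390_ais_req req = {
 *		.isc = 5,
 *		.mode = KVM_S390_AIS_MODE_SINGLE,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_AISM,
 *		.addr = (__u64)(unsigned long)&req,
 *	};
 *
 *	rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */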
2436
2437 static int kvm_s390_inject_airq(struct kvm *kvm,
2438                                 struct s390_io_adapter *adapter)
2439 {
2440         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2441         struct kvm_s390_interrupt s390int = {
2442                 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2443                 .parm = 0,
2444                 .parm64 = isc_to_int_word(adapter->isc),
2445         };
2446         int ret = 0;
2447
2448         if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2449                 return kvm_s390_inject_vm(kvm, &s390int);
2450
2451         mutex_lock(&fi->ais_lock);
2452         if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2453                 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2454                 goto out;
2455         }
2456
2457         ret = kvm_s390_inject_vm(kvm, &s390int);
2458         if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2459                 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2460                 trace_kvm_s390_modify_ais_mode(adapter->isc,
2461                                                KVM_S390_AIS_MODE_SINGLE, 2);
2462         }
2463 out:
2464         mutex_unlock(&fi->ais_lock);
2465         return ret;
2466 }
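/*
 * Editor's note on the suppression logic above: with an ISC in single-
 * interruption mode (simm bit set, nimm bit clear), the first successful
 * injection also sets the nimm bit, so further airqs on that ISC are
 * suppressed until userspace re-arms it, e.g. by selecting
 * KVM_S390_AIS_MODE_SINGLE again through KVM_DEV_FLIC_AISM.
 */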
2467
2468 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2469 {
2470         unsigned int id = attr->attr;
2471         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2472
2473         if (!adapter)
2474                 return -EINVAL;
2475
2476         return kvm_s390_inject_airq(kvm, adapter);
2477 }
2478
2479 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2480 {
2481         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2482         struct kvm_s390_ais_all ais;
2483
2484         if (!test_kvm_facility(kvm, 72))
2485                 return -ENOTSUPP;
2486
2487         if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2488                 return -EFAULT;
2489
2490         mutex_lock(&fi->ais_lock);
2491         fi->simm = ais.simm;
2492         fi->nimm = ais.nimm;
2493         mutex_unlock(&fi->ais_lock);
2494
2495         return 0;
2496 }
2497
2498 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2499 {
2500         int r = 0;
2501         unsigned int i;
2502         struct kvm_vcpu *vcpu;
2503
2504         switch (attr->group) {
2505         case KVM_DEV_FLIC_ENQUEUE:
2506                 r = enqueue_floating_irq(dev, attr);
2507                 break;
2508         case KVM_DEV_FLIC_CLEAR_IRQS:
2509                 kvm_s390_clear_float_irqs(dev->kvm);
2510                 break;
2511         case KVM_DEV_FLIC_APF_ENABLE:
2512                 dev->kvm->arch.gmap->pfault_enabled = 1;
2513                 break;
2514         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2515                 dev->kvm->arch.gmap->pfault_enabled = 0;
2516                 /*
2517                  * Make sure no async faults are in transition when
2518                  * clearing the queues, so that we do not need to
2519                  * worry about late-arriving workers.
2520                  */
2521                 synchronize_srcu(&dev->kvm->srcu);
2522                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2523                         kvm_clear_async_pf_completion_queue(vcpu);
2524                 break;
2525         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2526                 r = register_io_adapter(dev, attr);
2527                 break;
2528         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2529                 r = modify_io_adapter(dev, attr);
2530                 break;
2531         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2532                 r = clear_io_irq(dev->kvm, attr);
2533                 break;
2534         case KVM_DEV_FLIC_AISM:
2535                 r = modify_ais_mode(dev->kvm, attr);
2536                 break;
2537         case KVM_DEV_FLIC_AIRQ_INJECT:
2538                 r = flic_inject_airq(dev->kvm, attr);
2539                 break;
2540         case KVM_DEV_FLIC_AISM_ALL:
2541                 r = flic_ais_mode_set_all(dev->kvm, attr);
2542                 break;
2543         default:
2544                 r = -EINVAL;
2545         }
2546
2547         return r;
2548 }
2549
2550 static int flic_has_attr(struct kvm_device *dev,
2551                              struct kvm_device_attr *attr)
2552 {
2553         switch (attr->group) {
2554         case KVM_DEV_FLIC_GET_ALL_IRQS:
2555         case KVM_DEV_FLIC_ENQUEUE:
2556         case KVM_DEV_FLIC_CLEAR_IRQS:
2557         case KVM_DEV_FLIC_APF_ENABLE:
2558         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2559         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2560         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2561         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2562         case KVM_DEV_FLIC_AISM:
2563         case KVM_DEV_FLIC_AIRQ_INJECT:
2564         case KVM_DEV_FLIC_AISM_ALL:
2565                 return 0;
2566         }
2567         return -ENXIO;
2568 }
2569
2570 static int flic_create(struct kvm_device *dev, u32 type)
2571 {
2572         if (!dev)
2573                 return -EINVAL;
2574         if (dev->kvm->arch.flic)
2575                 return -EINVAL;
2576         dev->kvm->arch.flic = dev;
2577         return 0;
2578 }
2579
2580 static void flic_destroy(struct kvm_device *dev)
2581 {
2582         dev->kvm->arch.flic = NULL;
2583         kfree(dev);
2584 }
2585
2586 /* s390 floating irq controller (flic) */
2587 struct kvm_device_ops kvm_flic_ops = {
2588         .name = "kvm-flic",
2589         .get_attr = flic_get_attr,
2590         .set_attr = flic_set_attr,
2591         .has_attr = flic_has_attr,
2592         .create = flic_create,
2593         .destroy = flic_destroy,
2594 };
2595
2596 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2597 {
2598         unsigned long bit;
2599
2600         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2601
2602         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2603 }
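/*
 * Editor's note, a worked example of the computation above: an indicator
 * at page offset 0x10 with bit_nr 2 yields bit = 2 + 0x10 * 8 = 130;
 * with swap set, the big-endian correction XORs in BITS_PER_LONG - 1,
 * i.e. 130 ^ 63 = 189 on a 64-bit build.
 */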
2604
2605 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2606                                           u64 addr)
2607 {
2608         struct s390_map_info *map;
2609
2610         if (!adapter)
2611                 return NULL;
2612
2613         list_for_each_entry(map, &adapter->maps, list) {
2614                 if (map->guest_addr == addr)
2615                         return map;
2616         }
2617         return NULL;
2618 }
2619
2620 static int adapter_indicators_set(struct kvm *kvm,
2621                                   struct s390_io_adapter *adapter,
2622                                   struct kvm_s390_adapter_int *adapter_int)
2623 {
2624         unsigned long bit;
2625         int summary_set, idx;
2626         struct s390_map_info *info;
2627         void *map;
2628
2629         info = get_map_info(adapter, adapter_int->ind_addr);
2630         if (!info)
2631                 return -1;
2632         map = page_address(info->page);
2633         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2634         set_bit(bit, map);
2635         idx = srcu_read_lock(&kvm->srcu);
2636         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2637         set_page_dirty_lock(info->page);
2638         info = get_map_info(adapter, adapter_int->summary_addr);
2639         if (!info) {
2640                 srcu_read_unlock(&kvm->srcu, idx);
2641                 return -1;
2642         }
2643         map = page_address(info->page);
2644         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2645                           adapter->swap);
2646         summary_set = test_and_set_bit(bit, map);
2647         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2648         set_page_dirty_lock(info->page);
2649         srcu_read_unlock(&kvm->srcu, idx);
2650         return summary_set ? 0 : 1;
2651 }
2652
2653 /*
2654  * < 0 - not injected due to error
2655  * = 0 - coalesced, summary indicator already active
2656  * > 0 - injected interrupt
2657  */
2658 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2659                            struct kvm *kvm, int irq_source_id, int level,
2660                            bool line_status)
2661 {
2662         int ret;
2663         struct s390_io_adapter *adapter;
2664
2665         /* We're only interested in the 0->1 transition. */
2666         if (!level)
2667                 return 0;
2668         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2669         if (!adapter)
2670                 return -1;
2671         down_read(&adapter->maps_lock);
2672         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2673         up_read(&adapter->maps_lock);
2674         if ((ret > 0) && !adapter->masked) {
2675                 ret = kvm_s390_inject_airq(kvm, adapter);
2676                 if (ret == 0)
2677                         ret = 1;
2678         }
2679         return ret;
2680 }
2681
2682 /*
2683  * Inject the machine check to the guest.
2684  */
2685 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2686                                      struct mcck_volatile_info *mcck_info)
2687 {
2688         struct kvm_s390_interrupt_info inti;
2689         struct kvm_s390_irq irq;
2690         struct kvm_s390_mchk_info *mchk;
2691         union mci mci;
2692         __u64 cr14 = 0;         /* upper bits are not used */
2693         int rc;
2694
2695         mci.val = mcck_info->mcic;
2696         if (mci.sr)
2697                 cr14 |= CR14_RECOVERY_SUBMASK;
2698         if (mci.dg)
2699                 cr14 |= CR14_DEGRADATION_SUBMASK;
2700         if (mci.w)
2701                 cr14 |= CR14_WARNING_SUBMASK;
2702
2703         mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2704         mchk->cr14 = cr14;
2705         mchk->mcic = mcck_info->mcic;
2706         mchk->ext_damage_code = mcck_info->ext_damage_code;
2707         mchk->failing_storage_address = mcck_info->failing_storage_address;
2708         if (mci.ck) {
2709                 /* Inject the floating machine check */
2710                 inti.type = KVM_S390_MCHK;
2711                 rc = __inject_vm(vcpu->kvm, &inti);
2712         } else {
2713                 /* Inject the machine check to specified vcpu */
2714                 irq.type = KVM_S390_MCHK;
2715                 rc = kvm_s390_inject_vcpu(vcpu, &irq);
2716         }
2717         WARN_ON_ONCE(rc);
2718 }
2719
2720 int kvm_set_routing_entry(struct kvm *kvm,
2721                           struct kvm_kernel_irq_routing_entry *e,
2722                           const struct kvm_irq_routing_entry *ue)
2723 {
2724         int ret;
2725
2726         switch (ue->type) {
2727         case KVM_IRQ_ROUTING_S390_ADAPTER:
2728                 e->set = set_adapter_int;
2729                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2730                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2731                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2732                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2733                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2734                 ret = 0;
2735                 break;
2736         default:
2737                 ret = -EINVAL;
2738         }
2739
2740         return ret;
2741 }
2742
2743 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2744                 int irq_source_id, int level, bool line_status)
2745 {
2746         return -EINVAL;
2747 }
2748
2749 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2750 {
2751         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2752         struct kvm_s390_irq *buf;
2753         int r = 0;
2754         int n;
2755
2756         buf = vmalloc(len);
2757         if (!buf)
2758                 return -ENOMEM;
2759
2760         if (copy_from_user((void *) buf, irqstate, len)) {
2761                 r = -EFAULT;
2762                 goto out_free;
2763         }
2764
2765         /*
2766          * Don't allow setting the interrupt state
2767          * when there are already interrupts pending
2768          */
2769         spin_lock(&li->lock);
2770         if (li->pending_irqs) {
2771                 r = -EBUSY;
2772                 goto out_unlock;
2773         }
2774
2775         for (n = 0; n < len / sizeof(*buf); n++) {
2776                 r = do_inject_vcpu(vcpu, &buf[n]);
2777                 if (r)
2778                         break;
2779         }
2780
2781 out_unlock:
2782         spin_unlock(&li->lock);
2783 out_free:
2784         vfree(buf);
2785
2786         return r;
2787 }
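/*
 * Editor's sketch of the corresponding vcpu ioctl, assuming the uapi
 * wrapper struct kvm_s390_irq_state (buf/len are in bytes):
 *
 *	struct kvm_s390_irq irqs[1] = { { .type = KVM_S390_RESTART } };
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)irqs,
 *		.len = sizeof(irqs),
 *	};
 *
 *	rc = ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);
 */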
2788
2789 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2790                             struct kvm_s390_irq *irq,
2791                             unsigned long irq_type)
2792 {
2793         switch (irq_type) {
2794         case IRQ_PEND_MCHK_EX:
2795         case IRQ_PEND_MCHK_REP:
2796                 irq->type = KVM_S390_MCHK;
2797                 irq->u.mchk = li->irq.mchk;
2798                 break;
2799         case IRQ_PEND_PROG:
2800                 irq->type = KVM_S390_PROGRAM_INT;
2801                 irq->u.pgm = li->irq.pgm;
2802                 break;
2803         case IRQ_PEND_PFAULT_INIT:
2804                 irq->type = KVM_S390_INT_PFAULT_INIT;
2805                 irq->u.ext = li->irq.ext;
2806                 break;
2807         case IRQ_PEND_EXT_EXTERNAL:
2808                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2809                 irq->u.extcall = li->irq.extcall;
2810                 break;
2811         case IRQ_PEND_EXT_CLOCK_COMP:
2812                 irq->type = KVM_S390_INT_CLOCK_COMP;
2813                 break;
2814         case IRQ_PEND_EXT_CPU_TIMER:
2815                 irq->type = KVM_S390_INT_CPU_TIMER;
2816                 break;
2817         case IRQ_PEND_SIGP_STOP:
2818                 irq->type = KVM_S390_SIGP_STOP;
2819                 irq->u.stop = li->irq.stop;
2820                 break;
2821         case IRQ_PEND_RESTART:
2822                 irq->type = KVM_S390_RESTART;
2823                 break;
2824         case IRQ_PEND_SET_PREFIX:
2825                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2826                 irq->u.prefix = li->irq.prefix;
2827                 break;
2828         }
2829 }
2830
2831 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2832 {
2833         int scn;
2834         unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2835         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2836         unsigned long pending_irqs;
2837         struct kvm_s390_irq irq;
2838         unsigned long irq_type;
2839         int cpuaddr;
2840         int n = 0;
2841
2842         spin_lock(&li->lock);
2843         pending_irqs = li->pending_irqs;
2844         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2845                sizeof(sigp_emerg_pending));
2846         spin_unlock(&li->lock);
2847
2848         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2849                 memset(&irq, 0, sizeof(irq));
2850                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2851                         continue;
2852                 if (n + sizeof(irq) > len)
2853                         return -ENOBUFS;
2854                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2855                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2856                         return -EFAULT;
2857                 n += sizeof(irq);
2858         }
2859
2860         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2861                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2862                         memset(&irq, 0, sizeof(irq));
2863                         if (n + sizeof(irq) > len)
2864                                 return -ENOBUFS;
2865                         irq.type = KVM_S390_INT_EMERGENCY;
2866                         irq.u.emerg.code = cpuaddr;
2867                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2868                                 return -EFAULT;
2869                         n += sizeof(irq);
2870                 }
2871         }
2872
2873         if (sca_ext_call_pending(vcpu, &scn)) {
2874                 if (n + sizeof(irq) > len)
2875                         return -ENOBUFS;
2876                 memset(&irq, 0, sizeof(irq));
2877                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2878                 irq.u.extcall.code = scn;
2879                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2880                         return -EFAULT;
2881                 n += sizeof(irq);
2882         }
2883
2884         return n;
2885 }
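/*
 * Editor's sketch of reading the state back: a non-negative return value
 * is the number of bytes stored, and -ENOBUFS asks userspace to retry
 * with a larger buffer ("buf" and "buf_len" are hypothetical):
 *
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)buf,
 *		.len = buf_len,
 *	};
 *
 *	rc = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 */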
2886
2887 void kvm_s390_gisa_clear(struct kvm *kvm)
2888 {
2889         if (kvm->arch.gisa) {
2890                 memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
2891                 kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
2892                 VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
2893         }
2894 }
2895
2896 void kvm_s390_gisa_init(struct kvm *kvm)
2897 {
2898         if (css_general_characteristics.aiv) {
2899                 kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
2900                 VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
2901                 kvm_s390_gisa_clear(kvm);
2902         }
2903 }
2904
2905 void kvm_s390_gisa_destroy(struct kvm *kvm)
2906 {
2907         if (!kvm->arch.gisa)
2908                 return;
2909         kvm->arch.gisa = NULL;
2910 }