Merge tag 'kvm-3.6-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[sfrench/cifs-2.6.git] / arch / s390 / kvm / sigp.c
1 /*
2  * handling interprocessor communication
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
13  */
14
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
18 #include <asm/sigp.h>
19 #include "gaccess.h"
20 #include "kvm-s390.h"
21
22 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
23                         u64 *reg)
24 {
25         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
26         int rc;
27
28         if (cpu_addr >= KVM_MAX_VCPUS)
29                 return SIGP_CC_NOT_OPERATIONAL;
30
31         spin_lock(&fi->lock);
32         if (fi->local_int[cpu_addr] == NULL)
33                 rc = SIGP_CC_NOT_OPERATIONAL;
34         else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
35                    & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
36                 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
37         else {
38                 *reg &= 0xffffffff00000000UL;
39                 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
40                     & CPUSTAT_ECALL_PEND)
41                         *reg |= SIGP_STATUS_EXT_CALL_PENDING;
42                 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
43                     & CPUSTAT_STOPPED)
44                         *reg |= SIGP_STATUS_STOPPED;
45                 rc = SIGP_CC_STATUS_STORED;
46         }
47         spin_unlock(&fi->lock);
48
49         VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
50         return rc;
51 }
52
53 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
54 {
55         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
56         struct kvm_s390_local_interrupt *li;
57         struct kvm_s390_interrupt_info *inti;
58         int rc;
59
60         if (cpu_addr >= KVM_MAX_VCPUS)
61                 return SIGP_CC_NOT_OPERATIONAL;
62
63         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
64         if (!inti)
65                 return -ENOMEM;
66
67         inti->type = KVM_S390_INT_EMERGENCY;
68         inti->emerg.code = vcpu->vcpu_id;
69
70         spin_lock(&fi->lock);
71         li = fi->local_int[cpu_addr];
72         if (li == NULL) {
73                 rc = SIGP_CC_NOT_OPERATIONAL;
74                 kfree(inti);
75                 goto unlock;
76         }
77         spin_lock_bh(&li->lock);
78         list_add_tail(&inti->list, &li->list);
79         atomic_set(&li->active, 1);
80         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
81         if (waitqueue_active(&li->wq))
82                 wake_up_interruptible(&li->wq);
83         spin_unlock_bh(&li->lock);
84         rc = SIGP_CC_ORDER_CODE_ACCEPTED;
85         VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
86 unlock:
87         spin_unlock(&fi->lock);
88         return rc;
89 }
90
91 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
92 {
93         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
94         struct kvm_s390_local_interrupt *li;
95         struct kvm_s390_interrupt_info *inti;
96         int rc;
97
98         if (cpu_addr >= KVM_MAX_VCPUS)
99                 return SIGP_CC_NOT_OPERATIONAL;
100
101         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
102         if (!inti)
103                 return -ENOMEM;
104
105         inti->type = KVM_S390_INT_EXTERNAL_CALL;
106         inti->extcall.code = vcpu->vcpu_id;
107
108         spin_lock(&fi->lock);
109         li = fi->local_int[cpu_addr];
110         if (li == NULL) {
111                 rc = SIGP_CC_NOT_OPERATIONAL;
112                 kfree(inti);
113                 goto unlock;
114         }
115         spin_lock_bh(&li->lock);
116         list_add_tail(&inti->list, &li->list);
117         atomic_set(&li->active, 1);
118         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
119         if (waitqueue_active(&li->wq))
120                 wake_up_interruptible(&li->wq);
121         spin_unlock_bh(&li->lock);
122         rc = SIGP_CC_ORDER_CODE_ACCEPTED;
123         VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
124 unlock:
125         spin_unlock(&fi->lock);
126         return rc;
127 }
128
129 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
130 {
131         struct kvm_s390_interrupt_info *inti;
132
133         inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
134         if (!inti)
135                 return -ENOMEM;
136         inti->type = KVM_S390_SIGP_STOP;
137
138         spin_lock_bh(&li->lock);
139         if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
140                 goto out;
141         list_add_tail(&inti->list, &li->list);
142         atomic_set(&li->active, 1);
143         atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
144         li->action_bits |= action;
145         if (waitqueue_active(&li->wq))
146                 wake_up_interruptible(&li->wq);
147 out:
148         spin_unlock_bh(&li->lock);
149
150         return SIGP_CC_ORDER_CODE_ACCEPTED;
151 }
152
153 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
154 {
155         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
156         struct kvm_s390_local_interrupt *li;
157         int rc;
158
159         if (cpu_addr >= KVM_MAX_VCPUS)
160                 return SIGP_CC_NOT_OPERATIONAL;
161
162         spin_lock(&fi->lock);
163         li = fi->local_int[cpu_addr];
164         if (li == NULL) {
165                 rc = SIGP_CC_NOT_OPERATIONAL;
166                 goto unlock;
167         }
168
169         rc = __inject_sigp_stop(li, action);
170
171 unlock:
172         spin_unlock(&fi->lock);
173         VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
174         return rc;
175 }
176
177 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
178 {
179         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
180         return __inject_sigp_stop(li, action);
181 }
182
183 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
184 {
185         int rc;
186
187         switch (parameter & 0xff) {
188         case 0:
189                 rc = SIGP_CC_NOT_OPERATIONAL;
190                 break;
191         case 1:
192         case 2:
193                 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
194                 break;
195         default:
196                 rc = -EOPNOTSUPP;
197         }
198         return rc;
199 }
200
/*
 * SIGP SET PREFIX: ask the addressed (stopped) CPU to move its prefix
 * area to @address.
 *
 * The new prefix must reference two accessible guest pages; failures
 * store SIGP_STATUS_INVALID_PARAMETER or SIGP_STATUS_INCORRECT_STATE
 * into the low word of *reg and return SIGP_CC_STATUS_STORED.  On
 * success a KVM_S390_SIGP_SET_PREFIX interrupt is queued for the
 * target.  Lock order: fi->lock, then li->lock.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	/* probe one byte in each of the two prefix pages */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	/* allocate before taking fi->lock; report busy on failure */
	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		/* no such CPU: store incorrect-state status */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	/* queue the request and wake the target if it is waiting */
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
261
262 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
263                                 u64 *reg)
264 {
265         int rc;
266         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
267
268         if (cpu_addr >= KVM_MAX_VCPUS)
269                 return SIGP_CC_NOT_OPERATIONAL;
270
271         spin_lock(&fi->lock);
272         if (fi->local_int[cpu_addr] == NULL)
273                 rc = SIGP_CC_NOT_OPERATIONAL;
274         else {
275                 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
276                     & CPUSTAT_RUNNING) {
277                         /* running */
278                         rc = SIGP_CC_ORDER_CODE_ACCEPTED;
279                 } else {
280                         /* not running */
281                         *reg &= 0xffffffff00000000UL;
282                         *reg |= SIGP_STATUS_NOT_RUNNING;
283                         rc = SIGP_CC_STATUS_STORED;
284                 }
285         }
286         spin_unlock(&fi->lock);
287
288         VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
289                    rc);
290
291         return rc;
292 }
293
294 static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
295 {
296         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
297         struct kvm_s390_local_interrupt *li;
298         int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
299
300         if (cpu_addr >= KVM_MAX_VCPUS)
301                 return SIGP_CC_NOT_OPERATIONAL;
302
303         spin_lock(&fi->lock);
304         li = fi->local_int[cpu_addr];
305         if (li == NULL) {
306                 rc = SIGP_CC_NOT_OPERATIONAL;
307                 goto out;
308         }
309
310         spin_lock_bh(&li->lock);
311         if (li->action_bits & ACTION_STOP_ON_STOP)
312                 rc = SIGP_CC_BUSY;
313         else
314                 VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
315                         cpu_addr);
316         spin_unlock_bh(&li->lock);
317 out:
318         spin_unlock(&fi->lock);
319         return rc;
320 }
321
/*
 * Intercept handler for the SIGNAL PROCESSOR instruction.
 *
 * Decodes the order code and parameter from the intercepted
 * instruction, dispatches to the per-order helpers above, and folds
 * the resulting SIGP condition code into the guest PSW.  Returns 0 on
 * success, a negative error, or -EOPNOTSUPP for orders that must be
 * completed in userspace.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* register fields of the SIGP instruction (RS format) */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	/* order code = second-operand address: base register + displacement */
	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	/* parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == SIGP_CC_BUSY)
			break;
		/* user space must know about restart */
		/* deliberate fall through to -EOPNOTSUPP */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* set the SIGP condition code in bits 18-19 of the guest PSW */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}