/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

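/*
 * A vcpu is runnable when it has pending exceptions to deliver or
 * pending vcpu requests to service.
 */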
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; interrupts are hard-disabled again before we actually enter
 * the guest.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 * This also orders the write to mode from any reads
                 * to the page tables done while the VCPU is running.
                 * Please see the comment in kvm_flush_remote_tlbs.
                 */
                smp_mb();

                if (kvm_request_pending(vcpu)) {
                        /* Make sure we process requests with preemption enabled */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /*
                         * Interrupts got enabled in between, so we
                         * are back at square 1.
                         */
                        continue;
                }

                guest_enter_irqoff();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;

        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif

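/*
 * Handle a KVM/ePAPR paravirtual hypercall: the hypercall number is in
 * GPR 11 and up to four arguments in GPRs 3-6.  We return the EV_*
 * status (which the emulation code places in the guest's GPR 3) and
 * set the second return value in GPR 4 below.
 */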
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, find it out here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif

                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void *)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

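/*
 * Check that the vcpu is configured in a way we can virtualize: a PVR
 * must be set, PAPR mode requires a Book3S-64 vcpu, and HV KVM only
 * supports PAPR guests for now.  The result is cached in
 * vcpu->arch.sane.
 */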
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

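/*
 * Run the load/store emulator and map its result onto an action for
 * the caller: resume the guest, or go back to the host (with an MMIO
 * exit when userspace has to complete the access).
 */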
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_loadstore(vcpu);
        switch (er) {
        case EMULATE_DONE:
                /*
                 * Future optimization: only reload non-volatiles if they were
                 * actually modified.
                 */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /*
                 * We must reload nonvolatiles because "update" load/store
                 * instructions modify register state.
                 * Future optimization: only reload non-volatiles if they were
                 * actually modified.
                 */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
        {
                u32 last_inst;

                kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                /* XXX Deliver Program interrupt to guest. */
                pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
                r = RESUME_HOST;
                break;
        }
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

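/*
 * kvmppc_st()/kvmppc_ld() translate a guest effective address and then
 * store to (or load from) guest memory, honoring the protection bits
 * of the translation.  Guest-kernel-mode accesses to the magic page
 * are serviced directly from the shared page.
 */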
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int r;

        vcpu->stat.st++;

        r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                         XLATE_WRITE, &pte);
        if (r < 0)
                return r;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(magic, ptr, size);
                return EMULATE_DONE;
        }

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int rc;

        vcpu->stat.ld++;

        rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                          XLATE_READ, &pte);
        if (rc)
                return rc;

        *eaddr = pte.raddr;

        if (!pte.may_read)
                return -EPERM;

        if (!data && !pte.may_execute)
                return -ENOEXEC;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(ptr, magic, size);
                return EMULATE_DONE;
        }

        if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
        return 0;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

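/*
 * Pick the HV or PR implementation for a new VM.  An explicit type
 * (KVM_VM_PPC_HV or KVM_VM_PPC_PR) must match a loaded module; type 0
 * defaults to HV when both are available.
 */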
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
        return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
        /*
         * We call kick_all_cpus_sync() to ensure that all
         * CPUs have executed any pending IPIs before we
         * continue and free VCPUs structures below.
         */
        if (is_kvmppc_hv_enabled(kvm))
                kick_all_cpus_sync();
#endif

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

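/*
 * Report which capabilities the host supports.  Without a VM we can
 * only guess whether HV or PR semantics will apply; once a VM exists
 * we know for sure.
 */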
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        /* Assume we're using HV mode when the HV module is loaded */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        if (kvm) {
                /*
                 * Hooray - we know which VM type we're running on. Depend on
                 * that rather than the guess above.
                 */
                hv_enabled = is_kvmppc_hv_enabled(kvm);
        }

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_IMMEDIATE_EXIT:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_SPAPR_TCE_64:
                /* fallthrough */
        case KVM_CAP_SPAPR_TCE_VFIO:
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;

        case KVM_CAP_PPC_ALLOC_HTAB:
                r = hv_enabled;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                r = 0;
                if (kvm) {
                        if (kvm->arch.emul_smt_mode > 1)
                                r = kvm->arch.emul_smt_mode;
                        else
                                r = kvm->arch.smt_mode;
                } else if (hv_enabled) {
                        if (cpu_has_feature(CPU_FTR_ARCH_300))
                                r = 1;
                        else
                                r = threads_per_subcore;
                }
                break;
        case KVM_CAP_PPC_SMT_POSSIBLE:
                r = 1;
                if (hv_enabled) {
                        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                                r = ((threads_per_subcore << 1) - 1);
                        else
                                /* P9 can emulate dbells, so allow any mode */
                                r = 8 | 4 | 2 | 1;
                }
                break;
        case KVM_CAP_PPC_RMA:
                r = 0;
                break;
        case KVM_CAP_PPC_HWRNG:
                r = kvmppc_hwrng_present();
                break;
        case KVM_CAP_PPC_MMU_RADIX:
                r = !!(hv_enabled && radix_enabled());
                break;
        case KVM_CAP_PPC_MMU_HASH_V3:
                r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
        case KVM_CAP_SPAPR_MULTITCE:
                r = 1;
                break;
        case KVM_CAP_SPAPR_RESIZE_HPT:
                /* Disable this on POWER9 until code handles new HPTE format */
                r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_FWNMI:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_PPC_HTM:
                r = hv_enabled &&
                    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                if (xive_enabled())
                        kvmppc_xive_cleanup_vcpu(vcpu);
                else
                        kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        kvmppc_decrementer_func(vcpu);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
        return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
                (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                                     struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;

        if (kvm->arch.kvm_ops->irq_bypass_add_producer)
                return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

        return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                      struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;

        if (kvm->arch.kvm_ops->irq_bypass_del_producer)
                kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
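/*
 * Map a dword/word index within a VSX register to its offset in the
 * host-side register image; the layout depends on host endianness.
 * Out-of-range indices yield -1.
 */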
static inline int kvmppc_get_vsr_dword_offset(int index)
{
        int offset;

        if ((index != 0) && (index != 1))
                return -1;

#ifdef __BIG_ENDIAN
        offset = index;
#else
        offset = 1 - index;
#endif

        return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
        int offset;

        if ((index > 3) || (index < 0))
                return -1;

#ifdef __BIG_ENDIAN
        offset = index;
#else
        offset = 3 - index;
#endif
        return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
        u64 gpr)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (offset == -1)
                return;

        if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
                val.vval = VCPU_VSX_VR(vcpu, index);
                val.vsxval[offset] = gpr;
                VCPU_VSX_VR(vcpu, index) = val.vval;
        } else {
                VCPU_VSX_FPR(vcpu, index, offset) = gpr;
        }
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
        u64 gpr)
{
        union kvmppc_one_reg val;
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

        if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
                val.vval = VCPU_VSX_VR(vcpu, index);
                val.vsxval[0] = gpr;
                val.vsxval[1] = gpr;
                VCPU_VSX_VR(vcpu, index) = val.vval;
        } else {
                VCPU_VSX_FPR(vcpu, index, 0) = gpr;
                VCPU_VSX_FPR(vcpu, index, 1) = gpr;
        }
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
        u32 gpr32)
{
        union kvmppc_one_reg val;
        int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
        int dword_offset, word_offset;

        if (offset == -1)
                return;

        if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
                val.vval = VCPU_VSX_VR(vcpu, index);
                val.vsx32val[offset] = gpr32;
                VCPU_VSX_VR(vcpu, index) = val.vval;
        } else {
                dword_offset = offset / 2;
                word_offset = offset % 2;
                val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
                val.vsx32val[word_offset] = gpr32;
                VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
        }
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_PPC_FPU
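/*
 * Convert between single and double precision using the FPU: lfs
 * converts single to double on load, stfs converts back on store.
 * fr0 is clobbered, hence the enable_kernel_fp()/preemption handling.
 */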
static inline u64 sp_to_dp(u32 fprs)
{
        u64 fprd;

        preempt_disable();
        enable_kernel_fp();
        asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
             : "fr0");
        preempt_enable();
        return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
        u32 fprs;

        preempt_disable();
        enable_kernel_fp();
        asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
             : "fr0");
        preempt_enable();
        return fprs;
}

#else
#define sp_to_dp(x)     (x)
#define dp_to_sp(x)     (x)
#endif /* CONFIG_PPC_FPU */

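/*
 * Complete an MMIO load by writing the data from the kvm_run MMIO
 * buffer into the target register, applying host byte swapping,
 * single-to-double extension and sign extension as requested by the
 * instruction emulation.
 */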
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (!vcpu->arch.mmio_host_swabbed) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                switch (run->mmio.len) {
                case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
                case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
                case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        /* conversion between single and double precision */
        if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
                gpr = sp_to_dp(gpr);

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
#ifdef CONFIG_VSX
        case KVM_MMIO_REG_VSX:
                if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
                        kvmppc_set_vsr_dword(vcpu, gpr);
                else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
                        kvmppc_set_vsr_word(vcpu, gpr);
                else if (vcpu->arch.mmio_vsx_copy_type ==
                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
                        kvmppc_set_vsr_dword_dump(vcpu, gpr);
                break;
#endif
        default:
                BUG();
        }
}

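/*
 * Set up an MMIO load in the kvm_run area and try to satisfy it on the
 * in-kernel MMIO bus first; only if that fails do we hand the access
 * to userspace with EMULATE_DO_MMIO.
 */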
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
                                int is_default_endian, int sign_extend)
{
        int idx, ret;
        bool host_swabbed;

        /* Pity C doesn't have a logical XOR operator */
        if (kvmppc_need_byteswap(vcpu)) {
                host_swabbed = is_default_endian;
        } else {
                host_swabbed = !is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
                return EMULATE_FAIL;
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_host_swabbed = host_swabbed;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = sign_extend;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian, int mmio_sign_extend)
{
        enum emulation_result emulated = EMULATE_DONE;

        /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
        if (vcpu->arch.mmio_vsx_copy_nums > 4 ||
            vcpu->arch.mmio_vsx_copy_nums < 0) {
                return EMULATE_FAIL;
        }

        while (vcpu->arch.mmio_vsx_copy_nums) {
                emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
                        is_default_endian, mmio_sign_extend);

                if (emulated != EMULATE_DONE)
                        break;

                vcpu->arch.paddr_accessed += run->mmio.len;

                vcpu->arch.mmio_vsx_copy_nums--;
                vcpu->arch.mmio_vsx_offset++;
        }
        return emulated;
}
#endif /* CONFIG_VSX */

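/*
 * Set up an MMIO store: the value is converted and byte-swapped into
 * the kvm_run MMIO buffer, then tried on the in-kernel MMIO bus before
 * falling back to userspace.
 */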
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        bool host_swabbed;

        /* Pity C doesn't have a logical XOR operator */
        if (kvmppc_need_byteswap(vcpu)) {
                host_swabbed = is_default_endian;
        } else {
                host_swabbed = !is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
                return EMULATE_FAIL;
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
                val = dp_to_sp(val);

        /* Store the value in the lowest bytes of 'data'. */
        if (!host_swabbed) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                switch (bytes) {
                case 8: *(u64 *)data = swab64(val); break;
                case 4: *(u32 *)data = swab32(val); break;
                case 2: *(u16 *)data = swab16(val); break;
                case 1: *(u8  *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
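/*
 * Fetch the source dword or word of a VSX store from the FPR or VR
 * register file, according to the copy type and the current VSX
 * offset.
 */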
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
        u32 dword_offset, word_offset;
        union kvmppc_one_reg reg;
        int vsx_offset = 0;
        int copy_type = vcpu->arch.mmio_vsx_copy_type;
        int result = 0;

        switch (copy_type) {
        case KVMPPC_VSX_COPY_DWORD:
                vsx_offset =
                        kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

                if (vsx_offset == -1) {
                        result = -1;
                        break;
                }

                if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
                        *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
                } else {
                        reg.vval = VCPU_VSX_VR(vcpu, rs);
                        *val = reg.vsxval[vsx_offset];
                }
                break;

        case KVMPPC_VSX_COPY_WORD:
                vsx_offset =
                        kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

                if (vsx_offset == -1) {
                        result = -1;
                        break;
                }

                if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
                        dword_offset = vsx_offset / 2;
                        word_offset = vsx_offset % 2;
                        reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
                        *val = reg.vsx32val[word_offset];
                } else {
                        reg.vval = VCPU_VSX_VR(vcpu, rs);
                        *val = reg.vsx32val[vsx_offset];
                }
                break;

        default:
                result = -1;
                break;
        }

        return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        int rs, unsigned int bytes, int is_default_endian)
{
        u64 val;
        enum emulation_result emulated = EMULATE_DONE;

        vcpu->arch.io_gpr = rs;

        /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
        if (vcpu->arch.mmio_vsx_copy_nums > 4 ||
            vcpu->arch.mmio_vsx_copy_nums < 0) {
                return EMULATE_FAIL;
        }

        while (vcpu->arch.mmio_vsx_copy_nums) {
                if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
                        return EMULATE_FAIL;

                emulated = kvmppc_handle_store(run, vcpu,
                         val, bytes, is_default_endian);

                if (emulated != EMULATE_DONE)
                        break;

                vcpu->arch.paddr_accessed += run->mmio.len;

                vcpu->arch.mmio_vsx_copy_nums--;
                vcpu->arch.mmio_vsx_offset++;
        }

        return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
                        struct kvm_run *run)
{
        enum emulation_result emulated = EMULATE_FAIL;
        int r;

        vcpu->arch.paddr_accessed += run->mmio.len;

        if (!vcpu->mmio_is_write) {
                emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
                         run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
        } else {
                emulated = kvmppc_handle_vsx_store(run, vcpu,
                         vcpu->arch.io_gpr, run->mmio.len, 1);
        }

        switch (emulated) {
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                r = RESUME_HOST;
                break;
        case EMULATE_FAIL:
                pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                r = RESUME_HOST;
                break;
        default:
                r = RESUME_GUEST;
                break;
        }
        return r;
}
#endif /* CONFIG_VSX */

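/*
 * Generic ONE_REG accessors.  The core (HV or PR) implementation gets
 * first shot at the register; anything it rejects with -EINVAL, such
 * as the Altivec registers, is handled here.
 */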
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = 0;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        r = kvmppc_get_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        val = get_reg_val(reg->id, vcpu->arch.vrsave);
                        break;
#endif /* CONFIG_ALTIVEC */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        if (r)
                return r;

        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
                r = -EFAULT;

        return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
                return -EFAULT;

        r = kvmppc_set_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vrsave = set_reg_val(reg->id, val);
                        break;
#endif /* CONFIG_ALTIVEC */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

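/*
 * Main vcpu run entry point.  Before (re)entering the guest, finish
 * whatever the previous exit left pending: an MMIO load result (and
 * any remaining VSX repeats), OSI or PAPR hypercall return values, or
 * an EPR supplied by userspace.
 */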
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;

        if (vcpu->mmio_needed) {
                vcpu->mmio_needed = 0;
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
                if (vcpu->arch.mmio_vsx_copy_nums > 0) {
                        vcpu->arch.mmio_vsx_copy_nums--;
                        vcpu->arch.mmio_vsx_offset++;
                }

                if (vcpu->arch.mmio_vsx_copy_nums > 0) {
                        r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
                        if (r == RESUME_HOST) {
                                vcpu->mmio_needed = 1;
                                return r;
                        }
                }
#endif
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        kvm_sigset_activate(vcpu);

        if (run->immediate_exit)
                r = -EINTR;
        else
                r = kvmppc_vcpu_run(run, vcpu);

        kvm_sigset_deactivate(vcpu);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

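/*
 * Per-vcpu capabilities are switched on from userspace with the
 * KVM_ENABLE_CAP vcpu ioctl.  A minimal sketch of a caller
 * (hypothetical vcpu_fd, error handling elided):
 *
 *        struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *        ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */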
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev) {
                        if (xive_enabled())
                                r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
                        else
                                r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
                }

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_FWNMI:
                r = -EINVAL;
                if (!is_kvmppc_hv_enabled(vcpu->kvm))
                        break;
                r = 0;
                vcpu->kvm->arch.fwnmi_enabled = true;
                break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
        if (kvm->arch.mpic)
                return true;
#endif
#ifdef CONFIG_KVM_XICS
        if (kvm->arch.xics || kvm->arch.xive)
                return true;
#endif
        return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}
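/*
 * PPC provides no extra mmap-able vcpu regions beyond the generic ones,
 * so any arch-level fault on the vcpu mapping is an error.
 */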
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
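/*
 * Report the instruction sequence a guest should execute to make a KVM
 * hypercall.  The words are stored big-endian; userspace typically
 * advertises them to the guest, e.g. via the device tree.  On booke HV a
 * single "sc 1" suffices; otherwise a magic value is loaded into r0
 * before a plain "sc".
 */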
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
        pvinfo->hcall[1] = cpu_to_be32(inst_nop);
        pvinfo->hcall[2] = cpu_to_be32(inst_nop);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;
        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
        pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
        pvinfo->hcall[2] = cpu_to_be32(inst_sc);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}
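
/*
 * Illustrative userspace call (a sketch; "vm_fd" is a placeholder for a
 * KVM VM file descriptor):
 *
 *      struct kvm_ppc_pvinfo pvinfo;
 *      ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *      // pvinfo.hcall[] now holds the hypercall sequence shown above
 */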
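/*
 * Raise or lower an interrupt line on the in-kernel irqchip; fails with
 * -ENXIO when no in-kernel irqchip has been created.
 */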
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        case KVM_CAP_PPC_ENABLE_HCALL: {
                unsigned long hcall = cap->args[0];

                r = -EINVAL;
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    cap->args[1] > 1)
                        break;
                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
                        break;
                if (cap->args[1])
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                r = 0;
                break;
        }
        case KVM_CAP_PPC_SMT: {
                unsigned long mode = cap->args[0];
                unsigned long flags = cap->args[1];

                r = -EINVAL;
                if (kvm->arch.kvm_ops->set_smt_mode)
                        r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
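
/*
 * Illustrative userspace usage (a sketch; "vm_fd" is a placeholder and
 * H_SET_MODE merely one example opcode): enabling in-kernel handling of
 * a single hypercall, per the KVM_CAP_PPC_ENABLE_HCALL case above.
 *
 *      struct kvm_enable_cap cap = {
 *              .cap = KVM_CAP_PPC_ENABLE_HCALL,
 *              .args = { H_SET_MODE, 1 },      // opcode, 1 = enable
 *      };
 *      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */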
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
#ifdef CONFIG_SPAPR_TCE_IOMMU
        case KVM_CREATE_SPAPR_TCE_64: {
                struct kvm_create_spapr_tce_64 create_tce_64;

                r = -EFAULT;
                if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
                        goto out;
                if (create_tce_64.flags) {
                        r = -EINVAL;
                        goto out;
                }
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
                goto out;
        }
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm_create_spapr_tce_64 create_tce_64;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;

                create_tce_64.liobn = create_tce.liobn;
                create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
                create_tce_64.offset = 0;
                create_tce_64.size = create_tce.window_size >>
                                IOMMU_PAGE_SHIFT_4K;
                create_tce_64.flags = 0;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
                goto out;
        }
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        case KVM_PPC_CONFIGURE_V3_MMU: {
                struct kvm *kvm = filp->private_data;
                struct kvm_ppc_mmuv3_cfg cfg;

                r = -EINVAL;
                if (!kvm->arch.kvm_ops->configure_mmu)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(&cfg, argp, sizeof(cfg)))
                        goto out;
                r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
                break;
        }
        case KVM_PPC_GET_RMMU_INFO: {
                struct kvm *kvm = filp->private_data;
                struct kvm_ppc_rmmu_info info;

                r = -EINVAL;
                if (!kvm->arch.kvm_ops->get_rmmu_info)
                        goto out;
                r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}
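/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The
 * test_and_set_bit() loop in kvmppc_alloc_lpid() retries on collision,
 * so concurrent allocations are safe without a lock.
 */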
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
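/*
 * Record how many LPIDs the platform provides; the value is clamped to
 * KVMPPC_NR_LPIDS and the in-use bitmap is cleared.
 */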
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
        return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);